1/*
2 * drivers/net/ethernet/rocker/rocker.c - Rocker switch device driver
3 * Copyright (c) 2014 Jiri Pirko <jiri@resnulli.us>
4 * Copyright (c) 2014 Scott Feldman <sfeldma@gmail.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 */
11
12#include <linux/kernel.h>
13#include <linux/module.h>
14#include <linux/pci.h>
15#include <linux/interrupt.h>
16#include <linux/sched.h>
17#include <linux/wait.h>
18#include <linux/spinlock.h>
19#include <linux/hashtable.h>
20#include <linux/crc32.h>
21#include <linux/sort.h>
22#include <linux/random.h>
23#include <linux/netdevice.h>
24#include <linux/inetdevice.h>
25#include <linux/skbuff.h>
26#include <linux/socket.h>
27#include <linux/etherdevice.h>
28#include <linux/ethtool.h>
29#include <linux/if_ether.h>
30#include <linux/if_vlan.h>
31#include <linux/if_bridge.h>
32#include <linux/bitops.h>
33#include <linux/ctype.h>
34#include <net/switchdev.h>
35#include <net/rtnetlink.h>
36#include <net/ip_fib.h>
37#include <net/netevent.h>
38#include <net/arp.h>
39#include <asm-generic/io-64-nonatomic-lo-hi.h>
40#include <generated/utsrelease.h>
41
42#include "rocker.h"
43
44static const char rocker_driver_name[] = "rocker";
45
46static const struct pci_device_id rocker_pci_id_table[] = {
47	{PCI_VDEVICE(REDHAT, PCI_DEVICE_ID_REDHAT_ROCKER), 0},
48	{0, }
49};
50
51struct rocker_flow_tbl_key {
52	u32 priority;
53	enum rocker_of_dpa_table_id tbl_id;
54	union {
55		struct {
56			u32 in_pport;
57			u32 in_pport_mask;
58			enum rocker_of_dpa_table_id goto_tbl;
59		} ig_port;
60		struct {
61			u32 in_pport;
62			__be16 vlan_id;
63			__be16 vlan_id_mask;
64			enum rocker_of_dpa_table_id goto_tbl;
65			bool untagged;
66			__be16 new_vlan_id;
67		} vlan;
68		struct {
69			u32 in_pport;
70			u32 in_pport_mask;
71			__be16 eth_type;
72			u8 eth_dst[ETH_ALEN];
73			u8 eth_dst_mask[ETH_ALEN];
74			__be16 vlan_id;
75			__be16 vlan_id_mask;
76			enum rocker_of_dpa_table_id goto_tbl;
77			bool copy_to_cpu;
78		} term_mac;
79		struct {
80			__be16 eth_type;
81			__be32 dst4;
82			__be32 dst4_mask;
83			enum rocker_of_dpa_table_id goto_tbl;
84			u32 group_id;
85		} ucast_routing;
86		struct {
87			u8 eth_dst[ETH_ALEN];
88			u8 eth_dst_mask[ETH_ALEN];
89			int has_eth_dst;
90			int has_eth_dst_mask;
91			__be16 vlan_id;
92			u32 tunnel_id;
93			enum rocker_of_dpa_table_id goto_tbl;
94			u32 group_id;
95			bool copy_to_cpu;
96		} bridge;
97		struct {
98			u32 in_pport;
99			u32 in_pport_mask;
100			u8 eth_src[ETH_ALEN];
101			u8 eth_src_mask[ETH_ALEN];
102			u8 eth_dst[ETH_ALEN];
103			u8 eth_dst_mask[ETH_ALEN];
104			__be16 eth_type;
105			__be16 vlan_id;
106			__be16 vlan_id_mask;
107			u8 ip_proto;
108			u8 ip_proto_mask;
109			u8 ip_tos;
110			u8 ip_tos_mask;
111			u32 group_id;
112		} acl;
113	};
114};
115
116struct rocker_flow_tbl_entry {
117	struct hlist_node entry;
118	u32 cmd;
119	u64 cookie;
120	struct rocker_flow_tbl_key key;
121	size_t key_len;
122	u32 key_crc32; /* key */
123};
124
125struct rocker_group_tbl_entry {
126	struct hlist_node entry;
127	u32 cmd;
128	u32 group_id; /* key */
129	u16 group_count;
130	u32 *group_ids;
131	union {
132		struct {
133			u8 pop_vlan;
134		} l2_interface;
135		struct {
136			u8 eth_src[ETH_ALEN];
137			u8 eth_dst[ETH_ALEN];
138			__be16 vlan_id;
139			u32 group_id;
140		} l2_rewrite;
141		struct {
142			u8 eth_src[ETH_ALEN];
143			u8 eth_dst[ETH_ALEN];
144			__be16 vlan_id;
145			bool ttl_check;
146			u32 group_id;
147		} l3_unicast;
148	};
149};
150
151struct rocker_fdb_tbl_entry {
152	struct hlist_node entry;
153	u32 key_crc32; /* key */
154	bool learned;
155	struct rocker_fdb_tbl_key {
156		u32 pport;
157		u8 addr[ETH_ALEN];
158		__be16 vlan_id;
159	} key;
160};
161
162struct rocker_internal_vlan_tbl_entry {
163	struct hlist_node entry;
164	int ifindex; /* key */
165	u32 ref_count;
166	__be16 vlan_id;
167};
168
169struct rocker_neigh_tbl_entry {
170	struct hlist_node entry;
171	__be32 ip_addr; /* key */
172	struct net_device *dev;
173	u32 ref_count;
174	u32 index;
175	u8 eth_dst[ETH_ALEN];
176	bool ttl_check;
177};
178
179struct rocker_desc_info {
180	char *data; /* mapped */
181	size_t data_size;
182	size_t tlv_size;
183	struct rocker_desc *desc;
184	DEFINE_DMA_UNMAP_ADDR(mapaddr);
185};
186
187struct rocker_dma_ring_info {
188	size_t size;
189	u32 head;
190	u32 tail;
191	struct rocker_desc *desc; /* mapped */
192	dma_addr_t mapaddr;
193	struct rocker_desc_info *desc_info;
194	unsigned int type;
195};
196
197struct rocker;
198
199enum {
200	ROCKER_CTRL_LINK_LOCAL_MCAST,
201	ROCKER_CTRL_LOCAL_ARP,
202	ROCKER_CTRL_IPV4_MCAST,
203	ROCKER_CTRL_IPV6_MCAST,
204	ROCKER_CTRL_DFLT_BRIDGING,
205	ROCKER_CTRL_MAX,
206};
207
208#define ROCKER_INTERNAL_VLAN_ID_BASE	0x0f00
209#define ROCKER_N_INTERNAL_VLANS		255
210#define ROCKER_VLAN_BITMAP_LEN		BITS_TO_LONGS(VLAN_N_VID)
211#define ROCKER_INTERNAL_VLAN_BITMAP_LEN	BITS_TO_LONGS(ROCKER_N_INTERNAL_VLANS)
212
213struct rocker_port {
214	struct net_device *dev;
215	struct net_device *bridge_dev;
216	struct rocker *rocker;
217	unsigned int port_number;
218	u32 pport;
219	__be16 internal_vlan_id;
220	int stp_state;
221	u32 brport_flags;
222	bool ctrls[ROCKER_CTRL_MAX];
223	unsigned long vlan_bitmap[ROCKER_VLAN_BITMAP_LEN];
224	struct napi_struct napi_tx;
225	struct napi_struct napi_rx;
226	struct rocker_dma_ring_info tx_ring;
227	struct rocker_dma_ring_info rx_ring;
228};
229
230struct rocker {
231	struct pci_dev *pdev;
232	u8 __iomem *hw_addr;
233	struct msix_entry *msix_entries;
234	unsigned int port_count;
235	struct rocker_port **ports;
236	struct {
237		u64 id;
238	} hw;
239	spinlock_t cmd_ring_lock;
240	struct rocker_dma_ring_info cmd_ring;
241	struct rocker_dma_ring_info event_ring;
242	DECLARE_HASHTABLE(flow_tbl, 16);
243	spinlock_t flow_tbl_lock;
244	u64 flow_tbl_next_cookie;
245	DECLARE_HASHTABLE(group_tbl, 16);
246	spinlock_t group_tbl_lock;
247	DECLARE_HASHTABLE(fdb_tbl, 16);
248	spinlock_t fdb_tbl_lock;
249	unsigned long internal_vlan_bitmap[ROCKER_INTERNAL_VLAN_BITMAP_LEN];
250	DECLARE_HASHTABLE(internal_vlan_tbl, 8);
251	spinlock_t internal_vlan_tbl_lock;
252	DECLARE_HASHTABLE(neigh_tbl, 16);
253	spinlock_t neigh_tbl_lock;
254	u32 neigh_tbl_next_index;
255};
256
257static const u8 zero_mac[ETH_ALEN]   = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
258static const u8 ff_mac[ETH_ALEN]     = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
259static const u8 ll_mac[ETH_ALEN]     = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
260static const u8 ll_mask[ETH_ALEN]    = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0 };
261static const u8 mcast_mac[ETH_ALEN]  = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
262static const u8 ipv4_mcast[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x00 };
263static const u8 ipv4_mask[ETH_ALEN]  = { 0xff, 0xff, 0xff, 0x80, 0x00, 0x00 };
264static const u8 ipv6_mcast[ETH_ALEN] = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x00 };
265static const u8 ipv6_mask[ETH_ALEN]  = { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 };
266
267/* Rocker priority levels for flow table entries.  Higher
268 * priority match takes precedence over lower priority match.
269 */
270
271enum {
272	ROCKER_PRIORITY_UNKNOWN = 0,
273	ROCKER_PRIORITY_IG_PORT = 1,
274	ROCKER_PRIORITY_VLAN = 1,
275	ROCKER_PRIORITY_TERM_MAC_UCAST = 0,
276	ROCKER_PRIORITY_TERM_MAC_MCAST = 1,
277	ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_EXACT = 1,
278	ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_WILD = 2,
279	ROCKER_PRIORITY_BRIDGING_VLAN = 3,
280	ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_EXACT = 1,
281	ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_WILD = 2,
282	ROCKER_PRIORITY_BRIDGING_TENANT = 3,
283	ROCKER_PRIORITY_ACL_CTRL = 3,
284	ROCKER_PRIORITY_ACL_NORMAL = 2,
285	ROCKER_PRIORITY_ACL_DFLT = 1,
286};
287
288static bool rocker_vlan_id_is_internal(__be16 vlan_id)
289{
290	u16 start = ROCKER_INTERNAL_VLAN_ID_BASE;
291	u16 end = 0xffe;
292	u16 _vlan_id = ntohs(vlan_id);
293
294	return (_vlan_id >= start && _vlan_id <= end);
295}
296
297static __be16 rocker_port_vid_to_vlan(struct rocker_port *rocker_port,
298				      u16 vid, bool *pop_vlan)
299{
300	__be16 vlan_id;
301
302	if (pop_vlan)
303		*pop_vlan = false;
304	vlan_id = htons(vid);
305	if (!vlan_id) {
306		vlan_id = rocker_port->internal_vlan_id;
307		if (pop_vlan)
308			*pop_vlan = true;
309	}
310
311	return vlan_id;
312}
313
314static u16 rocker_port_vlan_to_vid(struct rocker_port *rocker_port,
315				   __be16 vlan_id)
316{
317	if (rocker_vlan_id_is_internal(vlan_id))
318		return 0;
319
320	return ntohs(vlan_id);
321}
322
323static bool rocker_port_is_bridged(struct rocker_port *rocker_port)
324{
325	return !!rocker_port->bridge_dev;
326}
327
328struct rocker_wait {
329	wait_queue_head_t wait;
330	bool done;
331	bool nowait;
332};
333
334static void rocker_wait_reset(struct rocker_wait *wait)
335{
336	wait->done = false;
337	wait->nowait = false;
338}
339
340static void rocker_wait_init(struct rocker_wait *wait)
341{
342	init_waitqueue_head(&wait->wait);
343	rocker_wait_reset(wait);
344}
345
346static struct rocker_wait *rocker_wait_create(gfp_t gfp)
347{
348	struct rocker_wait *wait;
349
350	wait = kmalloc(sizeof(*wait), gfp);
351	if (!wait)
352		return NULL;
353	rocker_wait_init(wait);
354	return wait;
355}
356
357static void rocker_wait_destroy(struct rocker_wait *work)
358{
359	kfree(work);
360}
361
362static bool rocker_wait_event_timeout(struct rocker_wait *wait,
363				      unsigned long timeout)
364{
365	wait_event_timeout(wait->wait, wait->done, HZ / 10);
366	if (!wait->done)
367		return false;
368	return true;
369}
370
371static void rocker_wait_wake_up(struct rocker_wait *wait)
372{
373	wait->done = true;
374	wake_up(&wait->wait);
375}
376
377static u32 rocker_msix_vector(struct rocker *rocker, unsigned int vector)
378{
379	return rocker->msix_entries[vector].vector;
380}
381
382static u32 rocker_msix_tx_vector(struct rocker_port *rocker_port)
383{
384	return rocker_msix_vector(rocker_port->rocker,
385				  ROCKER_MSIX_VEC_TX(rocker_port->port_number));
386}
387
388static u32 rocker_msix_rx_vector(struct rocker_port *rocker_port)
389{
390	return rocker_msix_vector(rocker_port->rocker,
391				  ROCKER_MSIX_VEC_RX(rocker_port->port_number));
392}
393
394#define rocker_write32(rocker, reg, val)	\
395	writel((val), (rocker)->hw_addr + (ROCKER_ ## reg))
396#define rocker_read32(rocker, reg)	\
397	readl((rocker)->hw_addr + (ROCKER_ ## reg))
398#define rocker_write64(rocker, reg, val)	\
399	writeq((val), (rocker)->hw_addr + (ROCKER_ ## reg))
400#define rocker_read64(rocker, reg)	\
401	readq((rocker)->hw_addr + (ROCKER_ ## reg))
402
403/*****************************
404 * HW basic testing functions
405 *****************************/
406
407static int rocker_reg_test(struct rocker *rocker)
408{
409	struct pci_dev *pdev = rocker->pdev;
410	u64 test_reg;
411	u64 rnd;
412
413	rnd = prandom_u32();
414	rnd >>= 1;
415	rocker_write32(rocker, TEST_REG, rnd);
416	test_reg = rocker_read32(rocker, TEST_REG);
417	if (test_reg != rnd * 2) {
418		dev_err(&pdev->dev, "unexpected 32bit register value %08llx, expected %08llx\n",
419			test_reg, rnd * 2);
420		return -EIO;
421	}
422
423	rnd = prandom_u32();
424	rnd <<= 31;
425	rnd |= prandom_u32();
426	rocker_write64(rocker, TEST_REG64, rnd);
427	test_reg = rocker_read64(rocker, TEST_REG64);
428	if (test_reg != rnd * 2) {
429		dev_err(&pdev->dev, "unexpected 64bit register value %16llx, expected %16llx\n",
430			test_reg, rnd * 2);
431		return -EIO;
432	}
433
434	return 0;
435}
436
437static int rocker_dma_test_one(struct rocker *rocker, struct rocker_wait *wait,
438			       u32 test_type, dma_addr_t dma_handle,
439			       unsigned char *buf, unsigned char *expect,
440			       size_t size)
441{
442	struct pci_dev *pdev = rocker->pdev;
443	int i;
444
445	rocker_wait_reset(wait);
446	rocker_write32(rocker, TEST_DMA_CTRL, test_type);
447
448	if (!rocker_wait_event_timeout(wait, HZ / 10)) {
449		dev_err(&pdev->dev, "no interrupt received within a timeout\n");
450		return -EIO;
451	}
452
453	for (i = 0; i < size; i++) {
454		if (buf[i] != expect[i]) {
455			dev_err(&pdev->dev, "unexpected memory content %02x at byte %x\n, %02x expected",
456				buf[i], i, expect[i]);
457			return -EIO;
458		}
459	}
460	return 0;
461}
462
463#define ROCKER_TEST_DMA_BUF_SIZE (PAGE_SIZE * 4)
464#define ROCKER_TEST_DMA_FILL_PATTERN 0x96
465
466static int rocker_dma_test_offset(struct rocker *rocker,
467				  struct rocker_wait *wait, int offset)
468{
469	struct pci_dev *pdev = rocker->pdev;
470	unsigned char *alloc;
471	unsigned char *buf;
472	unsigned char *expect;
473	dma_addr_t dma_handle;
474	int i;
475	int err;
476
477	alloc = kzalloc(ROCKER_TEST_DMA_BUF_SIZE * 2 + offset,
478			GFP_KERNEL | GFP_DMA);
479	if (!alloc)
480		return -ENOMEM;
481	buf = alloc + offset;
482	expect = buf + ROCKER_TEST_DMA_BUF_SIZE;
483
484	dma_handle = pci_map_single(pdev, buf, ROCKER_TEST_DMA_BUF_SIZE,
485				    PCI_DMA_BIDIRECTIONAL);
486	if (pci_dma_mapping_error(pdev, dma_handle)) {
487		err = -EIO;
488		goto free_alloc;
489	}
490
491	rocker_write64(rocker, TEST_DMA_ADDR, dma_handle);
492	rocker_write32(rocker, TEST_DMA_SIZE, ROCKER_TEST_DMA_BUF_SIZE);
493
494	memset(expect, ROCKER_TEST_DMA_FILL_PATTERN, ROCKER_TEST_DMA_BUF_SIZE);
495	err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_FILL,
496				  dma_handle, buf, expect,
497				  ROCKER_TEST_DMA_BUF_SIZE);
498	if (err)
499		goto unmap;
500
501	memset(expect, 0, ROCKER_TEST_DMA_BUF_SIZE);
502	err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_CLEAR,
503				  dma_handle, buf, expect,
504				  ROCKER_TEST_DMA_BUF_SIZE);
505	if (err)
506		goto unmap;
507
508	prandom_bytes(buf, ROCKER_TEST_DMA_BUF_SIZE);
509	for (i = 0; i < ROCKER_TEST_DMA_BUF_SIZE; i++)
510		expect[i] = ~buf[i];
511	err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_INVERT,
512				  dma_handle, buf, expect,
513				  ROCKER_TEST_DMA_BUF_SIZE);
514	if (err)
515		goto unmap;
516
517unmap:
518	pci_unmap_single(pdev, dma_handle, ROCKER_TEST_DMA_BUF_SIZE,
519			 PCI_DMA_BIDIRECTIONAL);
520free_alloc:
521	kfree(alloc);
522
523	return err;
524}
525
526static int rocker_dma_test(struct rocker *rocker, struct rocker_wait *wait)
527{
528	int i;
529	int err;
530
531	for (i = 0; i < 8; i++) {
532		err = rocker_dma_test_offset(rocker, wait, i);
533		if (err)
534			return err;
535	}
536	return 0;
537}
538
539static irqreturn_t rocker_test_irq_handler(int irq, void *dev_id)
540{
541	struct rocker_wait *wait = dev_id;
542
543	rocker_wait_wake_up(wait);
544
545	return IRQ_HANDLED;
546}
547
548static int rocker_basic_hw_test(struct rocker *rocker)
549{
550	struct pci_dev *pdev = rocker->pdev;
551	struct rocker_wait wait;
552	int err;
553
554	err = rocker_reg_test(rocker);
555	if (err) {
556		dev_err(&pdev->dev, "reg test failed\n");
557		return err;
558	}
559
560	err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_TEST),
561			  rocker_test_irq_handler, 0,
562			  rocker_driver_name, &wait);
563	if (err) {
564		dev_err(&pdev->dev, "cannot assign test irq\n");
565		return err;
566	}
567
568	rocker_wait_init(&wait);
569	rocker_write32(rocker, TEST_IRQ, ROCKER_MSIX_VEC_TEST);
570
571	if (!rocker_wait_event_timeout(&wait, HZ / 10)) {
572		dev_err(&pdev->dev, "no interrupt received within a timeout\n");
573		err = -EIO;
574		goto free_irq;
575	}
576
577	err = rocker_dma_test(rocker, &wait);
578	if (err)
579		dev_err(&pdev->dev, "dma test failed\n");
580
581free_irq:
582	free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_TEST), &wait);
583	return err;
584}
585
586/******
587 * TLV
588 ******/
589
590#define ROCKER_TLV_ALIGNTO 8U
591#define ROCKER_TLV_ALIGN(len) \
592	(((len) + ROCKER_TLV_ALIGNTO - 1) & ~(ROCKER_TLV_ALIGNTO - 1))
593#define ROCKER_TLV_HDRLEN ROCKER_TLV_ALIGN(sizeof(struct rocker_tlv))
594
595/*  <------- ROCKER_TLV_HDRLEN -------> <--- ROCKER_TLV_ALIGN(payload) --->
596 * +-----------------------------+- - -+- - - - - - - - - - - - - - -+- - -+
597 * |             Header          | Pad |           Payload           | Pad |
598 * |      (struct rocker_tlv)    | ing |                             | ing |
599 * +-----------------------------+- - -+- - - - - - - - - - - - - - -+- - -+
600 *  <--------------------------- tlv->len -------------------------->
601 */
602
603static struct rocker_tlv *rocker_tlv_next(const struct rocker_tlv *tlv,
604					  int *remaining)
605{
606	int totlen = ROCKER_TLV_ALIGN(tlv->len);
607
608	*remaining -= totlen;
609	return (struct rocker_tlv *) ((char *) tlv + totlen);
610}
611
612static int rocker_tlv_ok(const struct rocker_tlv *tlv, int remaining)
613{
614	return remaining >= (int) ROCKER_TLV_HDRLEN &&
615	       tlv->len >= ROCKER_TLV_HDRLEN &&
616	       tlv->len <= remaining;
617}
618
619#define rocker_tlv_for_each(pos, head, len, rem)	\
620	for (pos = head, rem = len;			\
621	     rocker_tlv_ok(pos, rem);			\
622	     pos = rocker_tlv_next(pos, &(rem)))
623
624#define rocker_tlv_for_each_nested(pos, tlv, rem)	\
625	rocker_tlv_for_each(pos, rocker_tlv_data(tlv),	\
626			    rocker_tlv_len(tlv), rem)
627
628static int rocker_tlv_attr_size(int payload)
629{
630	return ROCKER_TLV_HDRLEN + payload;
631}
632
633static int rocker_tlv_total_size(int payload)
634{
635	return ROCKER_TLV_ALIGN(rocker_tlv_attr_size(payload));
636}
637
638static int rocker_tlv_padlen(int payload)
639{
640	return rocker_tlv_total_size(payload) - rocker_tlv_attr_size(payload);
641}
642
643static int rocker_tlv_type(const struct rocker_tlv *tlv)
644{
645	return tlv->type;
646}
647
648static void *rocker_tlv_data(const struct rocker_tlv *tlv)
649{
650	return (char *) tlv + ROCKER_TLV_HDRLEN;
651}
652
653static int rocker_tlv_len(const struct rocker_tlv *tlv)
654{
655	return tlv->len - ROCKER_TLV_HDRLEN;
656}
657
658static u8 rocker_tlv_get_u8(const struct rocker_tlv *tlv)
659{
660	return *(u8 *) rocker_tlv_data(tlv);
661}
662
663static u16 rocker_tlv_get_u16(const struct rocker_tlv *tlv)
664{
665	return *(u16 *) rocker_tlv_data(tlv);
666}
667
668static __be16 rocker_tlv_get_be16(const struct rocker_tlv *tlv)
669{
670	return *(__be16 *) rocker_tlv_data(tlv);
671}
672
673static u32 rocker_tlv_get_u32(const struct rocker_tlv *tlv)
674{
675	return *(u32 *) rocker_tlv_data(tlv);
676}
677
678static u64 rocker_tlv_get_u64(const struct rocker_tlv *tlv)
679{
680	return *(u64 *) rocker_tlv_data(tlv);
681}
682
683static void rocker_tlv_parse(struct rocker_tlv **tb, int maxtype,
684			     const char *buf, int buf_len)
685{
686	const struct rocker_tlv *tlv;
687	const struct rocker_tlv *head = (const struct rocker_tlv *) buf;
688	int rem;
689
690	memset(tb, 0, sizeof(struct rocker_tlv *) * (maxtype + 1));
691
692	rocker_tlv_for_each(tlv, head, buf_len, rem) {
693		u32 type = rocker_tlv_type(tlv);
694
695		if (type > 0 && type <= maxtype)
696			tb[type] = (struct rocker_tlv *) tlv;
697	}
698}
699
700static void rocker_tlv_parse_nested(struct rocker_tlv **tb, int maxtype,
701				    const struct rocker_tlv *tlv)
702{
703	rocker_tlv_parse(tb, maxtype, rocker_tlv_data(tlv),
704			 rocker_tlv_len(tlv));
705}
706
707static void rocker_tlv_parse_desc(struct rocker_tlv **tb, int maxtype,
708				  struct rocker_desc_info *desc_info)
709{
710	rocker_tlv_parse(tb, maxtype, desc_info->data,
711			 desc_info->desc->tlv_size);
712}
713
714static struct rocker_tlv *rocker_tlv_start(struct rocker_desc_info *desc_info)
715{
716	return (struct rocker_tlv *) ((char *) desc_info->data +
717					       desc_info->tlv_size);
718}
719
720static int rocker_tlv_put(struct rocker_desc_info *desc_info,
721			  int attrtype, int attrlen, const void *data)
722{
723	int tail_room = desc_info->data_size - desc_info->tlv_size;
724	int total_size = rocker_tlv_total_size(attrlen);
725	struct rocker_tlv *tlv;
726
727	if (unlikely(tail_room < total_size))
728		return -EMSGSIZE;
729
730	tlv = rocker_tlv_start(desc_info);
731	desc_info->tlv_size += total_size;
732	tlv->type = attrtype;
733	tlv->len = rocker_tlv_attr_size(attrlen);
734	memcpy(rocker_tlv_data(tlv), data, attrlen);
735	memset((char *) tlv + tlv->len, 0, rocker_tlv_padlen(attrlen));
736	return 0;
737}
738
739static int rocker_tlv_put_u8(struct rocker_desc_info *desc_info,
740			     int attrtype, u8 value)
741{
742	return rocker_tlv_put(desc_info, attrtype, sizeof(u8), &value);
743}
744
745static int rocker_tlv_put_u16(struct rocker_desc_info *desc_info,
746			      int attrtype, u16 value)
747{
748	return rocker_tlv_put(desc_info, attrtype, sizeof(u16), &value);
749}
750
751static int rocker_tlv_put_be16(struct rocker_desc_info *desc_info,
752			       int attrtype, __be16 value)
753{
754	return rocker_tlv_put(desc_info, attrtype, sizeof(__be16), &value);
755}
756
757static int rocker_tlv_put_u32(struct rocker_desc_info *desc_info,
758			      int attrtype, u32 value)
759{
760	return rocker_tlv_put(desc_info, attrtype, sizeof(u32), &value);
761}
762
763static int rocker_tlv_put_be32(struct rocker_desc_info *desc_info,
764			       int attrtype, __be32 value)
765{
766	return rocker_tlv_put(desc_info, attrtype, sizeof(__be32), &value);
767}
768
769static int rocker_tlv_put_u64(struct rocker_desc_info *desc_info,
770			      int attrtype, u64 value)
771{
772	return rocker_tlv_put(desc_info, attrtype, sizeof(u64), &value);
773}
774
775static struct rocker_tlv *
776rocker_tlv_nest_start(struct rocker_desc_info *desc_info, int attrtype)
777{
778	struct rocker_tlv *start = rocker_tlv_start(desc_info);
779
780	if (rocker_tlv_put(desc_info, attrtype, 0, NULL) < 0)
781		return NULL;
782
783	return start;
784}
785
786static void rocker_tlv_nest_end(struct rocker_desc_info *desc_info,
787				struct rocker_tlv *start)
788{
789	start->len = (char *) rocker_tlv_start(desc_info) - (char *) start;
790}
791
792static void rocker_tlv_nest_cancel(struct rocker_desc_info *desc_info,
793				   struct rocker_tlv *start)
794{
795	desc_info->tlv_size = (char *) start - desc_info->data;
796}
797
798/******************************************
799 * DMA rings and descriptors manipulations
800 ******************************************/
801
802static u32 __pos_inc(u32 pos, size_t limit)
803{
804	return ++pos == limit ? 0 : pos;
805}
806
807static int rocker_desc_err(struct rocker_desc_info *desc_info)
808{
809	int err = desc_info->desc->comp_err & ~ROCKER_DMA_DESC_COMP_ERR_GEN;
810
811	switch (err) {
812	case ROCKER_OK:
813		return 0;
814	case -ROCKER_ENOENT:
815		return -ENOENT;
816	case -ROCKER_ENXIO:
817		return -ENXIO;
818	case -ROCKER_ENOMEM:
819		return -ENOMEM;
820	case -ROCKER_EEXIST:
821		return -EEXIST;
822	case -ROCKER_EINVAL:
823		return -EINVAL;
824	case -ROCKER_EMSGSIZE:
825		return -EMSGSIZE;
826	case -ROCKER_ENOTSUP:
827		return -EOPNOTSUPP;
828	case -ROCKER_ENOBUFS:
829		return -ENOBUFS;
830	}
831
832	return -EINVAL;
833}
834
835static void rocker_desc_gen_clear(struct rocker_desc_info *desc_info)
836{
837	desc_info->desc->comp_err &= ~ROCKER_DMA_DESC_COMP_ERR_GEN;
838}
839
840static bool rocker_desc_gen(struct rocker_desc_info *desc_info)
841{
842	u32 comp_err = desc_info->desc->comp_err;
843
844	return comp_err & ROCKER_DMA_DESC_COMP_ERR_GEN ? true : false;
845}
846
847static void *rocker_desc_cookie_ptr_get(struct rocker_desc_info *desc_info)
848{
849	return (void *)(uintptr_t)desc_info->desc->cookie;
850}
851
852static void rocker_desc_cookie_ptr_set(struct rocker_desc_info *desc_info,
853				       void *ptr)
854{
855	desc_info->desc->cookie = (uintptr_t) ptr;
856}
857
858static struct rocker_desc_info *
859rocker_desc_head_get(struct rocker_dma_ring_info *info)
860{
861	static struct rocker_desc_info *desc_info;
862	u32 head = __pos_inc(info->head, info->size);
863
864	desc_info = &info->desc_info[info->head];
865	if (head == info->tail)
866		return NULL; /* ring full */
867	desc_info->tlv_size = 0;
868	return desc_info;
869}
870
871static void rocker_desc_commit(struct rocker_desc_info *desc_info)
872{
873	desc_info->desc->buf_size = desc_info->data_size;
874	desc_info->desc->tlv_size = desc_info->tlv_size;
875}
876
877static void rocker_desc_head_set(struct rocker *rocker,
878				 struct rocker_dma_ring_info *info,
879				 struct rocker_desc_info *desc_info)
880{
881	u32 head = __pos_inc(info->head, info->size);
882
883	BUG_ON(head == info->tail);
884	rocker_desc_commit(desc_info);
885	info->head = head;
886	rocker_write32(rocker, DMA_DESC_HEAD(info->type), head);
887}
888
889static struct rocker_desc_info *
890rocker_desc_tail_get(struct rocker_dma_ring_info *info)
891{
892	static struct rocker_desc_info *desc_info;
893
894	if (info->tail == info->head)
895		return NULL; /* nothing to be done between head and tail */
896	desc_info = &info->desc_info[info->tail];
897	if (!rocker_desc_gen(desc_info))
898		return NULL; /* gen bit not set, desc is not ready yet */
899	info->tail = __pos_inc(info->tail, info->size);
900	desc_info->tlv_size = desc_info->desc->tlv_size;
901	return desc_info;
902}
903
904static void rocker_dma_ring_credits_set(struct rocker *rocker,
905					struct rocker_dma_ring_info *info,
906					u32 credits)
907{
908	if (credits)
909		rocker_write32(rocker, DMA_DESC_CREDITS(info->type), credits);
910}
911
912static unsigned long rocker_dma_ring_size_fix(size_t size)
913{
914	return max(ROCKER_DMA_SIZE_MIN,
915		   min(roundup_pow_of_two(size), ROCKER_DMA_SIZE_MAX));
916}
917
918static int rocker_dma_ring_create(struct rocker *rocker,
919				  unsigned int type,
920				  size_t size,
921				  struct rocker_dma_ring_info *info)
922{
923	int i;
924
925	BUG_ON(size != rocker_dma_ring_size_fix(size));
926	info->size = size;
927	info->type = type;
928	info->head = 0;
929	info->tail = 0;
930	info->desc_info = kcalloc(info->size, sizeof(*info->desc_info),
931				  GFP_KERNEL);
932	if (!info->desc_info)
933		return -ENOMEM;
934
935	info->desc = pci_alloc_consistent(rocker->pdev,
936					  info->size * sizeof(*info->desc),
937					  &info->mapaddr);
938	if (!info->desc) {
939		kfree(info->desc_info);
940		return -ENOMEM;
941	}
942
943	for (i = 0; i < info->size; i++)
944		info->desc_info[i].desc = &info->desc[i];
945
946	rocker_write32(rocker, DMA_DESC_CTRL(info->type),
947		       ROCKER_DMA_DESC_CTRL_RESET);
948	rocker_write64(rocker, DMA_DESC_ADDR(info->type), info->mapaddr);
949	rocker_write32(rocker, DMA_DESC_SIZE(info->type), info->size);
950
951	return 0;
952}
953
954static void rocker_dma_ring_destroy(struct rocker *rocker,
955				    struct rocker_dma_ring_info *info)
956{
957	rocker_write64(rocker, DMA_DESC_ADDR(info->type), 0);
958
959	pci_free_consistent(rocker->pdev,
960			    info->size * sizeof(struct rocker_desc),
961			    info->desc, info->mapaddr);
962	kfree(info->desc_info);
963}
964
965static void rocker_dma_ring_pass_to_producer(struct rocker *rocker,
966					     struct rocker_dma_ring_info *info)
967{
968	int i;
969
970	BUG_ON(info->head || info->tail);
971
972	/* When ring is consumer, we need to advance head for each desc.
973	 * That tells hw that the desc is ready to be used by it.
974	 */
975	for (i = 0; i < info->size - 1; i++)
976		rocker_desc_head_set(rocker, info, &info->desc_info[i]);
977	rocker_desc_commit(&info->desc_info[i]);
978}
979
980static int rocker_dma_ring_bufs_alloc(struct rocker *rocker,
981				      struct rocker_dma_ring_info *info,
982				      int direction, size_t buf_size)
983{
984	struct pci_dev *pdev = rocker->pdev;
985	int i;
986	int err;
987
988	for (i = 0; i < info->size; i++) {
989		struct rocker_desc_info *desc_info = &info->desc_info[i];
990		struct rocker_desc *desc = &info->desc[i];
991		dma_addr_t dma_handle;
992		char *buf;
993
994		buf = kzalloc(buf_size, GFP_KERNEL | GFP_DMA);
995		if (!buf) {
996			err = -ENOMEM;
997			goto rollback;
998		}
999
1000		dma_handle = pci_map_single(pdev, buf, buf_size, direction);
1001		if (pci_dma_mapping_error(pdev, dma_handle)) {
1002			kfree(buf);
1003			err = -EIO;
1004			goto rollback;
1005		}
1006
1007		desc_info->data = buf;
1008		desc_info->data_size = buf_size;
1009		dma_unmap_addr_set(desc_info, mapaddr, dma_handle);
1010
1011		desc->buf_addr = dma_handle;
1012		desc->buf_size = buf_size;
1013	}
1014	return 0;
1015
1016rollback:
1017	for (i--; i >= 0; i--) {
1018		struct rocker_desc_info *desc_info = &info->desc_info[i];
1019
1020		pci_unmap_single(pdev, dma_unmap_addr(desc_info, mapaddr),
1021				 desc_info->data_size, direction);
1022		kfree(desc_info->data);
1023	}
1024	return err;
1025}
1026
1027static void rocker_dma_ring_bufs_free(struct rocker *rocker,
1028				      struct rocker_dma_ring_info *info,
1029				      int direction)
1030{
1031	struct pci_dev *pdev = rocker->pdev;
1032	int i;
1033
1034	for (i = 0; i < info->size; i++) {
1035		struct rocker_desc_info *desc_info = &info->desc_info[i];
1036		struct rocker_desc *desc = &info->desc[i];
1037
1038		desc->buf_addr = 0;
1039		desc->buf_size = 0;
1040		pci_unmap_single(pdev, dma_unmap_addr(desc_info, mapaddr),
1041				 desc_info->data_size, direction);
1042		kfree(desc_info->data);
1043	}
1044}
1045
1046static int rocker_dma_rings_init(struct rocker *rocker)
1047{
1048	struct pci_dev *pdev = rocker->pdev;
1049	int err;
1050
1051	err = rocker_dma_ring_create(rocker, ROCKER_DMA_CMD,
1052				     ROCKER_DMA_CMD_DEFAULT_SIZE,
1053				     &rocker->cmd_ring);
1054	if (err) {
1055		dev_err(&pdev->dev, "failed to create command dma ring\n");
1056		return err;
1057	}
1058
1059	spin_lock_init(&rocker->cmd_ring_lock);
1060
1061	err = rocker_dma_ring_bufs_alloc(rocker, &rocker->cmd_ring,
1062					 PCI_DMA_BIDIRECTIONAL, PAGE_SIZE);
1063	if (err) {
1064		dev_err(&pdev->dev, "failed to alloc command dma ring buffers\n");
1065		goto err_dma_cmd_ring_bufs_alloc;
1066	}
1067
1068	err = rocker_dma_ring_create(rocker, ROCKER_DMA_EVENT,
1069				     ROCKER_DMA_EVENT_DEFAULT_SIZE,
1070				     &rocker->event_ring);
1071	if (err) {
1072		dev_err(&pdev->dev, "failed to create event dma ring\n");
1073		goto err_dma_event_ring_create;
1074	}
1075
1076	err = rocker_dma_ring_bufs_alloc(rocker, &rocker->event_ring,
1077					 PCI_DMA_FROMDEVICE, PAGE_SIZE);
1078	if (err) {
1079		dev_err(&pdev->dev, "failed to alloc event dma ring buffers\n");
1080		goto err_dma_event_ring_bufs_alloc;
1081	}
1082	rocker_dma_ring_pass_to_producer(rocker, &rocker->event_ring);
1083	return 0;
1084
1085err_dma_event_ring_bufs_alloc:
1086	rocker_dma_ring_destroy(rocker, &rocker->event_ring);
1087err_dma_event_ring_create:
1088	rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring,
1089				  PCI_DMA_BIDIRECTIONAL);
1090err_dma_cmd_ring_bufs_alloc:
1091	rocker_dma_ring_destroy(rocker, &rocker->cmd_ring);
1092	return err;
1093}
1094
1095static void rocker_dma_rings_fini(struct rocker *rocker)
1096{
1097	rocker_dma_ring_bufs_free(rocker, &rocker->event_ring,
1098				  PCI_DMA_BIDIRECTIONAL);
1099	rocker_dma_ring_destroy(rocker, &rocker->event_ring);
1100	rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring,
1101				  PCI_DMA_BIDIRECTIONAL);
1102	rocker_dma_ring_destroy(rocker, &rocker->cmd_ring);
1103}
1104
1105static int rocker_dma_rx_ring_skb_map(struct rocker *rocker,
1106				      struct rocker_port *rocker_port,
1107				      struct rocker_desc_info *desc_info,
1108				      struct sk_buff *skb, size_t buf_len)
1109{
1110	struct pci_dev *pdev = rocker->pdev;
1111	dma_addr_t dma_handle;
1112
1113	dma_handle = pci_map_single(pdev, skb->data, buf_len,
1114				    PCI_DMA_FROMDEVICE);
1115	if (pci_dma_mapping_error(pdev, dma_handle))
1116		return -EIO;
1117	if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_RX_FRAG_ADDR, dma_handle))
1118		goto tlv_put_failure;
1119	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_RX_FRAG_MAX_LEN, buf_len))
1120		goto tlv_put_failure;
1121	return 0;
1122
1123tlv_put_failure:
1124	pci_unmap_single(pdev, dma_handle, buf_len, PCI_DMA_FROMDEVICE);
1125	desc_info->tlv_size = 0;
1126	return -EMSGSIZE;
1127}
1128
1129static size_t rocker_port_rx_buf_len(struct rocker_port *rocker_port)
1130{
1131	return rocker_port->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
1132}
1133
1134static int rocker_dma_rx_ring_skb_alloc(struct rocker *rocker,
1135					struct rocker_port *rocker_port,
1136					struct rocker_desc_info *desc_info)
1137{
1138	struct net_device *dev = rocker_port->dev;
1139	struct sk_buff *skb;
1140	size_t buf_len = rocker_port_rx_buf_len(rocker_port);
1141	int err;
1142
1143	/* Ensure that hw will see tlv_size zero in case of an error.
1144	 * That tells hw to use another descriptor.
1145	 */
1146	rocker_desc_cookie_ptr_set(desc_info, NULL);
1147	desc_info->tlv_size = 0;
1148
1149	skb = netdev_alloc_skb_ip_align(dev, buf_len);
1150	if (!skb)
1151		return -ENOMEM;
1152	err = rocker_dma_rx_ring_skb_map(rocker, rocker_port, desc_info,
1153					 skb, buf_len);
1154	if (err) {
1155		dev_kfree_skb_any(skb);
1156		return err;
1157	}
1158	rocker_desc_cookie_ptr_set(desc_info, skb);
1159	return 0;
1160}
1161
1162static void rocker_dma_rx_ring_skb_unmap(struct rocker *rocker,
1163					 struct rocker_tlv **attrs)
1164{
1165	struct pci_dev *pdev = rocker->pdev;
1166	dma_addr_t dma_handle;
1167	size_t len;
1168
1169	if (!attrs[ROCKER_TLV_RX_FRAG_ADDR] ||
1170	    !attrs[ROCKER_TLV_RX_FRAG_MAX_LEN])
1171		return;
1172	dma_handle = rocker_tlv_get_u64(attrs[ROCKER_TLV_RX_FRAG_ADDR]);
1173	len = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FRAG_MAX_LEN]);
1174	pci_unmap_single(pdev, dma_handle, len, PCI_DMA_FROMDEVICE);
1175}
1176
1177static void rocker_dma_rx_ring_skb_free(struct rocker *rocker,
1178					struct rocker_desc_info *desc_info)
1179{
1180	struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1];
1181	struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info);
1182
1183	if (!skb)
1184		return;
1185	rocker_tlv_parse_desc(attrs, ROCKER_TLV_RX_MAX, desc_info);
1186	rocker_dma_rx_ring_skb_unmap(rocker, attrs);
1187	dev_kfree_skb_any(skb);
1188}
1189
1190static int rocker_dma_rx_ring_skbs_alloc(struct rocker *rocker,
1191					 struct rocker_port *rocker_port)
1192{
1193	struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring;
1194	int i;
1195	int err;
1196
1197	for (i = 0; i < rx_ring->size; i++) {
1198		err = rocker_dma_rx_ring_skb_alloc(rocker, rocker_port,
1199						   &rx_ring->desc_info[i]);
1200		if (err)
1201			goto rollback;
1202	}
1203	return 0;
1204
1205rollback:
1206	for (i--; i >= 0; i--)
1207		rocker_dma_rx_ring_skb_free(rocker, &rx_ring->desc_info[i]);
1208	return err;
1209}
1210
1211static void rocker_dma_rx_ring_skbs_free(struct rocker *rocker,
1212					 struct rocker_port *rocker_port)
1213{
1214	struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring;
1215	int i;
1216
1217	for (i = 0; i < rx_ring->size; i++)
1218		rocker_dma_rx_ring_skb_free(rocker, &rx_ring->desc_info[i]);
1219}
1220
1221static int rocker_port_dma_rings_init(struct rocker_port *rocker_port)
1222{
1223	struct rocker *rocker = rocker_port->rocker;
1224	int err;
1225
1226	err = rocker_dma_ring_create(rocker,
1227				     ROCKER_DMA_TX(rocker_port->port_number),
1228				     ROCKER_DMA_TX_DEFAULT_SIZE,
1229				     &rocker_port->tx_ring);
1230	if (err) {
1231		netdev_err(rocker_port->dev, "failed to create tx dma ring\n");
1232		return err;
1233	}
1234
1235	err = rocker_dma_ring_bufs_alloc(rocker, &rocker_port->tx_ring,
1236					 PCI_DMA_TODEVICE,
1237					 ROCKER_DMA_TX_DESC_SIZE);
1238	if (err) {
1239		netdev_err(rocker_port->dev, "failed to alloc tx dma ring buffers\n");
1240		goto err_dma_tx_ring_bufs_alloc;
1241	}
1242
1243	err = rocker_dma_ring_create(rocker,
1244				     ROCKER_DMA_RX(rocker_port->port_number),
1245				     ROCKER_DMA_RX_DEFAULT_SIZE,
1246				     &rocker_port->rx_ring);
1247	if (err) {
1248		netdev_err(rocker_port->dev, "failed to create rx dma ring\n");
1249		goto err_dma_rx_ring_create;
1250	}
1251
1252	err = rocker_dma_ring_bufs_alloc(rocker, &rocker_port->rx_ring,
1253					 PCI_DMA_BIDIRECTIONAL,
1254					 ROCKER_DMA_RX_DESC_SIZE);
1255	if (err) {
1256		netdev_err(rocker_port->dev, "failed to alloc rx dma ring buffers\n");
1257		goto err_dma_rx_ring_bufs_alloc;
1258	}
1259
1260	err = rocker_dma_rx_ring_skbs_alloc(rocker, rocker_port);
1261	if (err) {
1262		netdev_err(rocker_port->dev, "failed to alloc rx dma ring skbs\n");
1263		goto err_dma_rx_ring_skbs_alloc;
1264	}
1265	rocker_dma_ring_pass_to_producer(rocker, &rocker_port->rx_ring);
1266
1267	return 0;
1268
1269err_dma_rx_ring_skbs_alloc:
1270	rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring,
1271				  PCI_DMA_BIDIRECTIONAL);
1272err_dma_rx_ring_bufs_alloc:
1273	rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring);
1274err_dma_rx_ring_create:
1275	rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring,
1276				  PCI_DMA_TODEVICE);
1277err_dma_tx_ring_bufs_alloc:
1278	rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring);
1279	return err;
1280}
1281
1282static void rocker_port_dma_rings_fini(struct rocker_port *rocker_port)
1283{
1284	struct rocker *rocker = rocker_port->rocker;
1285
1286	rocker_dma_rx_ring_skbs_free(rocker, rocker_port);
1287	rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring,
1288				  PCI_DMA_BIDIRECTIONAL);
1289	rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring);
1290	rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring,
1291				  PCI_DMA_TODEVICE);
1292	rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring);
1293}
1294
1295static void rocker_port_set_enable(struct rocker_port *rocker_port, bool enable)
1296{
1297	u64 val = rocker_read64(rocker_port->rocker, PORT_PHYS_ENABLE);
1298
1299	if (enable)
1300		val |= 1ULL << rocker_port->pport;
1301	else
1302		val &= ~(1ULL << rocker_port->pport);
1303	rocker_write64(rocker_port->rocker, PORT_PHYS_ENABLE, val);
1304}
1305
1306/********************************
1307 * Interrupt handler and helpers
1308 ********************************/
1309
1310static irqreturn_t rocker_cmd_irq_handler(int irq, void *dev_id)
1311{
1312	struct rocker *rocker = dev_id;
1313	struct rocker_desc_info *desc_info;
1314	struct rocker_wait *wait;
1315	u32 credits = 0;
1316
1317	spin_lock(&rocker->cmd_ring_lock);
1318	while ((desc_info = rocker_desc_tail_get(&rocker->cmd_ring))) {
1319		wait = rocker_desc_cookie_ptr_get(desc_info);
1320		if (wait->nowait) {
1321			rocker_desc_gen_clear(desc_info);
1322			rocker_wait_destroy(wait);
1323		} else {
1324			rocker_wait_wake_up(wait);
1325		}
1326		credits++;
1327	}
1328	spin_unlock(&rocker->cmd_ring_lock);
1329	rocker_dma_ring_credits_set(rocker, &rocker->cmd_ring, credits);
1330
1331	return IRQ_HANDLED;
1332}
1333
1334static void rocker_port_link_up(struct rocker_port *rocker_port)
1335{
1336	netif_carrier_on(rocker_port->dev);
1337	netdev_info(rocker_port->dev, "Link is up\n");
1338}
1339
1340static void rocker_port_link_down(struct rocker_port *rocker_port)
1341{
1342	netif_carrier_off(rocker_port->dev);
1343	netdev_info(rocker_port->dev, "Link is down\n");
1344}
1345
1346static int rocker_event_link_change(struct rocker *rocker,
1347				    const struct rocker_tlv *info)
1348{
1349	struct rocker_tlv *attrs[ROCKER_TLV_EVENT_LINK_CHANGED_MAX + 1];
1350	unsigned int port_number;
1351	bool link_up;
1352	struct rocker_port *rocker_port;
1353
1354	rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_LINK_CHANGED_MAX, info);
1355	if (!attrs[ROCKER_TLV_EVENT_LINK_CHANGED_PPORT] ||
1356	    !attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP])
1357		return -EIO;
1358	port_number =
1359		rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_PPORT]) - 1;
1360	link_up = rocker_tlv_get_u8(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP]);
1361
1362	if (port_number >= rocker->port_count)
1363		return -EINVAL;
1364
1365	rocker_port = rocker->ports[port_number];
1366	if (netif_carrier_ok(rocker_port->dev) != link_up) {
1367		if (link_up)
1368			rocker_port_link_up(rocker_port);
1369		else
1370			rocker_port_link_down(rocker_port);
1371	}
1372
1373	return 0;
1374}
1375
1376#define ROCKER_OP_FLAG_REMOVE		BIT(0)
1377#define ROCKER_OP_FLAG_NOWAIT		BIT(1)
1378#define ROCKER_OP_FLAG_LEARNED		BIT(2)
1379#define ROCKER_OP_FLAG_REFRESH		BIT(3)
1380
1381static int rocker_port_fdb(struct rocker_port *rocker_port,
1382			   const unsigned char *addr,
1383			   __be16 vlan_id, int flags);
1384
1385static int rocker_event_mac_vlan_seen(struct rocker *rocker,
1386				      const struct rocker_tlv *info)
1387{
1388	struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAX + 1];
1389	unsigned int port_number;
1390	struct rocker_port *rocker_port;
1391	unsigned char *addr;
1392	int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_LEARNED;
1393	__be16 vlan_id;
1394
1395	rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_MAC_VLAN_MAX, info);
1396	if (!attrs[ROCKER_TLV_EVENT_MAC_VLAN_PPORT] ||
1397	    !attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC] ||
1398	    !attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID])
1399		return -EIO;
1400	port_number =
1401		rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_MAC_VLAN_PPORT]) - 1;
1402	addr = rocker_tlv_data(attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC]);
1403	vlan_id = rocker_tlv_get_be16(attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID]);
1404
1405	if (port_number >= rocker->port_count)
1406		return -EINVAL;
1407
1408	rocker_port = rocker->ports[port_number];
1409
1410	if (rocker_port->stp_state != BR_STATE_LEARNING &&
1411	    rocker_port->stp_state != BR_STATE_FORWARDING)
1412		return 0;
1413
1414	return rocker_port_fdb(rocker_port, addr, vlan_id, flags);
1415}
1416
1417static int rocker_event_process(struct rocker *rocker,
1418				struct rocker_desc_info *desc_info)
1419{
1420	struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAX + 1];
1421	struct rocker_tlv *info;
1422	u16 type;
1423
1424	rocker_tlv_parse_desc(attrs, ROCKER_TLV_EVENT_MAX, desc_info);
1425	if (!attrs[ROCKER_TLV_EVENT_TYPE] ||
1426	    !attrs[ROCKER_TLV_EVENT_INFO])
1427		return -EIO;
1428
1429	type = rocker_tlv_get_u16(attrs[ROCKER_TLV_EVENT_TYPE]);
1430	info = attrs[ROCKER_TLV_EVENT_INFO];
1431
1432	switch (type) {
1433	case ROCKER_TLV_EVENT_TYPE_LINK_CHANGED:
1434		return rocker_event_link_change(rocker, info);
1435	case ROCKER_TLV_EVENT_TYPE_MAC_VLAN_SEEN:
1436		return rocker_event_mac_vlan_seen(rocker, info);
1437	}
1438
1439	return -EOPNOTSUPP;
1440}
1441
1442static irqreturn_t rocker_event_irq_handler(int irq, void *dev_id)
1443{
1444	struct rocker *rocker = dev_id;
1445	struct pci_dev *pdev = rocker->pdev;
1446	struct rocker_desc_info *desc_info;
1447	u32 credits = 0;
1448	int err;
1449
1450	while ((desc_info = rocker_desc_tail_get(&rocker->event_ring))) {
1451		err = rocker_desc_err(desc_info);
1452		if (err) {
1453			dev_err(&pdev->dev, "event desc received with err %d\n",
1454				err);
1455		} else {
1456			err = rocker_event_process(rocker, desc_info);
1457			if (err)
1458				dev_err(&pdev->dev, "event processing failed with err %d\n",
1459					err);
1460		}
1461		rocker_desc_gen_clear(desc_info);
1462		rocker_desc_head_set(rocker, &rocker->event_ring, desc_info);
1463		credits++;
1464	}
1465	rocker_dma_ring_credits_set(rocker, &rocker->event_ring, credits);
1466
1467	return IRQ_HANDLED;
1468}
1469
1470static irqreturn_t rocker_tx_irq_handler(int irq, void *dev_id)
1471{
1472	struct rocker_port *rocker_port = dev_id;
1473
1474	napi_schedule(&rocker_port->napi_tx);
1475	return IRQ_HANDLED;
1476}
1477
1478static irqreturn_t rocker_rx_irq_handler(int irq, void *dev_id)
1479{
1480	struct rocker_port *rocker_port = dev_id;
1481
1482	napi_schedule(&rocker_port->napi_rx);
1483	return IRQ_HANDLED;
1484}
1485
1486/********************
1487 * Command interface
1488 ********************/
1489
1490typedef int (*rocker_cmd_cb_t)(struct rocker *rocker,
1491			       struct rocker_port *rocker_port,
1492			       struct rocker_desc_info *desc_info,
1493			       void *priv);
1494
1495static int rocker_cmd_exec(struct rocker *rocker,
1496			   struct rocker_port *rocker_port,
1497			   rocker_cmd_cb_t prepare, void *prepare_priv,
1498			   rocker_cmd_cb_t process, void *process_priv,
1499			   bool nowait)
1500{
1501	struct rocker_desc_info *desc_info;
1502	struct rocker_wait *wait;
1503	unsigned long flags;
1504	int err;
1505
1506	wait = rocker_wait_create(nowait ? GFP_ATOMIC : GFP_KERNEL);
1507	if (!wait)
1508		return -ENOMEM;
1509	wait->nowait = nowait;
1510
1511	spin_lock_irqsave(&rocker->cmd_ring_lock, flags);
1512	desc_info = rocker_desc_head_get(&rocker->cmd_ring);
1513	if (!desc_info) {
1514		spin_unlock_irqrestore(&rocker->cmd_ring_lock, flags);
1515		err = -EAGAIN;
1516		goto out;
1517	}
1518	err = prepare(rocker, rocker_port, desc_info, prepare_priv);
1519	if (err) {
1520		spin_unlock_irqrestore(&rocker->cmd_ring_lock, flags);
1521		goto out;
1522	}
1523	rocker_desc_cookie_ptr_set(desc_info, wait);
1524	rocker_desc_head_set(rocker, &rocker->cmd_ring, desc_info);
1525	spin_unlock_irqrestore(&rocker->cmd_ring_lock, flags);
1526
1527	if (nowait)
1528		return 0;
1529
1530	if (!rocker_wait_event_timeout(wait, HZ / 10))
1531		return -EIO;
1532
1533	err = rocker_desc_err(desc_info);
1534	if (err)
1535		return err;
1536
1537	if (process)
1538		err = process(rocker, rocker_port, desc_info, process_priv);
1539
1540	rocker_desc_gen_clear(desc_info);
1541out:
1542	rocker_wait_destroy(wait);
1543	return err;
1544}
1545
1546static int
1547rocker_cmd_get_port_settings_prep(struct rocker *rocker,
1548				  struct rocker_port *rocker_port,
1549				  struct rocker_desc_info *desc_info,
1550				  void *priv)
1551{
1552	struct rocker_tlv *cmd_info;
1553
1554	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
1555			       ROCKER_TLV_CMD_TYPE_GET_PORT_SETTINGS))
1556		return -EMSGSIZE;
1557	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1558	if (!cmd_info)
1559		return -EMSGSIZE;
1560	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
1561			       rocker_port->pport))
1562		return -EMSGSIZE;
1563	rocker_tlv_nest_end(desc_info, cmd_info);
1564	return 0;
1565}
1566
1567static int
1568rocker_cmd_get_port_settings_ethtool_proc(struct rocker *rocker,
1569					  struct rocker_port *rocker_port,
1570					  struct rocker_desc_info *desc_info,
1571					  void *priv)
1572{
1573	struct ethtool_cmd *ecmd = priv;
1574	struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
1575	struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
1576	u32 speed;
1577	u8 duplex;
1578	u8 autoneg;
1579
1580	rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
1581	if (!attrs[ROCKER_TLV_CMD_INFO])
1582		return -EIO;
1583
1584	rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
1585				attrs[ROCKER_TLV_CMD_INFO]);
1586	if (!info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED] ||
1587	    !info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX] ||
1588	    !info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG])
1589		return -EIO;
1590
1591	speed = rocker_tlv_get_u32(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED]);
1592	duplex = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX]);
1593	autoneg = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG]);
1594
1595	ecmd->transceiver = XCVR_INTERNAL;
1596	ecmd->supported = SUPPORTED_TP;
1597	ecmd->phy_address = 0xff;
1598	ecmd->port = PORT_TP;
1599	ethtool_cmd_speed_set(ecmd, speed);
1600	ecmd->duplex = duplex ? DUPLEX_FULL : DUPLEX_HALF;
1601	ecmd->autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
1602
1603	return 0;
1604}
1605
1606static int
1607rocker_cmd_get_port_settings_macaddr_proc(struct rocker *rocker,
1608					  struct rocker_port *rocker_port,
1609					  struct rocker_desc_info *desc_info,
1610					  void *priv)
1611{
1612	unsigned char *macaddr = priv;
1613	struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
1614	struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
1615	struct rocker_tlv *attr;
1616
1617	rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
1618	if (!attrs[ROCKER_TLV_CMD_INFO])
1619		return -EIO;
1620
1621	rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
1622				attrs[ROCKER_TLV_CMD_INFO]);
1623	attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR];
1624	if (!attr)
1625		return -EIO;
1626
1627	if (rocker_tlv_len(attr) != ETH_ALEN)
1628		return -EINVAL;
1629
1630	ether_addr_copy(macaddr, rocker_tlv_data(attr));
1631	return 0;
1632}
1633
1634struct port_name {
1635	char *buf;
1636	size_t len;
1637};
1638
1639static int
1640rocker_cmd_get_port_settings_phys_name_proc(struct rocker *rocker,
1641					    struct rocker_port *rocker_port,
1642					    struct rocker_desc_info *desc_info,
1643					    void *priv)
1644{
1645	struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
1646	struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
1647	struct port_name *name = priv;
1648	struct rocker_tlv *attr;
1649	size_t i, j, len;
1650	char *str;
1651
1652	rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
1653	if (!attrs[ROCKER_TLV_CMD_INFO])
1654		return -EIO;
1655
1656	rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
1657				attrs[ROCKER_TLV_CMD_INFO]);
1658	attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_PHYS_NAME];
1659	if (!attr)
1660		return -EIO;
1661
1662	len = min_t(size_t, rocker_tlv_len(attr), name->len);
1663	str = rocker_tlv_data(attr);
1664
1665	/* make sure name only contains alphanumeric characters */
1666	for (i = j = 0; i < len; ++i) {
1667		if (isalnum(str[i])) {
1668			name->buf[j] = str[i];
1669			j++;
1670		}
1671	}
1672
1673	if (j == 0)
1674		return -EIO;
1675
1676	name->buf[j] = '\0';
1677
1678	return 0;
1679}
1680
1681static int
1682rocker_cmd_set_port_settings_ethtool_prep(struct rocker *rocker,
1683					  struct rocker_port *rocker_port,
1684					  struct rocker_desc_info *desc_info,
1685					  void *priv)
1686{
1687	struct ethtool_cmd *ecmd = priv;
1688	struct rocker_tlv *cmd_info;
1689
1690	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
1691			       ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
1692		return -EMSGSIZE;
1693	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1694	if (!cmd_info)
1695		return -EMSGSIZE;
1696	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
1697			       rocker_port->pport))
1698		return -EMSGSIZE;
1699	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_SPEED,
1700			       ethtool_cmd_speed(ecmd)))
1701		return -EMSGSIZE;
1702	if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX,
1703			      ecmd->duplex))
1704		return -EMSGSIZE;
1705	if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG,
1706			      ecmd->autoneg))
1707		return -EMSGSIZE;
1708	rocker_tlv_nest_end(desc_info, cmd_info);
1709	return 0;
1710}
1711
1712static int
1713rocker_cmd_set_port_settings_macaddr_prep(struct rocker *rocker,
1714					  struct rocker_port *rocker_port,
1715					  struct rocker_desc_info *desc_info,
1716					  void *priv)
1717{
1718	unsigned char *macaddr = priv;
1719	struct rocker_tlv *cmd_info;
1720
1721	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
1722			       ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
1723		return -EMSGSIZE;
1724	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1725	if (!cmd_info)
1726		return -EMSGSIZE;
1727	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
1728			       rocker_port->pport))
1729		return -EMSGSIZE;
1730	if (rocker_tlv_put(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR,
1731			   ETH_ALEN, macaddr))
1732		return -EMSGSIZE;
1733	rocker_tlv_nest_end(desc_info, cmd_info);
1734	return 0;
1735}
1736
1737static int
1738rocker_cmd_set_port_learning_prep(struct rocker *rocker,
1739				  struct rocker_port *rocker_port,
1740				  struct rocker_desc_info *desc_info,
1741				  void *priv)
1742{
1743	struct rocker_tlv *cmd_info;
1744
1745	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
1746			       ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
1747		return -EMSGSIZE;
1748	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1749	if (!cmd_info)
1750		return -EMSGSIZE;
1751	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
1752			       rocker_port->pport))
1753		return -EMSGSIZE;
1754	if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_LEARNING,
1755			      !!(rocker_port->brport_flags & BR_LEARNING)))
1756		return -EMSGSIZE;
1757	rocker_tlv_nest_end(desc_info, cmd_info);
1758	return 0;
1759}
1760
1761static int rocker_cmd_get_port_settings_ethtool(struct rocker_port *rocker_port,
1762						struct ethtool_cmd *ecmd)
1763{
1764	return rocker_cmd_exec(rocker_port->rocker, rocker_port,
1765			       rocker_cmd_get_port_settings_prep, NULL,
1766			       rocker_cmd_get_port_settings_ethtool_proc,
1767			       ecmd, false);
1768}
1769
1770static int rocker_cmd_get_port_settings_macaddr(struct rocker_port *rocker_port,
1771						unsigned char *macaddr)
1772{
1773	return rocker_cmd_exec(rocker_port->rocker, rocker_port,
1774			       rocker_cmd_get_port_settings_prep, NULL,
1775			       rocker_cmd_get_port_settings_macaddr_proc,
1776			       macaddr, false);
1777}
1778
1779static int rocker_cmd_set_port_settings_ethtool(struct rocker_port *rocker_port,
1780						struct ethtool_cmd *ecmd)
1781{
1782	return rocker_cmd_exec(rocker_port->rocker, rocker_port,
1783			       rocker_cmd_set_port_settings_ethtool_prep,
1784			       ecmd, NULL, NULL, false);
1785}
1786
1787static int rocker_cmd_set_port_settings_macaddr(struct rocker_port *rocker_port,
1788						unsigned char *macaddr)
1789{
1790	return rocker_cmd_exec(rocker_port->rocker, rocker_port,
1791			       rocker_cmd_set_port_settings_macaddr_prep,
1792			       macaddr, NULL, NULL, false);
1793}
1794
1795static int rocker_port_set_learning(struct rocker_port *rocker_port)
1796{
1797	return rocker_cmd_exec(rocker_port->rocker, rocker_port,
1798			       rocker_cmd_set_port_learning_prep,
1799			       NULL, NULL, NULL, false);
1800}
1801
1802static int rocker_cmd_flow_tbl_add_ig_port(struct rocker_desc_info *desc_info,
1803					   struct rocker_flow_tbl_entry *entry)
1804{
1805	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
1806			       entry->key.ig_port.in_pport))
1807		return -EMSGSIZE;
1808	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
1809			       entry->key.ig_port.in_pport_mask))
1810		return -EMSGSIZE;
1811	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
1812			       entry->key.ig_port.goto_tbl))
1813		return -EMSGSIZE;
1814
1815	return 0;
1816}
1817
1818static int rocker_cmd_flow_tbl_add_vlan(struct rocker_desc_info *desc_info,
1819					struct rocker_flow_tbl_entry *entry)
1820{
1821	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
1822			       entry->key.vlan.in_pport))
1823		return -EMSGSIZE;
1824	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
1825				entry->key.vlan.vlan_id))
1826		return -EMSGSIZE;
1827	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
1828				entry->key.vlan.vlan_id_mask))
1829		return -EMSGSIZE;
1830	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
1831			       entry->key.vlan.goto_tbl))
1832		return -EMSGSIZE;
1833	if (entry->key.vlan.untagged &&
1834	    rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_NEW_VLAN_ID,
1835				entry->key.vlan.new_vlan_id))
1836		return -EMSGSIZE;
1837
1838	return 0;
1839}
1840
1841static int rocker_cmd_flow_tbl_add_term_mac(struct rocker_desc_info *desc_info,
1842					    struct rocker_flow_tbl_entry *entry)
1843{
1844	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
1845			       entry->key.term_mac.in_pport))
1846		return -EMSGSIZE;
1847	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
1848			       entry->key.term_mac.in_pport_mask))
1849		return -EMSGSIZE;
1850	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
1851				entry->key.term_mac.eth_type))
1852		return -EMSGSIZE;
1853	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
1854			   ETH_ALEN, entry->key.term_mac.eth_dst))
1855		return -EMSGSIZE;
1856	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
1857			   ETH_ALEN, entry->key.term_mac.eth_dst_mask))
1858		return -EMSGSIZE;
1859	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
1860				entry->key.term_mac.vlan_id))
1861		return -EMSGSIZE;
1862	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
1863				entry->key.term_mac.vlan_id_mask))
1864		return -EMSGSIZE;
1865	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
1866			       entry->key.term_mac.goto_tbl))
1867		return -EMSGSIZE;
1868	if (entry->key.term_mac.copy_to_cpu &&
1869	    rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION,
1870			      entry->key.term_mac.copy_to_cpu))
1871		return -EMSGSIZE;
1872
1873	return 0;
1874}
1875
1876static int
1877rocker_cmd_flow_tbl_add_ucast_routing(struct rocker_desc_info *desc_info,
1878				      struct rocker_flow_tbl_entry *entry)
1879{
1880	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
1881				entry->key.ucast_routing.eth_type))
1882		return -EMSGSIZE;
1883	if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP,
1884				entry->key.ucast_routing.dst4))
1885		return -EMSGSIZE;
1886	if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP_MASK,
1887				entry->key.ucast_routing.dst4_mask))
1888		return -EMSGSIZE;
1889	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
1890			       entry->key.ucast_routing.goto_tbl))
1891		return -EMSGSIZE;
1892	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
1893			       entry->key.ucast_routing.group_id))
1894		return -EMSGSIZE;
1895
1896	return 0;
1897}
1898
1899static int rocker_cmd_flow_tbl_add_bridge(struct rocker_desc_info *desc_info,
1900					  struct rocker_flow_tbl_entry *entry)
1901{
1902	if (entry->key.bridge.has_eth_dst &&
1903	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
1904			   ETH_ALEN, entry->key.bridge.eth_dst))
1905		return -EMSGSIZE;
1906	if (entry->key.bridge.has_eth_dst_mask &&
1907	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
1908			   ETH_ALEN, entry->key.bridge.eth_dst_mask))
1909		return -EMSGSIZE;
1910	if (entry->key.bridge.vlan_id &&
1911	    rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
1912				entry->key.bridge.vlan_id))
1913		return -EMSGSIZE;
1914	if (entry->key.bridge.tunnel_id &&
1915	    rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_TUNNEL_ID,
1916			       entry->key.bridge.tunnel_id))
1917		return -EMSGSIZE;
1918	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
1919			       entry->key.bridge.goto_tbl))
1920		return -EMSGSIZE;
1921	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
1922			       entry->key.bridge.group_id))
1923		return -EMSGSIZE;
1924	if (entry->key.bridge.copy_to_cpu &&
1925	    rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION,
1926			      entry->key.bridge.copy_to_cpu))
1927		return -EMSGSIZE;
1928
1929	return 0;
1930}
1931
1932static int rocker_cmd_flow_tbl_add_acl(struct rocker_desc_info *desc_info,
1933				       struct rocker_flow_tbl_entry *entry)
1934{
1935	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
1936			       entry->key.acl.in_pport))
1937		return -EMSGSIZE;
1938	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
1939			       entry->key.acl.in_pport_mask))
1940		return -EMSGSIZE;
1941	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
1942			   ETH_ALEN, entry->key.acl.eth_src))
1943		return -EMSGSIZE;
1944	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC_MASK,
1945			   ETH_ALEN, entry->key.acl.eth_src_mask))
1946		return -EMSGSIZE;
1947	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
1948			   ETH_ALEN, entry->key.acl.eth_dst))
1949		return -EMSGSIZE;
1950	if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
1951			   ETH_ALEN, entry->key.acl.eth_dst_mask))
1952		return -EMSGSIZE;
1953	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
1954				entry->key.acl.eth_type))
1955		return -EMSGSIZE;
1956	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
1957				entry->key.acl.vlan_id))
1958		return -EMSGSIZE;
1959	if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
1960				entry->key.acl.vlan_id_mask))
1961		return -EMSGSIZE;
1962
1963	switch (ntohs(entry->key.acl.eth_type)) {
1964	case ETH_P_IP:
1965	case ETH_P_IPV6:
1966		if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_PROTO,
1967				      entry->key.acl.ip_proto))
1968			return -EMSGSIZE;
1969		if (rocker_tlv_put_u8(desc_info,
1970				      ROCKER_TLV_OF_DPA_IP_PROTO_MASK,
1971				      entry->key.acl.ip_proto_mask))
1972			return -EMSGSIZE;
1973		if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_DSCP,
1974				      entry->key.acl.ip_tos & 0x3f))
1975			return -EMSGSIZE;
1976		if (rocker_tlv_put_u8(desc_info,
1977				      ROCKER_TLV_OF_DPA_IP_DSCP_MASK,
1978				      entry->key.acl.ip_tos_mask & 0x3f))
1979			return -EMSGSIZE;
1980		if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_ECN,
1981				      (entry->key.acl.ip_tos & 0xc0) >> 6))
1982			return -EMSGSIZE;
1983		if (rocker_tlv_put_u8(desc_info,
1984				      ROCKER_TLV_OF_DPA_IP_ECN_MASK,
1985				      (entry->key.acl.ip_tos_mask & 0xc0) >> 6))
1986			return -EMSGSIZE;
1987		break;
1988	}
1989
1990	if (entry->key.acl.group_id != ROCKER_GROUP_NONE &&
1991	    rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
1992			       entry->key.acl.group_id))
1993		return -EMSGSIZE;
1994
1995	return 0;
1996}
1997
1998static int rocker_cmd_flow_tbl_add(struct rocker *rocker,
1999				   struct rocker_port *rocker_port,
2000				   struct rocker_desc_info *desc_info,
2001				   void *priv)
2002{
2003	struct rocker_flow_tbl_entry *entry = priv;
2004	struct rocker_tlv *cmd_info;
2005	int err = 0;
2006
2007	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
2008		return -EMSGSIZE;
2009	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
2010	if (!cmd_info)
2011		return -EMSGSIZE;
2012	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_TABLE_ID,
2013			       entry->key.tbl_id))
2014		return -EMSGSIZE;
2015	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_PRIORITY,
2016			       entry->key.priority))
2017		return -EMSGSIZE;
2018	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_HARDTIME, 0))
2019		return -EMSGSIZE;
2020	if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE,
2021			       entry->cookie))
2022		return -EMSGSIZE;
2023
2024	switch (entry->key.tbl_id) {
2025	case ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT:
2026		err = rocker_cmd_flow_tbl_add_ig_port(desc_info, entry);
2027		break;
2028	case ROCKER_OF_DPA_TABLE_ID_VLAN:
2029		err = rocker_cmd_flow_tbl_add_vlan(desc_info, entry);
2030		break;
2031	case ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC:
2032		err = rocker_cmd_flow_tbl_add_term_mac(desc_info, entry);
2033		break;
2034	case ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING:
2035		err = rocker_cmd_flow_tbl_add_ucast_routing(desc_info, entry);
2036		break;
2037	case ROCKER_OF_DPA_TABLE_ID_BRIDGING:
2038		err = rocker_cmd_flow_tbl_add_bridge(desc_info, entry);
2039		break;
2040	case ROCKER_OF_DPA_TABLE_ID_ACL_POLICY:
2041		err = rocker_cmd_flow_tbl_add_acl(desc_info, entry);
2042		break;
2043	default:
2044		err = -ENOTSUPP;
2045		break;
2046	}
2047
2048	if (err)
2049		return err;
2050
2051	rocker_tlv_nest_end(desc_info, cmd_info);
2052
2053	return 0;
2054}
2055
2056static int rocker_cmd_flow_tbl_del(struct rocker *rocker,
2057				   struct rocker_port *rocker_port,
2058				   struct rocker_desc_info *desc_info,
2059				   void *priv)
2060{
2061	const struct rocker_flow_tbl_entry *entry = priv;
2062	struct rocker_tlv *cmd_info;
2063
2064	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
2065		return -EMSGSIZE;
2066	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
2067	if (!cmd_info)
2068		return -EMSGSIZE;
2069	if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE,
2070			       entry->cookie))
2071		return -EMSGSIZE;
2072	rocker_tlv_nest_end(desc_info, cmd_info);
2073
2074	return 0;
2075}
2076
2077static int
2078rocker_cmd_group_tbl_add_l2_interface(struct rocker_desc_info *desc_info,
2079				      struct rocker_group_tbl_entry *entry)
2080{
2081	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_OUT_PPORT,
2082			       ROCKER_GROUP_PORT_GET(entry->group_id)))
2083		return -EMSGSIZE;
2084	if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_POP_VLAN,
2085			      entry->l2_interface.pop_vlan))
2086		return -EMSGSIZE;
2087
2088	return 0;
2089}
2090
2091static int
2092rocker_cmd_group_tbl_add_l2_rewrite(struct rocker_desc_info *desc_info,
2093				    struct rocker_group_tbl_entry *entry)
2094{
2095	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,
2096			       entry->l2_rewrite.group_id))
2097		return -EMSGSIZE;
2098	if (!is_zero_ether_addr(entry->l2_rewrite.eth_src) &&
2099	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
2100			   ETH_ALEN, entry->l2_rewrite.eth_src))
2101		return -EMSGSIZE;
2102	if (!is_zero_ether_addr(entry->l2_rewrite.eth_dst) &&
2103	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
2104			   ETH_ALEN, entry->l2_rewrite.eth_dst))
2105		return -EMSGSIZE;
2106	if (entry->l2_rewrite.vlan_id &&
2107	    rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
2108				entry->l2_rewrite.vlan_id))
2109		return -EMSGSIZE;
2110
2111	return 0;
2112}
2113
2114static int
2115rocker_cmd_group_tbl_add_group_ids(struct rocker_desc_info *desc_info,
2116				   struct rocker_group_tbl_entry *entry)
2117{
2118	int i;
2119	struct rocker_tlv *group_ids;
2120
2121	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GROUP_COUNT,
2122			       entry->group_count))
2123		return -EMSGSIZE;
2124
2125	group_ids = rocker_tlv_nest_start(desc_info,
2126					  ROCKER_TLV_OF_DPA_GROUP_IDS);
2127	if (!group_ids)
2128		return -EMSGSIZE;
2129
2130	for (i = 0; i < entry->group_count; i++)
2131		/* Note TLV array is 1-based */
2132		if (rocker_tlv_put_u32(desc_info, i + 1, entry->group_ids[i]))
2133			return -EMSGSIZE;
2134
2135	rocker_tlv_nest_end(desc_info, group_ids);
2136
2137	return 0;
2138}
2139
2140static int
2141rocker_cmd_group_tbl_add_l3_unicast(struct rocker_desc_info *desc_info,
2142				    struct rocker_group_tbl_entry *entry)
2143{
2144	if (!is_zero_ether_addr(entry->l3_unicast.eth_src) &&
2145	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
2146			   ETH_ALEN, entry->l3_unicast.eth_src))
2147		return -EMSGSIZE;
2148	if (!is_zero_ether_addr(entry->l3_unicast.eth_dst) &&
2149	    rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
2150			   ETH_ALEN, entry->l3_unicast.eth_dst))
2151		return -EMSGSIZE;
2152	if (entry->l3_unicast.vlan_id &&
2153	    rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
2154				entry->l3_unicast.vlan_id))
2155		return -EMSGSIZE;
2156	if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_TTL_CHECK,
2157			      entry->l3_unicast.ttl_check))
2158		return -EMSGSIZE;
2159	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,
2160			       entry->l3_unicast.group_id))
2161		return -EMSGSIZE;
2162
2163	return 0;
2164}
2165
2166static int rocker_cmd_group_tbl_add(struct rocker *rocker,
2167				    struct rocker_port *rocker_port,
2168				    struct rocker_desc_info *desc_info,
2169				    void *priv)
2170{
2171	struct rocker_group_tbl_entry *entry = priv;
2172	struct rocker_tlv *cmd_info;
2173	int err = 0;
2174
2175	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
2176		return -EMSGSIZE;
2177	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
2178	if (!cmd_info)
2179		return -EMSGSIZE;
2180
2181	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
2182			       entry->group_id))
2183		return -EMSGSIZE;
2184
2185	switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
2186	case ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE:
2187		err = rocker_cmd_group_tbl_add_l2_interface(desc_info, entry);
2188		break;
2189	case ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE:
2190		err = rocker_cmd_group_tbl_add_l2_rewrite(desc_info, entry);
2191		break;
2192	case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
2193	case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
2194		err = rocker_cmd_group_tbl_add_group_ids(desc_info, entry);
2195		break;
2196	case ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST:
2197		err = rocker_cmd_group_tbl_add_l3_unicast(desc_info, entry);
2198		break;
2199	default:
2200		err = -ENOTSUPP;
2201		break;
2202	}
2203
2204	if (err)
2205		return err;
2206
2207	rocker_tlv_nest_end(desc_info, cmd_info);
2208
2209	return 0;
2210}
2211
2212static int rocker_cmd_group_tbl_del(struct rocker *rocker,
2213				    struct rocker_port *rocker_port,
2214				    struct rocker_desc_info *desc_info,
2215				    void *priv)
2216{
2217	const struct rocker_group_tbl_entry *entry = priv;
2218	struct rocker_tlv *cmd_info;
2219
2220	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
2221		return -EMSGSIZE;
2222	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
2223	if (!cmd_info)
2224		return -EMSGSIZE;
2225	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
2226			       entry->group_id))
2227		return -EMSGSIZE;
2228	rocker_tlv_nest_end(desc_info, cmd_info);
2229
2230	return 0;
2231}
2232
2233/***************************************************
2234 * Flow, group, FDB, internal VLAN and neigh tables
2235 ***************************************************/
2236
2237static int rocker_init_tbls(struct rocker *rocker)
2238{
2239	hash_init(rocker->flow_tbl);
2240	spin_lock_init(&rocker->flow_tbl_lock);
2241
2242	hash_init(rocker->group_tbl);
2243	spin_lock_init(&rocker->group_tbl_lock);
2244
2245	hash_init(rocker->fdb_tbl);
2246	spin_lock_init(&rocker->fdb_tbl_lock);
2247
2248	hash_init(rocker->internal_vlan_tbl);
2249	spin_lock_init(&rocker->internal_vlan_tbl_lock);
2250
2251	hash_init(rocker->neigh_tbl);
2252	spin_lock_init(&rocker->neigh_tbl_lock);
2253
2254	return 0;
2255}
2256
2257static void rocker_free_tbls(struct rocker *rocker)
2258{
2259	unsigned long flags;
2260	struct rocker_flow_tbl_entry *flow_entry;
2261	struct rocker_group_tbl_entry *group_entry;
2262	struct rocker_fdb_tbl_entry *fdb_entry;
2263	struct rocker_internal_vlan_tbl_entry *internal_vlan_entry;
2264	struct rocker_neigh_tbl_entry *neigh_entry;
2265	struct hlist_node *tmp;
2266	int bkt;
2267
2268	spin_lock_irqsave(&rocker->flow_tbl_lock, flags);
2269	hash_for_each_safe(rocker->flow_tbl, bkt, tmp, flow_entry, entry)
2270		hash_del(&flow_entry->entry);
2271	spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags);
2272
2273	spin_lock_irqsave(&rocker->group_tbl_lock, flags);
2274	hash_for_each_safe(rocker->group_tbl, bkt, tmp, group_entry, entry)
2275		hash_del(&group_entry->entry);
2276	spin_unlock_irqrestore(&rocker->group_tbl_lock, flags);
2277
2278	spin_lock_irqsave(&rocker->fdb_tbl_lock, flags);
2279	hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, fdb_entry, entry)
2280		hash_del(&fdb_entry->entry);
2281	spin_unlock_irqrestore(&rocker->fdb_tbl_lock, flags);
2282
2283	spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, flags);
2284	hash_for_each_safe(rocker->internal_vlan_tbl, bkt,
2285			   tmp, internal_vlan_entry, entry)
2286		hash_del(&internal_vlan_entry->entry);
2287	spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, flags);
2288
2289	spin_lock_irqsave(&rocker->neigh_tbl_lock, flags);
2290	hash_for_each_safe(rocker->neigh_tbl, bkt, tmp, neigh_entry, entry)
2291		hash_del(&neigh_entry->entry);
2292	spin_unlock_irqrestore(&rocker->neigh_tbl_lock, flags);
2293}
2294
2295static struct rocker_flow_tbl_entry *
2296rocker_flow_tbl_find(struct rocker *rocker, struct rocker_flow_tbl_entry *match)
2297{
2298	struct rocker_flow_tbl_entry *found;
2299	size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
2300
2301	hash_for_each_possible(rocker->flow_tbl, found,
2302			       entry, match->key_crc32) {
2303		if (memcmp(&found->key, &match->key, key_len) == 0)
2304			return found;
2305	}
2306
2307	return NULL;
2308}
2309
2310static int rocker_flow_tbl_add(struct rocker_port *rocker_port,
2311			       struct rocker_flow_tbl_entry *match,
2312			       bool nowait)
2313{
2314	struct rocker *rocker = rocker_port->rocker;
2315	struct rocker_flow_tbl_entry *found;
2316	size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
2317	unsigned long flags;
2318
2319	match->key_crc32 = crc32(~0, &match->key, key_len);
2320
2321	spin_lock_irqsave(&rocker->flow_tbl_lock, flags);
2322
2323	found = rocker_flow_tbl_find(rocker, match);
2324
2325	if (found) {
2326		match->cookie = found->cookie;
2327		hash_del(&found->entry);
2328		kfree(found);
2329		found = match;
2330		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD;
2331	} else {
2332		found = match;
2333		found->cookie = rocker->flow_tbl_next_cookie++;
2334		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD;
2335	}
2336
2337	hash_add(rocker->flow_tbl, &found->entry, found->key_crc32);
2338
2339	spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags);
2340
2341	return rocker_cmd_exec(rocker, rocker_port,
2342			       rocker_cmd_flow_tbl_add,
2343			       found, NULL, NULL, nowait);
2344}
2345
2346static int rocker_flow_tbl_del(struct rocker_port *rocker_port,
2347			       struct rocker_flow_tbl_entry *match,
2348			       bool nowait)
2349{
2350	struct rocker *rocker = rocker_port->rocker;
2351	struct rocker_flow_tbl_entry *found;
2352	size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
2353	unsigned long flags;
2354	int err = 0;
2355
2356	match->key_crc32 = crc32(~0, &match->key, key_len);
2357
2358	spin_lock_irqsave(&rocker->flow_tbl_lock, flags);
2359
2360	found = rocker_flow_tbl_find(rocker, match);
2361
2362	if (found) {
2363		hash_del(&found->entry);
2364		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL;
2365	}
2366
2367	spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags);
2368
2369	kfree(match);
2370
2371	if (found) {
2372		err = rocker_cmd_exec(rocker, rocker_port,
2373				      rocker_cmd_flow_tbl_del,
2374				      found, NULL, NULL, nowait);
2375		kfree(found);
2376	}
2377
2378	return err;
2379}
2380
2381static gfp_t rocker_op_flags_gfp(int flags)
2382{
2383	return flags & ROCKER_OP_FLAG_NOWAIT ? GFP_ATOMIC : GFP_KERNEL;
2384}
2385
2386static int rocker_flow_tbl_do(struct rocker_port *rocker_port,
2387			      int flags, struct rocker_flow_tbl_entry *entry)
2388{
2389	bool nowait = flags & ROCKER_OP_FLAG_NOWAIT;
2390
2391	if (flags & ROCKER_OP_FLAG_REMOVE)
2392		return rocker_flow_tbl_del(rocker_port, entry, nowait);
2393	else
2394		return rocker_flow_tbl_add(rocker_port, entry, nowait);
2395}
2396
2397static int rocker_flow_tbl_ig_port(struct rocker_port *rocker_port,
2398				   int flags, u32 in_pport, u32 in_pport_mask,
2399				   enum rocker_of_dpa_table_id goto_tbl)
2400{
2401	struct rocker_flow_tbl_entry *entry;
2402
2403	entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
2404	if (!entry)
2405		return -ENOMEM;
2406
2407	entry->key.priority = ROCKER_PRIORITY_IG_PORT;
2408	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT;
2409	entry->key.ig_port.in_pport = in_pport;
2410	entry->key.ig_port.in_pport_mask = in_pport_mask;
2411	entry->key.ig_port.goto_tbl = goto_tbl;
2412
2413	return rocker_flow_tbl_do(rocker_port, flags, entry);
2414}
2415
2416static int rocker_flow_tbl_vlan(struct rocker_port *rocker_port,
2417				int flags, u32 in_pport,
2418				__be16 vlan_id, __be16 vlan_id_mask,
2419				enum rocker_of_dpa_table_id goto_tbl,
2420				bool untagged, __be16 new_vlan_id)
2421{
2422	struct rocker_flow_tbl_entry *entry;
2423
2424	entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
2425	if (!entry)
2426		return -ENOMEM;
2427
2428	entry->key.priority = ROCKER_PRIORITY_VLAN;
2429	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_VLAN;
2430	entry->key.vlan.in_pport = in_pport;
2431	entry->key.vlan.vlan_id = vlan_id;
2432	entry->key.vlan.vlan_id_mask = vlan_id_mask;
2433	entry->key.vlan.goto_tbl = goto_tbl;
2434
2435	entry->key.vlan.untagged = untagged;
2436	entry->key.vlan.new_vlan_id = new_vlan_id;
2437
2438	return rocker_flow_tbl_do(rocker_port, flags, entry);
2439}
2440
2441static int rocker_flow_tbl_term_mac(struct rocker_port *rocker_port,
2442				    u32 in_pport, u32 in_pport_mask,
2443				    __be16 eth_type, const u8 *eth_dst,
2444				    const u8 *eth_dst_mask, __be16 vlan_id,
2445				    __be16 vlan_id_mask, bool copy_to_cpu,
2446				    int flags)
2447{
2448	struct rocker_flow_tbl_entry *entry;
2449
2450	entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
2451	if (!entry)
2452		return -ENOMEM;
2453
2454	if (is_multicast_ether_addr(eth_dst)) {
2455		entry->key.priority = ROCKER_PRIORITY_TERM_MAC_MCAST;
2456		entry->key.term_mac.goto_tbl =
2457			 ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING;
2458	} else {
2459		entry->key.priority = ROCKER_PRIORITY_TERM_MAC_UCAST;
2460		entry->key.term_mac.goto_tbl =
2461			 ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
2462	}
2463
2464	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
2465	entry->key.term_mac.in_pport = in_pport;
2466	entry->key.term_mac.in_pport_mask = in_pport_mask;
2467	entry->key.term_mac.eth_type = eth_type;
2468	ether_addr_copy(entry->key.term_mac.eth_dst, eth_dst);
2469	ether_addr_copy(entry->key.term_mac.eth_dst_mask, eth_dst_mask);
2470	entry->key.term_mac.vlan_id = vlan_id;
2471	entry->key.term_mac.vlan_id_mask = vlan_id_mask;
2472	entry->key.term_mac.copy_to_cpu = copy_to_cpu;
2473
2474	return rocker_flow_tbl_do(rocker_port, flags, entry);
2475}
2476
2477static int rocker_flow_tbl_bridge(struct rocker_port *rocker_port,
2478				  int flags,
2479				  const u8 *eth_dst, const u8 *eth_dst_mask,
2480				  __be16 vlan_id, u32 tunnel_id,
2481				  enum rocker_of_dpa_table_id goto_tbl,
2482				  u32 group_id, bool copy_to_cpu)
2483{
2484	struct rocker_flow_tbl_entry *entry;
2485	u32 priority;
2486	bool vlan_bridging = !!vlan_id;
2487	bool dflt = !eth_dst || (eth_dst && eth_dst_mask);
2488	bool wild = false;
2489
2490	entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
2491	if (!entry)
2492		return -ENOMEM;
2493
2494	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_BRIDGING;
2495
2496	if (eth_dst) {
2497		entry->key.bridge.has_eth_dst = 1;
2498		ether_addr_copy(entry->key.bridge.eth_dst, eth_dst);
2499	}
2500	if (eth_dst_mask) {
2501		entry->key.bridge.has_eth_dst_mask = 1;
2502		ether_addr_copy(entry->key.bridge.eth_dst_mask, eth_dst_mask);
2503		if (memcmp(eth_dst_mask, ff_mac, ETH_ALEN))
2504			wild = true;
2505	}
2506
2507	priority = ROCKER_PRIORITY_UNKNOWN;
2508	if (vlan_bridging && dflt && wild)
2509		priority = ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_WILD;
2510	else if (vlan_bridging && dflt && !wild)
2511		priority = ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_EXACT;
2512	else if (vlan_bridging && !dflt)
2513		priority = ROCKER_PRIORITY_BRIDGING_VLAN;
2514	else if (!vlan_bridging && dflt && wild)
2515		priority = ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_WILD;
2516	else if (!vlan_bridging && dflt && !wild)
2517		priority = ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_EXACT;
2518	else if (!vlan_bridging && !dflt)
2519		priority = ROCKER_PRIORITY_BRIDGING_TENANT;
2520
2521	entry->key.priority = priority;
2522	entry->key.bridge.vlan_id = vlan_id;
2523	entry->key.bridge.tunnel_id = tunnel_id;
2524	entry->key.bridge.goto_tbl = goto_tbl;
2525	entry->key.bridge.group_id = group_id;
2526	entry->key.bridge.copy_to_cpu = copy_to_cpu;
2527
2528	return rocker_flow_tbl_do(rocker_port, flags, entry);
2529}
2530
2531static int rocker_flow_tbl_ucast4_routing(struct rocker_port *rocker_port,
2532					  __be16 eth_type, __be32 dst,
2533					  __be32 dst_mask, u32 priority,
2534					  enum rocker_of_dpa_table_id goto_tbl,
2535					  u32 group_id, int flags)
2536{
2537	struct rocker_flow_tbl_entry *entry;
2538
2539	entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
2540	if (!entry)
2541		return -ENOMEM;
2542
2543	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
2544	entry->key.priority = priority;
2545	entry->key.ucast_routing.eth_type = eth_type;
2546	entry->key.ucast_routing.dst4 = dst;
2547	entry->key.ucast_routing.dst4_mask = dst_mask;
2548	entry->key.ucast_routing.goto_tbl = goto_tbl;
2549	entry->key.ucast_routing.group_id = group_id;
2550	entry->key_len = offsetof(struct rocker_flow_tbl_key,
2551				  ucast_routing.group_id);
2552
2553	return rocker_flow_tbl_do(rocker_port, flags, entry);
2554}
2555
2556static int rocker_flow_tbl_acl(struct rocker_port *rocker_port,
2557			       int flags, u32 in_pport,
2558			       u32 in_pport_mask,
2559			       const u8 *eth_src, const u8 *eth_src_mask,
2560			       const u8 *eth_dst, const u8 *eth_dst_mask,
2561			       __be16 eth_type,
2562			       __be16 vlan_id, __be16 vlan_id_mask,
2563			       u8 ip_proto, u8 ip_proto_mask,
2564			       u8 ip_tos, u8 ip_tos_mask,
2565			       u32 group_id)
2566{
2567	u32 priority;
2568	struct rocker_flow_tbl_entry *entry;
2569
2570	entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
2571	if (!entry)
2572		return -ENOMEM;
2573
2574	priority = ROCKER_PRIORITY_ACL_NORMAL;
2575	if (eth_dst && eth_dst_mask) {
2576		if (memcmp(eth_dst_mask, mcast_mac, ETH_ALEN) == 0)
2577			priority = ROCKER_PRIORITY_ACL_DFLT;
2578		else if (is_link_local_ether_addr(eth_dst))
2579			priority = ROCKER_PRIORITY_ACL_CTRL;
2580	}
2581
2582	entry->key.priority = priority;
2583	entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
2584	entry->key.acl.in_pport = in_pport;
2585	entry->key.acl.in_pport_mask = in_pport_mask;
2586
2587	if (eth_src)
2588		ether_addr_copy(entry->key.acl.eth_src, eth_src);
2589	if (eth_src_mask)
2590		ether_addr_copy(entry->key.acl.eth_src_mask, eth_src_mask);
2591	if (eth_dst)
2592		ether_addr_copy(entry->key.acl.eth_dst, eth_dst);
2593	if (eth_dst_mask)
2594		ether_addr_copy(entry->key.acl.eth_dst_mask, eth_dst_mask);
2595
2596	entry->key.acl.eth_type = eth_type;
2597	entry->key.acl.vlan_id = vlan_id;
2598	entry->key.acl.vlan_id_mask = vlan_id_mask;
2599	entry->key.acl.ip_proto = ip_proto;
2600	entry->key.acl.ip_proto_mask = ip_proto_mask;
2601	entry->key.acl.ip_tos = ip_tos;
2602	entry->key.acl.ip_tos_mask = ip_tos_mask;
2603	entry->key.acl.group_id = group_id;
2604
2605	return rocker_flow_tbl_do(rocker_port, flags, entry);
2606}
2607
2608static struct rocker_group_tbl_entry *
2609rocker_group_tbl_find(struct rocker *rocker,
2610		      struct rocker_group_tbl_entry *match)
2611{
2612	struct rocker_group_tbl_entry *found;
2613
2614	hash_for_each_possible(rocker->group_tbl, found,
2615			       entry, match->group_id) {
2616		if (found->group_id == match->group_id)
2617			return found;
2618	}
2619
2620	return NULL;
2621}
2622
2623static void rocker_group_tbl_entry_free(struct rocker_group_tbl_entry *entry)
2624{
2625	switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
2626	case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
2627	case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
2628		kfree(entry->group_ids);
2629		break;
2630	default:
2631		break;
2632	}
2633	kfree(entry);
2634}
2635
2636static int rocker_group_tbl_add(struct rocker_port *rocker_port,
2637				struct rocker_group_tbl_entry *match,
2638				bool nowait)
2639{
2640	struct rocker *rocker = rocker_port->rocker;
2641	struct rocker_group_tbl_entry *found;
2642	unsigned long flags;
2643
2644	spin_lock_irqsave(&rocker->group_tbl_lock, flags);
2645
2646	found = rocker_group_tbl_find(rocker, match);
2647
2648	if (found) {
2649		hash_del(&found->entry);
2650		rocker_group_tbl_entry_free(found);
2651		found = match;
2652		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD;
2653	} else {
2654		found = match;
2655		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD;
2656	}
2657
2658	hash_add(rocker->group_tbl, &found->entry, found->group_id);
2659
2660	spin_unlock_irqrestore(&rocker->group_tbl_lock, flags);
2661
2662	return rocker_cmd_exec(rocker, rocker_port,
2663			       rocker_cmd_group_tbl_add,
2664			       found, NULL, NULL, nowait);
2665}
2666
2667static int rocker_group_tbl_del(struct rocker_port *rocker_port,
2668				struct rocker_group_tbl_entry *match,
2669				bool nowait)
2670{
2671	struct rocker *rocker = rocker_port->rocker;
2672	struct rocker_group_tbl_entry *found;
2673	unsigned long flags;
2674	int err = 0;
2675
2676	spin_lock_irqsave(&rocker->group_tbl_lock, flags);
2677
2678	found = rocker_group_tbl_find(rocker, match);
2679
2680	if (found) {
2681		hash_del(&found->entry);
2682		found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL;
2683	}
2684
2685	spin_unlock_irqrestore(&rocker->group_tbl_lock, flags);
2686
2687	rocker_group_tbl_entry_free(match);
2688
2689	if (found) {
2690		err = rocker_cmd_exec(rocker, rocker_port,
2691				      rocker_cmd_group_tbl_del,
2692				      found, NULL, NULL, nowait);
2693		rocker_group_tbl_entry_free(found);
2694	}
2695
2696	return err;
2697}
2698
2699static int rocker_group_tbl_do(struct rocker_port *rocker_port,
2700			       int flags, struct rocker_group_tbl_entry *entry)
2701{
2702	bool nowait = flags & ROCKER_OP_FLAG_NOWAIT;
2703
2704	if (flags & ROCKER_OP_FLAG_REMOVE)
2705		return rocker_group_tbl_del(rocker_port, entry, nowait);
2706	else
2707		return rocker_group_tbl_add(rocker_port, entry, nowait);
2708}
2709
2710static int rocker_group_l2_interface(struct rocker_port *rocker_port,
2711				     int flags, __be16 vlan_id,
2712				     u32 out_pport, int pop_vlan)
2713{
2714	struct rocker_group_tbl_entry *entry;
2715
2716	entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
2717	if (!entry)
2718		return -ENOMEM;
2719
2720	entry->group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
2721	entry->l2_interface.pop_vlan = pop_vlan;
2722
2723	return rocker_group_tbl_do(rocker_port, flags, entry);
2724}
2725
2726static int rocker_group_l2_fan_out(struct rocker_port *rocker_port,
2727				   int flags, u8 group_count,
2728				   u32 *group_ids, u32 group_id)
2729{
2730	struct rocker_group_tbl_entry *entry;
2731
2732	entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
2733	if (!entry)
2734		return -ENOMEM;
2735
2736	entry->group_id = group_id;
2737	entry->group_count = group_count;
2738
2739	entry->group_ids = kcalloc(group_count, sizeof(u32),
2740				   rocker_op_flags_gfp(flags));
2741	if (!entry->group_ids) {
2742		kfree(entry);
2743		return -ENOMEM;
2744	}
2745	memcpy(entry->group_ids, group_ids, group_count * sizeof(u32));
2746
2747	return rocker_group_tbl_do(rocker_port, flags, entry);
2748}
2749
2750static int rocker_group_l2_flood(struct rocker_port *rocker_port,
2751				 int flags, __be16 vlan_id,
2752				 u8 group_count, u32 *group_ids,
2753				 u32 group_id)
2754{
2755	return rocker_group_l2_fan_out(rocker_port, flags,
2756				       group_count, group_ids,
2757				       group_id);
2758}
2759
2760static int rocker_group_l3_unicast(struct rocker_port *rocker_port,
2761				   int flags, u32 index, u8 *src_mac,
2762				   u8 *dst_mac, __be16 vlan_id,
2763				   bool ttl_check, u32 pport)
2764{
2765	struct rocker_group_tbl_entry *entry;
2766
2767	entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
2768	if (!entry)
2769		return -ENOMEM;
2770
2771	entry->group_id = ROCKER_GROUP_L3_UNICAST(index);
2772	if (src_mac)
2773		ether_addr_copy(entry->l3_unicast.eth_src, src_mac);
2774	if (dst_mac)
2775		ether_addr_copy(entry->l3_unicast.eth_dst, dst_mac);
2776	entry->l3_unicast.vlan_id = vlan_id;
2777	entry->l3_unicast.ttl_check = ttl_check;
2778	entry->l3_unicast.group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, pport);
2779
2780	return rocker_group_tbl_do(rocker_port, flags, entry);
2781}
2782
2783static struct rocker_neigh_tbl_entry *
2784	rocker_neigh_tbl_find(struct rocker *rocker, __be32 ip_addr)
2785{
2786	struct rocker_neigh_tbl_entry *found;
2787
2788	hash_for_each_possible(rocker->neigh_tbl, found,
2789			       entry, be32_to_cpu(ip_addr))
2790		if (found->ip_addr == ip_addr)
2791			return found;
2792
2793	return NULL;
2794}
2795
2796static void _rocker_neigh_add(struct rocker *rocker,
2797			      struct rocker_neigh_tbl_entry *entry)
2798{
2799	entry->index = rocker->neigh_tbl_next_index++;
2800	entry->ref_count++;
2801	hash_add(rocker->neigh_tbl, &entry->entry,
2802		 be32_to_cpu(entry->ip_addr));
2803}
2804
2805static void _rocker_neigh_del(struct rocker *rocker,
2806			      struct rocker_neigh_tbl_entry *entry)
2807{
2808	if (--entry->ref_count == 0) {
2809		hash_del(&entry->entry);
2810		kfree(entry);
2811	}
2812}
2813
2814static void _rocker_neigh_update(struct rocker *rocker,
2815				 struct rocker_neigh_tbl_entry *entry,
2816				 u8 *eth_dst, bool ttl_check)
2817{
2818	if (eth_dst) {
2819		ether_addr_copy(entry->eth_dst, eth_dst);
2820		entry->ttl_check = ttl_check;
2821	} else {
2822		entry->ref_count++;
2823	}
2824}
2825
2826static int rocker_port_ipv4_neigh(struct rocker_port *rocker_port,
2827				  int flags, __be32 ip_addr, u8 *eth_dst)
2828{
2829	struct rocker *rocker = rocker_port->rocker;
2830	struct rocker_neigh_tbl_entry *entry;
2831	struct rocker_neigh_tbl_entry *found;
2832	unsigned long lock_flags;
2833	__be16 eth_type = htons(ETH_P_IP);
2834	enum rocker_of_dpa_table_id goto_tbl =
2835		ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
2836	u32 group_id;
2837	u32 priority = 0;
2838	bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
2839	bool updating;
2840	bool removing;
2841	int err = 0;
2842
2843	entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
2844	if (!entry)
2845		return -ENOMEM;
2846
2847	spin_lock_irqsave(&rocker->neigh_tbl_lock, lock_flags);
2848
2849	found = rocker_neigh_tbl_find(rocker, ip_addr);
2850
2851	updating = found && adding;
2852	removing = found && !adding;
2853	adding = !found && adding;
2854
2855	if (adding) {
2856		entry->ip_addr = ip_addr;
2857		entry->dev = rocker_port->dev;
2858		ether_addr_copy(entry->eth_dst, eth_dst);
2859		entry->ttl_check = true;
2860		_rocker_neigh_add(rocker, entry);
2861	} else if (removing) {
2862		memcpy(entry, found, sizeof(*entry));
2863		_rocker_neigh_del(rocker, found);
2864	} else if (updating) {
2865		_rocker_neigh_update(rocker, found, eth_dst, true);
2866		memcpy(entry, found, sizeof(*entry));
2867	} else {
2868		err = -ENOENT;
2869	}
2870
2871	spin_unlock_irqrestore(&rocker->neigh_tbl_lock, lock_flags);
2872
2873	if (err)
2874		goto err_out;
2875
2876	/* For each active neighbor, we have an L3 unicast group and
2877	 * a /32 route to the neighbor, which uses the L3 unicast
2878	 * group.  The L3 unicast group can also be referred to by
2879	 * other routes' nexthops.
2880	 */
2881
2882	err = rocker_group_l3_unicast(rocker_port, flags,
2883				      entry->index,
2884				      rocker_port->dev->dev_addr,
2885				      entry->eth_dst,
2886				      rocker_port->internal_vlan_id,
2887				      entry->ttl_check,
2888				      rocker_port->pport);
2889	if (err) {
2890		netdev_err(rocker_port->dev,
2891			   "Error (%d) L3 unicast group index %d\n",
2892			   err, entry->index);
2893		goto err_out;
2894	}
2895
2896	if (adding || removing) {
2897		group_id = ROCKER_GROUP_L3_UNICAST(entry->index);
2898		err = rocker_flow_tbl_ucast4_routing(rocker_port,
2899						     eth_type, ip_addr,
2900						     inet_make_mask(32),
2901						     priority, goto_tbl,
2902						     group_id, flags);
2903
2904		if (err)
2905			netdev_err(rocker_port->dev,
2906				   "Error (%d) /32 unicast route %pI4 group 0x%08x\n",
2907				   err, &entry->ip_addr, group_id);
2908	}
2909
2910err_out:
2911	if (!adding)
2912		kfree(entry);
2913
2914	return err;
2915}
2916
2917static int rocker_port_ipv4_resolve(struct rocker_port *rocker_port,
2918				    __be32 ip_addr)
2919{
2920	struct net_device *dev = rocker_port->dev;
2921	struct neighbour *n = __ipv4_neigh_lookup(dev, (__force u32)ip_addr);
2922	int err = 0;
2923
2924	if (!n) {
2925		n = neigh_create(&arp_tbl, &ip_addr, dev);
2926		if (IS_ERR(n))
2927			return IS_ERR(n);
2928	}
2929
2930	/* If the neigh is already resolved, then go ahead and
2931	 * install the entry, otherwise start the ARP process to
2932	 * resolve the neigh.
2933	 */
2934
2935	if (n->nud_state & NUD_VALID)
2936		err = rocker_port_ipv4_neigh(rocker_port, 0, ip_addr, n->ha);
2937	else
2938		neigh_event_send(n, NULL);
2939
2940	neigh_release(n);
2941	return err;
2942}
2943
2944static int rocker_port_ipv4_nh(struct rocker_port *rocker_port, int flags,
2945			       __be32 ip_addr, u32 *index)
2946{
2947	struct rocker *rocker = rocker_port->rocker;
2948	struct rocker_neigh_tbl_entry *entry;
2949	struct rocker_neigh_tbl_entry *found;
2950	unsigned long lock_flags;
2951	bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
2952	bool updating;
2953	bool removing;
2954	bool resolved = true;
2955	int err = 0;
2956
2957	entry = kzalloc(sizeof(*entry), rocker_op_flags_gfp(flags));
2958	if (!entry)
2959		return -ENOMEM;
2960
2961	spin_lock_irqsave(&rocker->neigh_tbl_lock, lock_flags);
2962
2963	found = rocker_neigh_tbl_find(rocker, ip_addr);
2964	if (found)
2965		*index = found->index;
2966
2967	updating = found && adding;
2968	removing = found && !adding;
2969	adding = !found && adding;
2970
2971	if (adding) {
2972		entry->ip_addr = ip_addr;
2973		entry->dev = rocker_port->dev;
2974		_rocker_neigh_add(rocker, entry);
2975		*index = entry->index;
2976		resolved = false;
2977	} else if (removing) {
2978		_rocker_neigh_del(rocker, found);
2979	} else if (updating) {
2980		_rocker_neigh_update(rocker, found, NULL, false);
2981		resolved = !is_zero_ether_addr(found->eth_dst);
2982	} else {
2983		err = -ENOENT;
2984	}
2985
2986	spin_unlock_irqrestore(&rocker->neigh_tbl_lock, lock_flags);
2987
2988	if (!adding)
2989		kfree(entry);
2990
2991	if (err)
2992		return err;
2993
2994	/* Resolved means neigh ip_addr is resolved to neigh mac. */
2995
2996	if (!resolved)
2997		err = rocker_port_ipv4_resolve(rocker_port, ip_addr);
2998
2999	return err;
3000}
3001
3002static int rocker_port_vlan_flood_group(struct rocker_port *rocker_port,
3003					int flags, __be16 vlan_id)
3004{
3005	struct rocker_port *p;
3006	struct rocker *rocker = rocker_port->rocker;
3007	u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
3008	u32 *group_ids;
3009	u8 group_count = 0;
3010	int err = 0;
3011	int i;
3012
3013	group_ids = kcalloc(rocker->port_count, sizeof(u32),
3014			    rocker_op_flags_gfp(flags));
3015	if (!group_ids)
3016		return -ENOMEM;
3017
3018	/* Adjust the flood group for this VLAN.  The flood group
3019	 * references an L2 interface group for each port in this
3020	 * VLAN.
3021	 */
3022
3023	for (i = 0; i < rocker->port_count; i++) {
3024		p = rocker->ports[i];
3025		if (!rocker_port_is_bridged(p))
3026			continue;
3027		if (test_bit(ntohs(vlan_id), p->vlan_bitmap)) {
3028			group_ids[group_count++] =
3029				ROCKER_GROUP_L2_INTERFACE(vlan_id, p->pport);
3030		}
3031	}
3032
3033	/* If there are no bridged ports in this VLAN, we're done */
3034	if (group_count == 0)
3035		goto no_ports_in_vlan;
3036
3037	err = rocker_group_l2_flood(rocker_port, flags, vlan_id,
3038				    group_count, group_ids,
3039				    group_id);
3040	if (err)
3041		netdev_err(rocker_port->dev,
3042			   "Error (%d) port VLAN l2 flood group\n", err);
3043
3044no_ports_in_vlan:
3045	kfree(group_ids);
3046	return err;
3047}
3048
3049static int rocker_port_vlan_l2_groups(struct rocker_port *rocker_port,
3050				      int flags, __be16 vlan_id,
3051				      bool pop_vlan)
3052{
3053	struct rocker *rocker = rocker_port->rocker;
3054	struct rocker_port *p;
3055	bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
3056	u32 out_pport;
3057	int ref = 0;
3058	int err;
3059	int i;
3060
3061	/* An L2 interface group for this port in this VLAN, but
3062	 * only when port STP state is LEARNING|FORWARDING.
3063	 */
3064
3065	if (rocker_port->stp_state == BR_STATE_LEARNING ||
3066	    rocker_port->stp_state == BR_STATE_FORWARDING) {
3067		out_pport = rocker_port->pport;
3068		err = rocker_group_l2_interface(rocker_port, flags,
3069						vlan_id, out_pport,
3070						pop_vlan);
3071		if (err) {
3072			netdev_err(rocker_port->dev,
3073				   "Error (%d) port VLAN l2 group for pport %d\n",
3074				   err, out_pport);
3075			return err;
3076		}
3077	}
3078
3079	/* An L2 interface group for this VLAN to CPU port.
3080	 * Add when first port joins this VLAN and destroy when
3081	 * last port leaves this VLAN.
3082	 */
3083
3084	for (i = 0; i < rocker->port_count; i++) {
3085		p = rocker->ports[i];
3086		if (test_bit(ntohs(vlan_id), p->vlan_bitmap))
3087			ref++;
3088	}
3089
3090	if ((!adding || ref != 1) && (adding || ref != 0))
3091		return 0;
3092
3093	out_pport = 0;
3094	err = rocker_group_l2_interface(rocker_port, flags,
3095					vlan_id, out_pport,
3096					pop_vlan);
3097	if (err) {
3098		netdev_err(rocker_port->dev,
3099			   "Error (%d) port VLAN l2 group for CPU port\n", err);
3100		return err;
3101	}
3102
3103	return 0;
3104}
3105
3106static struct rocker_ctrl {
3107	const u8 *eth_dst;
3108	const u8 *eth_dst_mask;
3109	__be16 eth_type;
3110	bool acl;
3111	bool bridge;
3112	bool term;
3113	bool copy_to_cpu;
3114} rocker_ctrls[] = {
3115	[ROCKER_CTRL_LINK_LOCAL_MCAST] = {
3116		/* pass link local multicast pkts up to CPU for filtering */
3117		.eth_dst = ll_mac,
3118		.eth_dst_mask = ll_mask,
3119		.acl = true,
3120	},
3121	[ROCKER_CTRL_LOCAL_ARP] = {
3122		/* pass local ARP pkts up to CPU */
3123		.eth_dst = zero_mac,
3124		.eth_dst_mask = zero_mac,
3125		.eth_type = htons(ETH_P_ARP),
3126		.acl = true,
3127	},
3128	[ROCKER_CTRL_IPV4_MCAST] = {
3129		/* pass IPv4 mcast pkts up to CPU, RFC 1112 */
3130		.eth_dst = ipv4_mcast,
3131		.eth_dst_mask = ipv4_mask,
3132		.eth_type = htons(ETH_P_IP),
3133		.term  = true,
3134		.copy_to_cpu = true,
3135	},
3136	[ROCKER_CTRL_IPV6_MCAST] = {
3137		/* pass IPv6 mcast pkts up to CPU, RFC 2464 */
3138		.eth_dst = ipv6_mcast,
3139		.eth_dst_mask = ipv6_mask,
3140		.eth_type = htons(ETH_P_IPV6),
3141		.term  = true,
3142		.copy_to_cpu = true,
3143	},
3144	[ROCKER_CTRL_DFLT_BRIDGING] = {
3145		/* flood any pkts on vlan */
3146		.bridge = true,
3147		.copy_to_cpu = true,
3148	},
3149};
3150
3151static int rocker_port_ctrl_vlan_acl(struct rocker_port *rocker_port,
3152				     int flags, struct rocker_ctrl *ctrl,
3153				     __be16 vlan_id)
3154{
3155	u32 in_pport = rocker_port->pport;
3156	u32 in_pport_mask = 0xffffffff;
3157	u32 out_pport = 0;
3158	u8 *eth_src = NULL;
3159	u8 *eth_src_mask = NULL;
3160	__be16 vlan_id_mask = htons(0xffff);
3161	u8 ip_proto = 0;
3162	u8 ip_proto_mask = 0;
3163	u8 ip_tos = 0;
3164	u8 ip_tos_mask = 0;
3165	u32 group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
3166	int err;
3167
3168	err = rocker_flow_tbl_acl(rocker_port, flags,
3169				  in_pport, in_pport_mask,
3170				  eth_src, eth_src_mask,
3171				  ctrl->eth_dst, ctrl->eth_dst_mask,
3172				  ctrl->eth_type,
3173				  vlan_id, vlan_id_mask,
3174				  ip_proto, ip_proto_mask,
3175				  ip_tos, ip_tos_mask,
3176				  group_id);
3177
3178	if (err)
3179		netdev_err(rocker_port->dev, "Error (%d) ctrl ACL\n", err);
3180
3181	return err;
3182}
3183
3184static int rocker_port_ctrl_vlan_bridge(struct rocker_port *rocker_port,
3185					int flags, struct rocker_ctrl *ctrl,
3186					__be16 vlan_id)
3187{
3188	enum rocker_of_dpa_table_id goto_tbl =
3189		ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
3190	u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
3191	u32 tunnel_id = 0;
3192	int err;
3193
3194	if (!rocker_port_is_bridged(rocker_port))
3195		return 0;
3196
3197	err = rocker_flow_tbl_bridge(rocker_port, flags,
3198				     ctrl->eth_dst, ctrl->eth_dst_mask,
3199				     vlan_id, tunnel_id,
3200				     goto_tbl, group_id, ctrl->copy_to_cpu);
3201
3202	if (err)
3203		netdev_err(rocker_port->dev, "Error (%d) ctrl FLOOD\n", err);
3204
3205	return err;
3206}
3207
3208static int rocker_port_ctrl_vlan_term(struct rocker_port *rocker_port,
3209				      int flags, struct rocker_ctrl *ctrl,
3210				      __be16 vlan_id)
3211{
3212	u32 in_pport_mask = 0xffffffff;
3213	__be16 vlan_id_mask = htons(0xffff);
3214	int err;
3215
3216	if (ntohs(vlan_id) == 0)
3217		vlan_id = rocker_port->internal_vlan_id;
3218
3219	err = rocker_flow_tbl_term_mac(rocker_port,
3220				       rocker_port->pport, in_pport_mask,
3221				       ctrl->eth_type, ctrl->eth_dst,
3222				       ctrl->eth_dst_mask, vlan_id,
3223				       vlan_id_mask, ctrl->copy_to_cpu,
3224				       flags);
3225
3226	if (err)
3227		netdev_err(rocker_port->dev, "Error (%d) ctrl term\n", err);
3228
3229	return err;
3230}
3231
3232static int rocker_port_ctrl_vlan(struct rocker_port *rocker_port, int flags,
3233				 struct rocker_ctrl *ctrl, __be16 vlan_id)
3234{
3235	if (ctrl->acl)
3236		return rocker_port_ctrl_vlan_acl(rocker_port, flags,
3237						 ctrl, vlan_id);
3238	if (ctrl->bridge)
3239		return rocker_port_ctrl_vlan_bridge(rocker_port, flags,
3240						    ctrl, vlan_id);
3241
3242	if (ctrl->term)
3243		return rocker_port_ctrl_vlan_term(rocker_port, flags,
3244						  ctrl, vlan_id);
3245
3246	return -EOPNOTSUPP;
3247}
3248
3249static int rocker_port_ctrl_vlan_add(struct rocker_port *rocker_port,
3250				     int flags, __be16 vlan_id)
3251{
3252	int err = 0;
3253	int i;
3254
3255	for (i = 0; i < ROCKER_CTRL_MAX; i++) {
3256		if (rocker_port->ctrls[i]) {
3257			err = rocker_port_ctrl_vlan(rocker_port, flags,
3258						    &rocker_ctrls[i], vlan_id);
3259			if (err)
3260				return err;
3261		}
3262	}
3263
3264	return err;
3265}
3266
3267static int rocker_port_ctrl(struct rocker_port *rocker_port, int flags,
3268			    struct rocker_ctrl *ctrl)
3269{
3270	u16 vid;
3271	int err = 0;
3272
3273	for (vid = 1; vid < VLAN_N_VID; vid++) {
3274		if (!test_bit(vid, rocker_port->vlan_bitmap))
3275			continue;
3276		err = rocker_port_ctrl_vlan(rocker_port, flags,
3277					    ctrl, htons(vid));
3278		if (err)
3279			break;
3280	}
3281
3282	return err;
3283}
3284
3285static int rocker_port_vlan(struct rocker_port *rocker_port, int flags,
3286			    u16 vid)
3287{
3288	enum rocker_of_dpa_table_id goto_tbl =
3289		ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
3290	u32 in_pport = rocker_port->pport;
3291	__be16 vlan_id = htons(vid);
3292	__be16 vlan_id_mask = htons(0xffff);
3293	__be16 internal_vlan_id;
3294	bool untagged;
3295	bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
3296	int err;
3297
3298	internal_vlan_id = rocker_port_vid_to_vlan(rocker_port, vid, &untagged);
3299
3300	if (adding && test_and_set_bit(ntohs(internal_vlan_id),
3301				       rocker_port->vlan_bitmap))
3302			return 0; /* already added */
3303	else if (!adding && !test_and_clear_bit(ntohs(internal_vlan_id),
3304						rocker_port->vlan_bitmap))
3305			return 0; /* already removed */
3306
3307	if (adding) {
3308		err = rocker_port_ctrl_vlan_add(rocker_port, flags,
3309						internal_vlan_id);
3310		if (err) {
3311			netdev_err(rocker_port->dev,
3312				   "Error (%d) port ctrl vlan add\n", err);
3313			return err;
3314		}
3315	}
3316
3317	err = rocker_port_vlan_l2_groups(rocker_port, flags,
3318					 internal_vlan_id, untagged);
3319	if (err) {
3320		netdev_err(rocker_port->dev,
3321			   "Error (%d) port VLAN l2 groups\n", err);
3322		return err;
3323	}
3324
3325	err = rocker_port_vlan_flood_group(rocker_port, flags,
3326					   internal_vlan_id);
3327	if (err) {
3328		netdev_err(rocker_port->dev,
3329			   "Error (%d) port VLAN l2 flood group\n", err);
3330		return err;
3331	}
3332
3333	err = rocker_flow_tbl_vlan(rocker_port, flags,
3334				   in_pport, vlan_id, vlan_id_mask,
3335				   goto_tbl, untagged, internal_vlan_id);
3336	if (err)
3337		netdev_err(rocker_port->dev,
3338			   "Error (%d) port VLAN table\n", err);
3339
3340	return err;
3341}
3342
3343static int rocker_port_ig_tbl(struct rocker_port *rocker_port, int flags)
3344{
3345	enum rocker_of_dpa_table_id goto_tbl;
3346	u32 in_pport;
3347	u32 in_pport_mask;
3348	int err;
3349
3350	/* Normal Ethernet Frames.  Matches pkts from any local physical
3351	 * ports.  Goto VLAN tbl.
3352	 */
3353
3354	in_pport = 0;
3355	in_pport_mask = 0xffff0000;
3356	goto_tbl = ROCKER_OF_DPA_TABLE_ID_VLAN;
3357
3358	err = rocker_flow_tbl_ig_port(rocker_port, flags,
3359				      in_pport, in_pport_mask,
3360				      goto_tbl);
3361	if (err)
3362		netdev_err(rocker_port->dev,
3363			   "Error (%d) ingress port table entry\n", err);
3364
3365	return err;
3366}
3367
3368struct rocker_fdb_learn_work {
3369	struct work_struct work;
3370	struct net_device *dev;
3371	int flags;
3372	u8 addr[ETH_ALEN];
3373	u16 vid;
3374};
3375
3376static void rocker_port_fdb_learn_work(struct work_struct *work)
3377{
3378	struct rocker_fdb_learn_work *lw =
3379		container_of(work, struct rocker_fdb_learn_work, work);
3380	bool removing = (lw->flags & ROCKER_OP_FLAG_REMOVE);
3381	bool learned = (lw->flags & ROCKER_OP_FLAG_LEARNED);
3382	struct netdev_switch_notifier_fdb_info info;
3383
3384	info.addr = lw->addr;
3385	info.vid = lw->vid;
3386
3387	rtnl_lock();
3388	if (learned && removing)
3389		call_netdev_switch_notifiers(NETDEV_SWITCH_FDB_DEL,
3390					     lw->dev, &info.info);
3391	else if (learned && !removing)
3392		call_netdev_switch_notifiers(NETDEV_SWITCH_FDB_ADD,
3393					     lw->dev, &info.info);
3394	rtnl_unlock();
3395
3396	kfree(work);
3397}
3398
3399static int rocker_port_fdb_learn(struct rocker_port *rocker_port,
3400				 int flags, const u8 *addr, __be16 vlan_id)
3401{
3402	struct rocker_fdb_learn_work *lw;
3403	enum rocker_of_dpa_table_id goto_tbl =
3404		ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
3405	u32 out_pport = rocker_port->pport;
3406	u32 tunnel_id = 0;
3407	u32 group_id = ROCKER_GROUP_NONE;
3408	bool syncing = !!(rocker_port->brport_flags & BR_LEARNING_SYNC);
3409	bool copy_to_cpu = false;
3410	int err;
3411
3412	if (rocker_port_is_bridged(rocker_port))
3413		group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
3414
3415	if (!(flags & ROCKER_OP_FLAG_REFRESH)) {
3416		err = rocker_flow_tbl_bridge(rocker_port, flags, addr, NULL,
3417					     vlan_id, tunnel_id, goto_tbl,
3418					     group_id, copy_to_cpu);
3419		if (err)
3420			return err;
3421	}
3422
3423	if (!syncing)
3424		return 0;
3425
3426	if (!rocker_port_is_bridged(rocker_port))
3427		return 0;
3428
3429	lw = kmalloc(sizeof(*lw), rocker_op_flags_gfp(flags));
3430	if (!lw)
3431		return -ENOMEM;
3432
3433	INIT_WORK(&lw->work, rocker_port_fdb_learn_work);
3434
3435	lw->dev = rocker_port->dev;
3436	lw->flags = flags;
3437	ether_addr_copy(lw->addr, addr);
3438	lw->vid = rocker_port_vlan_to_vid(rocker_port, vlan_id);
3439
3440	schedule_work(&lw->work);
3441
3442	return 0;
3443}
3444
3445static struct rocker_fdb_tbl_entry *
3446rocker_fdb_tbl_find(struct rocker *rocker, struct rocker_fdb_tbl_entry *match)
3447{
3448	struct rocker_fdb_tbl_entry *found;
3449
3450	hash_for_each_possible(rocker->fdb_tbl, found, entry, match->key_crc32)
3451		if (memcmp(&found->key, &match->key, sizeof(found->key)) == 0)
3452			return found;
3453
3454	return NULL;
3455}
3456
3457static int rocker_port_fdb(struct rocker_port *rocker_port,
3458			   const unsigned char *addr,
3459			   __be16 vlan_id, int flags)
3460{
3461	struct rocker *rocker = rocker_port->rocker;
3462	struct rocker_fdb_tbl_entry *fdb;
3463	struct rocker_fdb_tbl_entry *found;
3464	bool removing = (flags & ROCKER_OP_FLAG_REMOVE);
3465	unsigned long lock_flags;
3466
3467	fdb = kzalloc(sizeof(*fdb), rocker_op_flags_gfp(flags));
3468	if (!fdb)
3469		return -ENOMEM;
3470
3471	fdb->learned = (flags & ROCKER_OP_FLAG_LEARNED);
3472	fdb->key.pport = rocker_port->pport;
3473	ether_addr_copy(fdb->key.addr, addr);
3474	fdb->key.vlan_id = vlan_id;
3475	fdb->key_crc32 = crc32(~0, &fdb->key, sizeof(fdb->key));
3476
3477	spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
3478
3479	found = rocker_fdb_tbl_find(rocker, fdb);
3480
3481	if (removing && found) {
3482		kfree(fdb);
3483		hash_del(&found->entry);
3484	} else if (!removing && !found) {
3485		hash_add(rocker->fdb_tbl, &fdb->entry, fdb->key_crc32);
3486	}
3487
3488	spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);
3489
3490	/* Check if adding and already exists, or removing and can't find */
3491	if (!found != !removing) {
3492		kfree(fdb);
3493		if (!found && removing)
3494			return 0;
3495		/* Refreshing existing to update aging timers */
3496		flags |= ROCKER_OP_FLAG_REFRESH;
3497	}
3498
3499	return rocker_port_fdb_learn(rocker_port, flags, addr, vlan_id);
3500}
3501
3502static int rocker_port_fdb_flush(struct rocker_port *rocker_port)
3503{
3504	struct rocker *rocker = rocker_port->rocker;
3505	struct rocker_fdb_tbl_entry *found;
3506	unsigned long lock_flags;
3507	int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_REMOVE;
3508	struct hlist_node *tmp;
3509	int bkt;
3510	int err = 0;
3511
3512	if (rocker_port->stp_state == BR_STATE_LEARNING ||
3513	    rocker_port->stp_state == BR_STATE_FORWARDING)
3514		return 0;
3515
3516	spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
3517
3518	hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) {
3519		if (found->key.pport != rocker_port->pport)
3520			continue;
3521		if (!found->learned)
3522			continue;
3523		err = rocker_port_fdb_learn(rocker_port, flags,
3524					    found->key.addr,
3525					    found->key.vlan_id);
3526		if (err)
3527			goto err_out;
3528		hash_del(&found->entry);
3529	}
3530
3531err_out:
3532	spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);
3533
3534	return err;
3535}
3536
3537static int rocker_port_router_mac(struct rocker_port *rocker_port,
3538				  int flags, __be16 vlan_id)
3539{
3540	u32 in_pport_mask = 0xffffffff;
3541	__be16 eth_type;
3542	const u8 *dst_mac_mask = ff_mac;
3543	__be16 vlan_id_mask = htons(0xffff);
3544	bool copy_to_cpu = false;
3545	int err;
3546
3547	if (ntohs(vlan_id) == 0)
3548		vlan_id = rocker_port->internal_vlan_id;
3549
3550	eth_type = htons(ETH_P_IP);
3551	err = rocker_flow_tbl_term_mac(rocker_port,
3552				       rocker_port->pport, in_pport_mask,
3553				       eth_type, rocker_port->dev->dev_addr,
3554				       dst_mac_mask, vlan_id, vlan_id_mask,
3555				       copy_to_cpu, flags);
3556	if (err)
3557		return err;
3558
3559	eth_type = htons(ETH_P_IPV6);
3560	err = rocker_flow_tbl_term_mac(rocker_port,
3561				       rocker_port->pport, in_pport_mask,
3562				       eth_type, rocker_port->dev->dev_addr,
3563				       dst_mac_mask, vlan_id, vlan_id_mask,
3564				       copy_to_cpu, flags);
3565
3566	return err;
3567}
3568
3569static int rocker_port_fwding(struct rocker_port *rocker_port)
3570{
3571	bool pop_vlan;
3572	u32 out_pport;
3573	__be16 vlan_id;
3574	u16 vid;
3575	int flags = ROCKER_OP_FLAG_NOWAIT;
3576	int err;
3577
3578	/* Port will be forwarding-enabled if its STP state is LEARNING
3579	 * or FORWARDING.  Traffic from CPU can still egress, regardless of
3580	 * port STP state.  Use L2 interface group on port VLANs as a way
3581	 * to toggle port forwarding: if forwarding is disabled, L2
3582	 * interface group will not exist.
3583	 */
3584
3585	if (rocker_port->stp_state != BR_STATE_LEARNING &&
3586	    rocker_port->stp_state != BR_STATE_FORWARDING)
3587		flags |= ROCKER_OP_FLAG_REMOVE;
3588
3589	out_pport = rocker_port->pport;
3590	for (vid = 1; vid < VLAN_N_VID; vid++) {
3591		if (!test_bit(vid, rocker_port->vlan_bitmap))
3592			continue;
3593		vlan_id = htons(vid);
3594		pop_vlan = rocker_vlan_id_is_internal(vlan_id);
3595		err = rocker_group_l2_interface(rocker_port, flags,
3596						vlan_id, out_pport,
3597						pop_vlan);
3598		if (err) {
3599			netdev_err(rocker_port->dev,
3600				   "Error (%d) port VLAN l2 group for pport %d\n",
3601				   err, out_pport);
3602			return err;
3603		}
3604	}
3605
3606	return 0;
3607}
3608
3609static int rocker_port_stp_update(struct rocker_port *rocker_port, u8 state)
3610{
3611	bool want[ROCKER_CTRL_MAX] = { 0, };
3612	int flags;
3613	int err;
3614	int i;
3615
3616	if (rocker_port->stp_state == state)
3617		return 0;
3618
3619	rocker_port->stp_state = state;
3620
3621	switch (state) {
3622	case BR_STATE_DISABLED:
3623		/* port is completely disabled */
3624		break;
3625	case BR_STATE_LISTENING:
3626	case BR_STATE_BLOCKING:
3627		want[ROCKER_CTRL_LINK_LOCAL_MCAST] = true;
3628		break;
3629	case BR_STATE_LEARNING:
3630	case BR_STATE_FORWARDING:
3631		want[ROCKER_CTRL_LINK_LOCAL_MCAST] = true;
3632		want[ROCKER_CTRL_IPV4_MCAST] = true;
3633		want[ROCKER_CTRL_IPV6_MCAST] = true;
3634		if (rocker_port_is_bridged(rocker_port))
3635			want[ROCKER_CTRL_DFLT_BRIDGING] = true;
3636		else
3637			want[ROCKER_CTRL_LOCAL_ARP] = true;
3638		break;
3639	}
3640
3641	for (i = 0; i < ROCKER_CTRL_MAX; i++) {
3642		if (want[i] != rocker_port->ctrls[i]) {
3643			flags = ROCKER_OP_FLAG_NOWAIT |
3644				(want[i] ? 0 : ROCKER_OP_FLAG_REMOVE);
3645			err = rocker_port_ctrl(rocker_port, flags,
3646					       &rocker_ctrls[i]);
3647			if (err)
3648				return err;
3649			rocker_port->ctrls[i] = want[i];
3650		}
3651	}
3652
3653	err = rocker_port_fdb_flush(rocker_port);
3654	if (err)
3655		return err;
3656
3657	return rocker_port_fwding(rocker_port);
3658}
3659
3660static int rocker_port_fwd_enable(struct rocker_port *rocker_port)
3661{
3662	if (rocker_port_is_bridged(rocker_port))
3663		/* bridge STP will enable port */
3664		return 0;
3665
3666	/* port is not bridged, so simulate going to FORWARDING state */
3667	return rocker_port_stp_update(rocker_port, BR_STATE_FORWARDING);
3668}
3669
3670static int rocker_port_fwd_disable(struct rocker_port *rocker_port)
3671{
3672	if (rocker_port_is_bridged(rocker_port))
3673		/* bridge STP will disable port */
3674		return 0;
3675
3676	/* port is not bridged, so simulate going to DISABLED state */
3677	return rocker_port_stp_update(rocker_port, BR_STATE_DISABLED);
3678}
3679
3680static struct rocker_internal_vlan_tbl_entry *
3681rocker_internal_vlan_tbl_find(struct rocker *rocker, int ifindex)
3682{
3683	struct rocker_internal_vlan_tbl_entry *found;
3684
3685	hash_for_each_possible(rocker->internal_vlan_tbl, found,
3686			       entry, ifindex) {
3687		if (found->ifindex == ifindex)
3688			return found;
3689	}
3690
3691	return NULL;
3692}
3693
3694static __be16 rocker_port_internal_vlan_id_get(struct rocker_port *rocker_port,
3695					       int ifindex)
3696{
3697	struct rocker *rocker = rocker_port->rocker;
3698	struct rocker_internal_vlan_tbl_entry *entry;
3699	struct rocker_internal_vlan_tbl_entry *found;
3700	unsigned long lock_flags;
3701	int i;
3702
3703	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3704	if (!entry)
3705		return 0;
3706
3707	entry->ifindex = ifindex;
3708
3709	spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, lock_flags);
3710
3711	found = rocker_internal_vlan_tbl_find(rocker, ifindex);
3712	if (found) {
3713		kfree(entry);
3714		goto found;
3715	}
3716
3717	found = entry;
3718	hash_add(rocker->internal_vlan_tbl, &found->entry, found->ifindex);
3719
3720	for (i = 0; i < ROCKER_N_INTERNAL_VLANS; i++) {
3721		if (test_and_set_bit(i, rocker->internal_vlan_bitmap))
3722			continue;
3723		found->vlan_id = htons(ROCKER_INTERNAL_VLAN_ID_BASE + i);
3724		goto found;
3725	}
3726
3727	netdev_err(rocker_port->dev, "Out of internal VLAN IDs\n");
3728
3729found:
3730	found->ref_count++;
3731	spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, lock_flags);
3732
3733	return found->vlan_id;
3734}
3735
3736static void rocker_port_internal_vlan_id_put(struct rocker_port *rocker_port,
3737					     int ifindex)
3738{
3739	struct rocker *rocker = rocker_port->rocker;
3740	struct rocker_internal_vlan_tbl_entry *found;
3741	unsigned long lock_flags;
3742	unsigned long bit;
3743
3744	spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, lock_flags);
3745
3746	found = rocker_internal_vlan_tbl_find(rocker, ifindex);
3747	if (!found) {
3748		netdev_err(rocker_port->dev,
3749			   "ifindex (%d) not found in internal VLAN tbl\n",
3750			   ifindex);
3751		goto not_found;
3752	}
3753
3754	if (--found->ref_count <= 0) {
3755		bit = ntohs(found->vlan_id) - ROCKER_INTERNAL_VLAN_ID_BASE;
3756		clear_bit(bit, rocker->internal_vlan_bitmap);
3757		hash_del(&found->entry);
3758		kfree(found);
3759	}
3760
3761not_found:
3762	spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, lock_flags);
3763}
3764
3765static int rocker_port_fib_ipv4(struct rocker_port *rocker_port, __be32 dst,
3766				int dst_len, struct fib_info *fi, u32 tb_id,
3767				int flags)
3768{
3769	struct fib_nh *nh;
3770	__be16 eth_type = htons(ETH_P_IP);
3771	__be32 dst_mask = inet_make_mask(dst_len);
3772	__be16 internal_vlan_id = rocker_port->internal_vlan_id;
3773	u32 priority = fi->fib_priority;
3774	enum rocker_of_dpa_table_id goto_tbl =
3775		ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
3776	u32 group_id;
3777	bool nh_on_port;
3778	bool has_gw;
3779	u32 index;
3780	int err;
3781
3782	/* XXX support ECMP */
3783
3784	nh = fi->fib_nh;
3785	nh_on_port = (fi->fib_dev == rocker_port->dev);
3786	has_gw = !!nh->nh_gw;
3787
3788	if (has_gw && nh_on_port) {
3789		err = rocker_port_ipv4_nh(rocker_port, flags,
3790					  nh->nh_gw, &index);
3791		if (err)
3792			return err;
3793
3794		group_id = ROCKER_GROUP_L3_UNICAST(index);
3795	} else {
3796		/* Send to CPU for processing */
3797		group_id = ROCKER_GROUP_L2_INTERFACE(internal_vlan_id, 0);
3798	}
3799
3800	err = rocker_flow_tbl_ucast4_routing(rocker_port, eth_type, dst,
3801					     dst_mask, priority, goto_tbl,
3802					     group_id, flags);
3803	if (err)
3804		netdev_err(rocker_port->dev, "Error (%d) IPv4 route %pI4\n",
3805			   err, &dst);
3806
3807	return err;
3808}
3809
3810/*****************
3811 * Net device ops
3812 *****************/
3813
3814static int rocker_port_open(struct net_device *dev)
3815{
3816	struct rocker_port *rocker_port = netdev_priv(dev);
3817	int err;
3818
3819	err = rocker_port_dma_rings_init(rocker_port);
3820	if (err)
3821		return err;
3822
3823	err = request_irq(rocker_msix_tx_vector(rocker_port),
3824			  rocker_tx_irq_handler, 0,
3825			  rocker_driver_name, rocker_port);
3826	if (err) {
3827		netdev_err(rocker_port->dev, "cannot assign tx irq\n");
3828		goto err_request_tx_irq;
3829	}
3830
3831	err = request_irq(rocker_msix_rx_vector(rocker_port),
3832			  rocker_rx_irq_handler, 0,
3833			  rocker_driver_name, rocker_port);
3834	if (err) {
3835		netdev_err(rocker_port->dev, "cannot assign rx irq\n");
3836		goto err_request_rx_irq;
3837	}
3838
3839	err = rocker_port_fwd_enable(rocker_port);
3840	if (err)
3841		goto err_fwd_enable;
3842
3843	napi_enable(&rocker_port->napi_tx);
3844	napi_enable(&rocker_port->napi_rx);
3845	rocker_port_set_enable(rocker_port, true);
3846	netif_start_queue(dev);
3847	return 0;
3848
3849err_fwd_enable:
3850	free_irq(rocker_msix_rx_vector(rocker_port), rocker_port);
3851err_request_rx_irq:
3852	free_irq(rocker_msix_tx_vector(rocker_port), rocker_port);
3853err_request_tx_irq:
3854	rocker_port_dma_rings_fini(rocker_port);
3855	return err;
3856}
3857
3858static int rocker_port_stop(struct net_device *dev)
3859{
3860	struct rocker_port *rocker_port = netdev_priv(dev);
3861
3862	netif_stop_queue(dev);
3863	rocker_port_set_enable(rocker_port, false);
3864	napi_disable(&rocker_port->napi_rx);
3865	napi_disable(&rocker_port->napi_tx);
3866	rocker_port_fwd_disable(rocker_port);
3867	free_irq(rocker_msix_rx_vector(rocker_port), rocker_port);
3868	free_irq(rocker_msix_tx_vector(rocker_port), rocker_port);
3869	rocker_port_dma_rings_fini(rocker_port);
3870
3871	return 0;
3872}
3873
3874static void rocker_tx_desc_frags_unmap(struct rocker_port *rocker_port,
3875				       struct rocker_desc_info *desc_info)
3876{
3877	struct rocker *rocker = rocker_port->rocker;
3878	struct pci_dev *pdev = rocker->pdev;
3879	struct rocker_tlv *attrs[ROCKER_TLV_TX_MAX + 1];
3880	struct rocker_tlv *attr;
3881	int rem;
3882
3883	rocker_tlv_parse_desc(attrs, ROCKER_TLV_TX_MAX, desc_info);
3884	if (!attrs[ROCKER_TLV_TX_FRAGS])
3885		return;
3886	rocker_tlv_for_each_nested(attr, attrs[ROCKER_TLV_TX_FRAGS], rem) {
3887		struct rocker_tlv *frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_MAX + 1];
3888		dma_addr_t dma_handle;
3889		size_t len;
3890
3891		if (rocker_tlv_type(attr) != ROCKER_TLV_TX_FRAG)
3892			continue;
3893		rocker_tlv_parse_nested(frag_attrs, ROCKER_TLV_TX_FRAG_ATTR_MAX,
3894					attr);
3895		if (!frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_ADDR] ||
3896		    !frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_LEN])
3897			continue;
3898		dma_handle = rocker_tlv_get_u64(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_ADDR]);
3899		len = rocker_tlv_get_u16(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_LEN]);
3900		pci_unmap_single(pdev, dma_handle, len, DMA_TO_DEVICE);
3901	}
3902}
3903
3904static int rocker_tx_desc_frag_map_put(struct rocker_port *rocker_port,
3905				       struct rocker_desc_info *desc_info,
3906				       char *buf, size_t buf_len)
3907{
3908	struct rocker *rocker = rocker_port->rocker;
3909	struct pci_dev *pdev = rocker->pdev;
3910	dma_addr_t dma_handle;
3911	struct rocker_tlv *frag;
3912
3913	dma_handle = pci_map_single(pdev, buf, buf_len, DMA_TO_DEVICE);
3914	if (unlikely(pci_dma_mapping_error(pdev, dma_handle))) {
3915		if (net_ratelimit())
3916			netdev_err(rocker_port->dev, "failed to dma map tx frag\n");
3917		return -EIO;
3918	}
3919	frag = rocker_tlv_nest_start(desc_info, ROCKER_TLV_TX_FRAG);
3920	if (!frag)
3921		goto unmap_frag;
3922	if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_TX_FRAG_ATTR_ADDR,
3923			       dma_handle))
3924		goto nest_cancel;
3925	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_TX_FRAG_ATTR_LEN,
3926			       buf_len))
3927		goto nest_cancel;
3928	rocker_tlv_nest_end(desc_info, frag);
3929	return 0;
3930
3931nest_cancel:
3932	rocker_tlv_nest_cancel(desc_info, frag);
3933unmap_frag:
3934	pci_unmap_single(pdev, dma_handle, buf_len, DMA_TO_DEVICE);
3935	return -EMSGSIZE;
3936}
3937
3938static netdev_tx_t rocker_port_xmit(struct sk_buff *skb, struct net_device *dev)
3939{
3940	struct rocker_port *rocker_port = netdev_priv(dev);
3941	struct rocker *rocker = rocker_port->rocker;
3942	struct rocker_desc_info *desc_info;
3943	struct rocker_tlv *frags;
3944	int i;
3945	int err;
3946
3947	desc_info = rocker_desc_head_get(&rocker_port->tx_ring);
3948	if (unlikely(!desc_info)) {
3949		if (net_ratelimit())
3950			netdev_err(dev, "tx ring full when queue awake\n");
3951		return NETDEV_TX_BUSY;
3952	}
3953
3954	rocker_desc_cookie_ptr_set(desc_info, skb);
3955
3956	frags = rocker_tlv_nest_start(desc_info, ROCKER_TLV_TX_FRAGS);
3957	if (!frags)
3958		goto out;
3959	err = rocker_tx_desc_frag_map_put(rocker_port, desc_info,
3960					  skb->data, skb_headlen(skb));
3961	if (err)
3962		goto nest_cancel;
3963	if (skb_shinfo(skb)->nr_frags > ROCKER_TX_FRAGS_MAX)
3964		goto nest_cancel;
3965
3966	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3967		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3968
3969		err = rocker_tx_desc_frag_map_put(rocker_port, desc_info,
3970						  skb_frag_address(frag),
3971						  skb_frag_size(frag));
3972		if (err)
3973			goto unmap_frags;
3974	}
3975	rocker_tlv_nest_end(desc_info, frags);
3976
3977	rocker_desc_gen_clear(desc_info);
3978	rocker_desc_head_set(rocker, &rocker_port->tx_ring, desc_info);
3979
3980	desc_info = rocker_desc_head_get(&rocker_port->tx_ring);
3981	if (!desc_info)
3982		netif_stop_queue(dev);
3983
3984	return NETDEV_TX_OK;
3985
3986unmap_frags:
3987	rocker_tx_desc_frags_unmap(rocker_port, desc_info);
3988nest_cancel:
3989	rocker_tlv_nest_cancel(desc_info, frags);
3990out:
3991	dev_kfree_skb(skb);
3992	dev->stats.tx_dropped++;
3993
3994	return NETDEV_TX_OK;
3995}
3996
3997static int rocker_port_set_mac_address(struct net_device *dev, void *p)
3998{
3999	struct sockaddr *addr = p;
4000	struct rocker_port *rocker_port = netdev_priv(dev);
4001	int err;
4002
4003	if (!is_valid_ether_addr(addr->sa_data))
4004		return -EADDRNOTAVAIL;
4005
4006	err = rocker_cmd_set_port_settings_macaddr(rocker_port, addr->sa_data);
4007	if (err)
4008		return err;
4009	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4010	return 0;
4011}
4012
4013static int rocker_port_vlan_rx_add_vid(struct net_device *dev,
4014				       __be16 proto, u16 vid)
4015{
4016	struct rocker_port *rocker_port = netdev_priv(dev);
4017	int err;
4018
4019	err = rocker_port_vlan(rocker_port, 0, vid);
4020	if (err)
4021		return err;
4022
4023	return rocker_port_router_mac(rocker_port, 0, htons(vid));
4024}
4025
4026static int rocker_port_vlan_rx_kill_vid(struct net_device *dev,
4027					__be16 proto, u16 vid)
4028{
4029	struct rocker_port *rocker_port = netdev_priv(dev);
4030	int err;
4031
4032	err = rocker_port_router_mac(rocker_port, ROCKER_OP_FLAG_REMOVE,
4033				     htons(vid));
4034	if (err)
4035		return err;
4036
4037	return rocker_port_vlan(rocker_port, ROCKER_OP_FLAG_REMOVE, vid);
4038}
4039
4040static int rocker_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
4041			       struct net_device *dev,
4042			       const unsigned char *addr, u16 vid,
4043			       u16 nlm_flags)
4044{
4045	struct rocker_port *rocker_port = netdev_priv(dev);
4046	__be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, vid, NULL);
4047	int flags = 0;
4048
4049	if (!rocker_port_is_bridged(rocker_port))
4050		return -EINVAL;
4051
4052	return rocker_port_fdb(rocker_port, addr, vlan_id, flags);
4053}
4054
4055static int rocker_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
4056			       struct net_device *dev,
4057			       const unsigned char *addr, u16 vid)
4058{
4059	struct rocker_port *rocker_port = netdev_priv(dev);
4060	__be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, vid, NULL);
4061	int flags = ROCKER_OP_FLAG_REMOVE;
4062
4063	if (!rocker_port_is_bridged(rocker_port))
4064		return -EINVAL;
4065
4066	return rocker_port_fdb(rocker_port, addr, vlan_id, flags);
4067}
4068
4069static int rocker_fdb_fill_info(struct sk_buff *skb,
4070				struct rocker_port *rocker_port,
4071				const unsigned char *addr, u16 vid,
4072				u32 portid, u32 seq, int type,
4073				unsigned int flags)
4074{
4075	struct nlmsghdr *nlh;
4076	struct ndmsg *ndm;
4077
4078	nlh = nlmsg_put(skb, portid, seq, type, sizeof(*ndm), flags);
4079	if (!nlh)
4080		return -EMSGSIZE;
4081
4082	ndm = nlmsg_data(nlh);
4083	ndm->ndm_family	 = AF_BRIDGE;
4084	ndm->ndm_pad1    = 0;
4085	ndm->ndm_pad2    = 0;
4086	ndm->ndm_flags	 = NTF_SELF;
4087	ndm->ndm_type	 = 0;
4088	ndm->ndm_ifindex = rocker_port->dev->ifindex;
4089	ndm->ndm_state   = NUD_REACHABLE;
4090
4091	if (nla_put(skb, NDA_LLADDR, ETH_ALEN, addr))
4092		goto nla_put_failure;
4093
4094	if (vid && nla_put_u16(skb, NDA_VLAN, vid))
4095		goto nla_put_failure;
4096
4097	nlmsg_end(skb, nlh);
4098	return 0;
4099
4100nla_put_failure:
4101	nlmsg_cancel(skb, nlh);
4102	return -EMSGSIZE;
4103}
4104
4105static int rocker_port_fdb_dump(struct sk_buff *skb,
4106				struct netlink_callback *cb,
4107				struct net_device *dev,
4108				struct net_device *filter_dev,
4109				int idx)
4110{
4111	struct rocker_port *rocker_port = netdev_priv(dev);
4112	struct rocker *rocker = rocker_port->rocker;
4113	struct rocker_fdb_tbl_entry *found;
4114	struct hlist_node *tmp;
4115	int bkt;
4116	unsigned long lock_flags;
4117	const unsigned char *addr;
4118	u16 vid;
4119	int err;
4120
4121	spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
4122	hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) {
4123		if (found->key.pport != rocker_port->pport)
4124			continue;
4125		if (idx < cb->args[0])
4126			goto skip;
4127		addr = found->key.addr;
4128		vid = rocker_port_vlan_to_vid(rocker_port, found->key.vlan_id);
4129		err = rocker_fdb_fill_info(skb, rocker_port, addr, vid,
4130					   NETLINK_CB(cb->skb).portid,
4131					   cb->nlh->nlmsg_seq,
4132					   RTM_NEWNEIGH, NLM_F_MULTI);
4133		if (err < 0)
4134			break;
4135skip:
4136		++idx;
4137	}
4138	spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);
4139	return idx;
4140}
4141
4142static int rocker_port_bridge_setlink(struct net_device *dev,
4143				      struct nlmsghdr *nlh, u16 flags)
4144{
4145	struct rocker_port *rocker_port = netdev_priv(dev);
4146	struct nlattr *protinfo;
4147	struct nlattr *attr;
4148	int err;
4149
4150	protinfo = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg),
4151				   IFLA_PROTINFO);
4152	if (protinfo) {
4153		attr = nla_find_nested(protinfo, IFLA_BRPORT_LEARNING);
4154		if (attr) {
4155			if (nla_len(attr) < sizeof(u8))
4156				return -EINVAL;
4157
4158			if (nla_get_u8(attr))
4159				rocker_port->brport_flags |= BR_LEARNING;
4160			else
4161				rocker_port->brport_flags &= ~BR_LEARNING;
4162			err = rocker_port_set_learning(rocker_port);
4163			if (err)
4164				return err;
4165		}
4166		attr = nla_find_nested(protinfo, IFLA_BRPORT_LEARNING_SYNC);
4167		if (attr) {
4168			if (nla_len(attr) < sizeof(u8))
4169				return -EINVAL;
4170
4171			if (nla_get_u8(attr))
4172				rocker_port->brport_flags |= BR_LEARNING_SYNC;
4173			else
4174				rocker_port->brport_flags &= ~BR_LEARNING_SYNC;
4175		}
4176	}
4177
4178	return 0;
4179}
4180
4181static int rocker_port_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
4182				      struct net_device *dev,
4183				      u32 filter_mask, int nlflags)
4184{
4185	struct rocker_port *rocker_port = netdev_priv(dev);
4186	u16 mode = BRIDGE_MODE_UNDEF;
4187	u32 mask = BR_LEARNING | BR_LEARNING_SYNC;
4188
4189	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode,
4190				       rocker_port->brport_flags, mask,
4191				       nlflags);
4192}
4193
4194static int rocker_port_get_phys_port_name(struct net_device *dev,
4195					  char *buf, size_t len)
4196{
4197	struct rocker_port *rocker_port = netdev_priv(dev);
4198	struct port_name name = { .buf = buf, .len = len };
4199	int err;
4200
4201	err = rocker_cmd_exec(rocker_port->rocker, rocker_port,
4202			      rocker_cmd_get_port_settings_prep, NULL,
4203			      rocker_cmd_get_port_settings_phys_name_proc,
4204			      &name, false);
4205
4206	return err ? -EOPNOTSUPP : 0;
4207}
4208
4209static const struct net_device_ops rocker_port_netdev_ops = {
4210	.ndo_open			= rocker_port_open,
4211	.ndo_stop			= rocker_port_stop,
4212	.ndo_start_xmit			= rocker_port_xmit,
4213	.ndo_set_mac_address		= rocker_port_set_mac_address,
4214	.ndo_vlan_rx_add_vid		= rocker_port_vlan_rx_add_vid,
4215	.ndo_vlan_rx_kill_vid		= rocker_port_vlan_rx_kill_vid,
4216	.ndo_fdb_add			= rocker_port_fdb_add,
4217	.ndo_fdb_del			= rocker_port_fdb_del,
4218	.ndo_fdb_dump			= rocker_port_fdb_dump,
4219	.ndo_bridge_setlink		= rocker_port_bridge_setlink,
4220	.ndo_bridge_getlink		= rocker_port_bridge_getlink,
4221	.ndo_get_phys_port_name		= rocker_port_get_phys_port_name,
4222};
4223
4224/********************
4225 * swdev interface
4226 ********************/
4227
4228static int rocker_port_swdev_parent_id_get(struct net_device *dev,
4229					   struct netdev_phys_item_id *psid)
4230{
4231	struct rocker_port *rocker_port = netdev_priv(dev);
4232	struct rocker *rocker = rocker_port->rocker;
4233
4234	psid->id_len = sizeof(rocker->hw.id);
4235	memcpy(&psid->id, &rocker->hw.id, psid->id_len);
4236	return 0;
4237}
4238
4239static int rocker_port_swdev_port_stp_update(struct net_device *dev, u8 state)
4240{
4241	struct rocker_port *rocker_port = netdev_priv(dev);
4242
4243	return rocker_port_stp_update(rocker_port, state);
4244}
4245
4246static int rocker_port_swdev_fib_ipv4_add(struct net_device *dev,
4247					  __be32 dst, int dst_len,
4248					  struct fib_info *fi,
4249					  u8 tos, u8 type,
4250					  u32 nlflags, u32 tb_id)
4251{
4252	struct rocker_port *rocker_port = netdev_priv(dev);
4253	int flags = 0;
4254
4255	return rocker_port_fib_ipv4(rocker_port, dst, dst_len,
4256				    fi, tb_id, flags);
4257}
4258
4259static int rocker_port_swdev_fib_ipv4_del(struct net_device *dev,
4260					  __be32 dst, int dst_len,
4261					  struct fib_info *fi,
4262					  u8 tos, u8 type, u32 tb_id)
4263{
4264	struct rocker_port *rocker_port = netdev_priv(dev);
4265	int flags = ROCKER_OP_FLAG_REMOVE;
4266
4267	return rocker_port_fib_ipv4(rocker_port, dst, dst_len,
4268				    fi, tb_id, flags);
4269}
4270
4271static const struct swdev_ops rocker_port_swdev_ops = {
4272	.swdev_parent_id_get		= rocker_port_swdev_parent_id_get,
4273	.swdev_port_stp_update		= rocker_port_swdev_port_stp_update,
4274	.swdev_fib_ipv4_add		= rocker_port_swdev_fib_ipv4_add,
4275	.swdev_fib_ipv4_del		= rocker_port_swdev_fib_ipv4_del,
4276};
4277
4278/********************
4279 * ethtool interface
4280 ********************/
4281
4282static int rocker_port_get_settings(struct net_device *dev,
4283				    struct ethtool_cmd *ecmd)
4284{
4285	struct rocker_port *rocker_port = netdev_priv(dev);
4286
4287	return rocker_cmd_get_port_settings_ethtool(rocker_port, ecmd);
4288}
4289
4290static int rocker_port_set_settings(struct net_device *dev,
4291				    struct ethtool_cmd *ecmd)
4292{
4293	struct rocker_port *rocker_port = netdev_priv(dev);
4294
4295	return rocker_cmd_set_port_settings_ethtool(rocker_port, ecmd);
4296}
4297
4298static void rocker_port_get_drvinfo(struct net_device *dev,
4299				    struct ethtool_drvinfo *drvinfo)
4300{
4301	strlcpy(drvinfo->driver, rocker_driver_name, sizeof(drvinfo->driver));
4302	strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
4303}
4304
4305static struct rocker_port_stats {
4306	char str[ETH_GSTRING_LEN];
4307	int type;
4308} rocker_port_stats[] = {
4309	{ "rx_packets", ROCKER_TLV_CMD_PORT_STATS_RX_PKTS,    },
4310	{ "rx_bytes",   ROCKER_TLV_CMD_PORT_STATS_RX_BYTES,   },
4311	{ "rx_dropped", ROCKER_TLV_CMD_PORT_STATS_RX_DROPPED, },
4312	{ "rx_errors",  ROCKER_TLV_CMD_PORT_STATS_RX_ERRORS,  },
4313
4314	{ "tx_packets", ROCKER_TLV_CMD_PORT_STATS_TX_PKTS,    },
4315	{ "tx_bytes",   ROCKER_TLV_CMD_PORT_STATS_TX_BYTES,   },
4316	{ "tx_dropped", ROCKER_TLV_CMD_PORT_STATS_TX_DROPPED, },
4317	{ "tx_errors",  ROCKER_TLV_CMD_PORT_STATS_TX_ERRORS,  },
4318};
4319
4320#define ROCKER_PORT_STATS_LEN  ARRAY_SIZE(rocker_port_stats)
4321
4322static void rocker_port_get_strings(struct net_device *netdev, u32 stringset,
4323				    u8 *data)
4324{
4325	u8 *p = data;
4326	int i;
4327
4328	switch (stringset) {
4329	case ETH_SS_STATS:
4330		for (i = 0; i < ARRAY_SIZE(rocker_port_stats); i++) {
4331			memcpy(p, rocker_port_stats[i].str, ETH_GSTRING_LEN);
4332			p += ETH_GSTRING_LEN;
4333		}
4334		break;
4335	}
4336}
4337
4338static int
4339rocker_cmd_get_port_stats_prep(struct rocker *rocker,
4340			       struct rocker_port *rocker_port,
4341			       struct rocker_desc_info *desc_info,
4342			       void *priv)
4343{
4344	struct rocker_tlv *cmd_stats;
4345
4346	if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
4347			       ROCKER_TLV_CMD_TYPE_GET_PORT_STATS))
4348		return -EMSGSIZE;
4349
4350	cmd_stats = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
4351	if (!cmd_stats)
4352		return -EMSGSIZE;
4353
4354	if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_STATS_PPORT,
4355			       rocker_port->pport))
4356		return -EMSGSIZE;
4357
4358	rocker_tlv_nest_end(desc_info, cmd_stats);
4359
4360	return 0;
4361}
4362
4363static int
4364rocker_cmd_get_port_stats_ethtool_proc(struct rocker *rocker,
4365				       struct rocker_port *rocker_port,
4366				       struct rocker_desc_info *desc_info,
4367				       void *priv)
4368{
4369	struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
4370	struct rocker_tlv *stats_attrs[ROCKER_TLV_CMD_PORT_STATS_MAX + 1];
4371	struct rocker_tlv *pattr;
4372	u32 pport;
4373	u64 *data = priv;
4374	int i;
4375
4376	rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
4377
4378	if (!attrs[ROCKER_TLV_CMD_INFO])
4379		return -EIO;
4380
4381	rocker_tlv_parse_nested(stats_attrs, ROCKER_TLV_CMD_PORT_STATS_MAX,
4382				attrs[ROCKER_TLV_CMD_INFO]);
4383
4384	if (!stats_attrs[ROCKER_TLV_CMD_PORT_STATS_PPORT])
4385		return -EIO;
4386
4387	pport = rocker_tlv_get_u32(stats_attrs[ROCKER_TLV_CMD_PORT_STATS_PPORT]);
4388	if (pport != rocker_port->pport)
4389		return -EIO;
4390
4391	for (i = 0; i < ARRAY_SIZE(rocker_port_stats); i++) {
4392		pattr = stats_attrs[rocker_port_stats[i].type];
4393		if (!pattr)
4394			continue;
4395
4396		data[i] = rocker_tlv_get_u64(pattr);
4397	}
4398
4399	return 0;
4400}
4401
4402static int rocker_cmd_get_port_stats_ethtool(struct rocker_port *rocker_port,
4403					     void *priv)
4404{
4405	return rocker_cmd_exec(rocker_port->rocker, rocker_port,
4406			       rocker_cmd_get_port_stats_prep, NULL,
4407			       rocker_cmd_get_port_stats_ethtool_proc,
4408			       priv, false);
4409}
4410
4411static void rocker_port_get_stats(struct net_device *dev,
4412				  struct ethtool_stats *stats, u64 *data)
4413{
4414	struct rocker_port *rocker_port = netdev_priv(dev);
4415
4416	if (rocker_cmd_get_port_stats_ethtool(rocker_port, data) != 0) {
4417		int i;
4418
4419		for (i = 0; i < ARRAY_SIZE(rocker_port_stats); ++i)
4420			data[i] = 0;
4421	}
4422
4423	return;
4424}
4425
4426static int rocker_port_get_sset_count(struct net_device *netdev, int sset)
4427{
4428	switch (sset) {
4429	case ETH_SS_STATS:
4430		return ROCKER_PORT_STATS_LEN;
4431	default:
4432		return -EOPNOTSUPP;
4433	}
4434}
4435
4436static const struct ethtool_ops rocker_port_ethtool_ops = {
4437	.get_settings		= rocker_port_get_settings,
4438	.set_settings		= rocker_port_set_settings,
4439	.get_drvinfo		= rocker_port_get_drvinfo,
4440	.get_link		= ethtool_op_get_link,
4441	.get_strings		= rocker_port_get_strings,
4442	.get_ethtool_stats	= rocker_port_get_stats,
4443	.get_sset_count		= rocker_port_get_sset_count,
4444};
4445
4446/*****************
4447 * NAPI interface
4448 *****************/
4449
4450static struct rocker_port *rocker_port_napi_tx_get(struct napi_struct *napi)
4451{
4452	return container_of(napi, struct rocker_port, napi_tx);
4453}
4454
4455static int rocker_port_poll_tx(struct napi_struct *napi, int budget)
4456{
4457	struct rocker_port *rocker_port = rocker_port_napi_tx_get(napi);
4458	struct rocker *rocker = rocker_port->rocker;
4459	struct rocker_desc_info *desc_info;
4460	u32 credits = 0;
4461	int err;
4462
4463	/* Cleanup tx descriptors */
4464	while ((desc_info = rocker_desc_tail_get(&rocker_port->tx_ring))) {
4465		struct sk_buff *skb;
4466
4467		err = rocker_desc_err(desc_info);
4468		if (err && net_ratelimit())
4469			netdev_err(rocker_port->dev, "tx desc received with err %d\n",
4470				   err);
4471		rocker_tx_desc_frags_unmap(rocker_port, desc_info);
4472
4473		skb = rocker_desc_cookie_ptr_get(desc_info);
4474		if (err == 0) {
4475			rocker_port->dev->stats.tx_packets++;
4476			rocker_port->dev->stats.tx_bytes += skb->len;
4477		} else
4478			rocker_port->dev->stats.tx_errors++;
4479
4480		dev_kfree_skb_any(skb);
4481		credits++;
4482	}
4483
4484	if (credits && netif_queue_stopped(rocker_port->dev))
4485		netif_wake_queue(rocker_port->dev);
4486
4487	napi_complete(napi);
4488	rocker_dma_ring_credits_set(rocker, &rocker_port->tx_ring, credits);
4489
4490	return 0;
4491}
4492
4493static int rocker_port_rx_proc(struct rocker *rocker,
4494			       struct rocker_port *rocker_port,
4495			       struct rocker_desc_info *desc_info)
4496{
4497	struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1];
4498	struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info);
4499	size_t rx_len;
4500
4501	if (!skb)
4502		return -ENOENT;
4503
4504	rocker_tlv_parse_desc(attrs, ROCKER_TLV_RX_MAX, desc_info);
4505	if (!attrs[ROCKER_TLV_RX_FRAG_LEN])
4506		return -EINVAL;
4507
4508	rocker_dma_rx_ring_skb_unmap(rocker, attrs);
4509
4510	rx_len = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FRAG_LEN]);
4511	skb_put(skb, rx_len);
4512	skb->protocol = eth_type_trans(skb, rocker_port->dev);
4513
4514	rocker_port->dev->stats.rx_packets++;
4515	rocker_port->dev->stats.rx_bytes += skb->len;
4516
4517	netif_receive_skb(skb);
4518
4519	return rocker_dma_rx_ring_skb_alloc(rocker, rocker_port, desc_info);
4520}
4521
4522static struct rocker_port *rocker_port_napi_rx_get(struct napi_struct *napi)
4523{
4524	return container_of(napi, struct rocker_port, napi_rx);
4525}
4526
4527static int rocker_port_poll_rx(struct napi_struct *napi, int budget)
4528{
4529	struct rocker_port *rocker_port = rocker_port_napi_rx_get(napi);
4530	struct rocker *rocker = rocker_port->rocker;
4531	struct rocker_desc_info *desc_info;
4532	u32 credits = 0;
4533	int err;
4534
4535	/* Process rx descriptors */
4536	while (credits < budget &&
4537	       (desc_info = rocker_desc_tail_get(&rocker_port->rx_ring))) {
4538		err = rocker_desc_err(desc_info);
4539		if (err) {
4540			if (net_ratelimit())
4541				netdev_err(rocker_port->dev, "rx desc received with err %d\n",
4542					   err);
4543		} else {
4544			err = rocker_port_rx_proc(rocker, rocker_port,
4545						  desc_info);
4546			if (err && net_ratelimit())
4547				netdev_err(rocker_port->dev, "rx processing failed with err %d\n",
4548					   err);
4549		}
4550		if (err)
4551			rocker_port->dev->stats.rx_errors++;
4552
4553		rocker_desc_gen_clear(desc_info);
4554		rocker_desc_head_set(rocker, &rocker_port->rx_ring, desc_info);
4555		credits++;
4556	}
4557
4558	if (credits < budget)
4559		napi_complete(napi);
4560
4561	rocker_dma_ring_credits_set(rocker, &rocker_port->rx_ring, credits);
4562
4563	return credits;
4564}
4565
4566/*****************
4567 * PCI driver ops
4568 *****************/
4569
4570static void rocker_carrier_init(struct rocker_port *rocker_port)
4571{
4572	struct rocker *rocker = rocker_port->rocker;
4573	u64 link_status = rocker_read64(rocker, PORT_PHYS_LINK_STATUS);
4574	bool link_up;
4575
4576	link_up = link_status & (1 << rocker_port->pport);
4577	if (link_up)
4578		netif_carrier_on(rocker_port->dev);
4579	else
4580		netif_carrier_off(rocker_port->dev);
4581}
4582
4583static void rocker_remove_ports(struct rocker *rocker)
4584{
4585	struct rocker_port *rocker_port;
4586	int i;
4587
4588	for (i = 0; i < rocker->port_count; i++) {
4589		rocker_port = rocker->ports[i];
4590		rocker_port_ig_tbl(rocker_port, ROCKER_OP_FLAG_REMOVE);
4591		unregister_netdev(rocker_port->dev);
4592		free_netdev(rocker_port->dev);
4593	}
4594	kfree(rocker->ports);
4595}
4596
4597static void rocker_port_dev_addr_init(struct rocker *rocker,
4598				      struct rocker_port *rocker_port)
4599{
4600	struct pci_dev *pdev = rocker->pdev;
4601	int err;
4602
4603	err = rocker_cmd_get_port_settings_macaddr(rocker_port,
4604						   rocker_port->dev->dev_addr);
4605	if (err) {
4606		dev_warn(&pdev->dev, "failed to get mac address, using random\n");
4607		eth_hw_addr_random(rocker_port->dev);
4608	}
4609}
4610
4611static int rocker_probe_port(struct rocker *rocker, unsigned int port_number)
4612{
4613	struct pci_dev *pdev = rocker->pdev;
4614	struct rocker_port *rocker_port;
4615	struct net_device *dev;
4616	int err;
4617
4618	dev = alloc_etherdev(sizeof(struct rocker_port));
4619	if (!dev)
4620		return -ENOMEM;
4621	rocker_port = netdev_priv(dev);
4622	rocker_port->dev = dev;
4623	rocker_port->rocker = rocker;
4624	rocker_port->port_number = port_number;
4625	rocker_port->pport = port_number + 1;
4626	rocker_port->brport_flags = BR_LEARNING | BR_LEARNING_SYNC;
4627
4628	rocker_port_dev_addr_init(rocker, rocker_port);
4629	dev->netdev_ops = &rocker_port_netdev_ops;
4630	dev->ethtool_ops = &rocker_port_ethtool_ops;
4631	dev->swdev_ops = &rocker_port_swdev_ops;
4632	netif_napi_add(dev, &rocker_port->napi_tx, rocker_port_poll_tx,
4633		       NAPI_POLL_WEIGHT);
4634	netif_napi_add(dev, &rocker_port->napi_rx, rocker_port_poll_rx,
4635		       NAPI_POLL_WEIGHT);
4636	rocker_carrier_init(rocker_port);
4637
4638	dev->features |= NETIF_F_NETNS_LOCAL |
4639			 NETIF_F_HW_VLAN_CTAG_FILTER |
4640			 NETIF_F_HW_SWITCH_OFFLOAD;
4641
4642	err = register_netdev(dev);
4643	if (err) {
4644		dev_err(&pdev->dev, "register_netdev failed\n");
4645		goto err_register_netdev;
4646	}
4647	rocker->ports[port_number] = rocker_port;
4648
4649	rocker_port_set_learning(rocker_port);
4650
4651	rocker_port->internal_vlan_id =
4652		rocker_port_internal_vlan_id_get(rocker_port, dev->ifindex);
4653	err = rocker_port_ig_tbl(rocker_port, 0);
4654	if (err) {
4655		dev_err(&pdev->dev, "install ig port table failed\n");
4656		goto err_port_ig_tbl;
4657	}
4658
4659	return 0;
4660
4661err_port_ig_tbl:
4662	unregister_netdev(dev);
4663err_register_netdev:
4664	free_netdev(dev);
4665	return err;
4666}
4667
4668static int rocker_probe_ports(struct rocker *rocker)
4669{
4670	int i;
4671	size_t alloc_size;
4672	int err;
4673
4674	alloc_size = sizeof(struct rocker_port *) * rocker->port_count;
4675	rocker->ports = kmalloc(alloc_size, GFP_KERNEL);
4676	if (!rocker->ports)
4677		return -ENOMEM;
4678	for (i = 0; i < rocker->port_count; i++) {
4679		err = rocker_probe_port(rocker, i);
4680		if (err)
4681			goto remove_ports;
4682	}
4683	return 0;
4684
4685remove_ports:
4686	rocker_remove_ports(rocker);
4687	return err;
4688}
4689
4690static int rocker_msix_init(struct rocker *rocker)
4691{
4692	struct pci_dev *pdev = rocker->pdev;
4693	int msix_entries;
4694	int i;
4695	int err;
4696
4697	msix_entries = pci_msix_vec_count(pdev);
4698	if (msix_entries < 0)
4699		return msix_entries;
4700
4701	if (msix_entries != ROCKER_MSIX_VEC_COUNT(rocker->port_count))
4702		return -EINVAL;
4703
4704	rocker->msix_entries = kmalloc_array(msix_entries,
4705					     sizeof(struct msix_entry),
4706					     GFP_KERNEL);
4707	if (!rocker->msix_entries)
4708		return -ENOMEM;
4709
4710	for (i = 0; i < msix_entries; i++)
4711		rocker->msix_entries[i].entry = i;
4712
4713	err = pci_enable_msix_exact(pdev, rocker->msix_entries, msix_entries);
4714	if (err < 0)
4715		goto err_enable_msix;
4716
4717	return 0;
4718
4719err_enable_msix:
4720	kfree(rocker->msix_entries);
4721	return err;
4722}
4723
4724static void rocker_msix_fini(struct rocker *rocker)
4725{
4726	pci_disable_msix(rocker->pdev);
4727	kfree(rocker->msix_entries);
4728}
4729
4730static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id)
4731{
4732	struct rocker *rocker;
4733	int err;
4734
4735	rocker = kzalloc(sizeof(*rocker), GFP_KERNEL);
4736	if (!rocker)
4737		return -ENOMEM;
4738
4739	err = pci_enable_device(pdev);
4740	if (err) {
4741		dev_err(&pdev->dev, "pci_enable_device failed\n");
4742		goto err_pci_enable_device;
4743	}
4744
4745	err = pci_request_regions(pdev, rocker_driver_name);
4746	if (err) {
4747		dev_err(&pdev->dev, "pci_request_regions failed\n");
4748		goto err_pci_request_regions;
4749	}
4750
4751	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
4752	if (!err) {
4753		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
4754		if (err) {
4755			dev_err(&pdev->dev, "pci_set_consistent_dma_mask failed\n");
4756			goto err_pci_set_dma_mask;
4757		}
4758	} else {
4759		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
4760		if (err) {
4761			dev_err(&pdev->dev, "pci_set_dma_mask failed\n");
4762			goto err_pci_set_dma_mask;
4763		}
4764	}
4765
4766	if (pci_resource_len(pdev, 0) < ROCKER_PCI_BAR0_SIZE) {
4767		dev_err(&pdev->dev, "invalid PCI region size\n");
4768		err = -EINVAL;
4769		goto err_pci_resource_len_check;
4770	}
4771
4772	rocker->hw_addr = ioremap(pci_resource_start(pdev, 0),
4773				  pci_resource_len(pdev, 0));
4774	if (!rocker->hw_addr) {
4775		dev_err(&pdev->dev, "ioremap failed\n");
4776		err = -EIO;
4777		goto err_ioremap;
4778	}
4779	pci_set_master(pdev);
4780
4781	rocker->pdev = pdev;
4782	pci_set_drvdata(pdev, rocker);
4783
4784	rocker->port_count = rocker_read32(rocker, PORT_PHYS_COUNT);
4785
4786	err = rocker_msix_init(rocker);
4787	if (err) {
4788		dev_err(&pdev->dev, "MSI-X init failed\n");
4789		goto err_msix_init;
4790	}
4791
4792	err = rocker_basic_hw_test(rocker);
4793	if (err) {
4794		dev_err(&pdev->dev, "basic hw test failed\n");
4795		goto err_basic_hw_test;
4796	}
4797
4798	rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET);
4799
4800	err = rocker_dma_rings_init(rocker);
4801	if (err)
4802		goto err_dma_rings_init;
4803
4804	err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD),
4805			  rocker_cmd_irq_handler, 0,
4806			  rocker_driver_name, rocker);
4807	if (err) {
4808		dev_err(&pdev->dev, "cannot assign cmd irq\n");
4809		goto err_request_cmd_irq;
4810	}
4811
4812	err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT),
4813			  rocker_event_irq_handler, 0,
4814			  rocker_driver_name, rocker);
4815	if (err) {
4816		dev_err(&pdev->dev, "cannot assign event irq\n");
4817		goto err_request_event_irq;
4818	}
4819
4820	rocker->hw.id = rocker_read64(rocker, SWITCH_ID);
4821
4822	err = rocker_init_tbls(rocker);
4823	if (err) {
4824		dev_err(&pdev->dev, "cannot init rocker tables\n");
4825		goto err_init_tbls;
4826	}
4827
4828	err = rocker_probe_ports(rocker);
4829	if (err) {
4830		dev_err(&pdev->dev, "failed to probe ports\n");
4831		goto err_probe_ports;
4832	}
4833
4834	dev_info(&pdev->dev, "Rocker switch with id %016llx\n", rocker->hw.id);
4835
4836	return 0;
4837
4838err_probe_ports:
4839	rocker_free_tbls(rocker);
4840err_init_tbls:
4841	free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker);
4842err_request_event_irq:
4843	free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), rocker);
4844err_request_cmd_irq:
4845	rocker_dma_rings_fini(rocker);
4846err_dma_rings_init:
4847err_basic_hw_test:
4848	rocker_msix_fini(rocker);
4849err_msix_init:
4850	iounmap(rocker->hw_addr);
4851err_ioremap:
4852err_pci_resource_len_check:
4853err_pci_set_dma_mask:
4854	pci_release_regions(pdev);
4855err_pci_request_regions:
4856	pci_disable_device(pdev);
4857err_pci_enable_device:
4858	kfree(rocker);
4859	return err;
4860}
4861
4862static void rocker_remove(struct pci_dev *pdev)
4863{
4864	struct rocker *rocker = pci_get_drvdata(pdev);
4865
4866	rocker_free_tbls(rocker);
4867	rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET);
4868	rocker_remove_ports(rocker);
4869	free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker);
4870	free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), rocker);
4871	rocker_dma_rings_fini(rocker);
4872	rocker_msix_fini(rocker);
4873	iounmap(rocker->hw_addr);
4874	pci_release_regions(rocker->pdev);
4875	pci_disable_device(rocker->pdev);
4876	kfree(rocker);
4877}
4878
4879static struct pci_driver rocker_pci_driver = {
4880	.name		= rocker_driver_name,
4881	.id_table	= rocker_pci_id_table,
4882	.probe		= rocker_probe,
4883	.remove		= rocker_remove,
4884};
4885
4886/************************************
4887 * Net device notifier event handler
4888 ************************************/
4889
4890static bool rocker_port_dev_check(struct net_device *dev)
4891{
4892	return dev->netdev_ops == &rocker_port_netdev_ops;
4893}
4894
4895static int rocker_port_bridge_join(struct rocker_port *rocker_port,
4896				   struct net_device *bridge)
4897{
4898	int err;
4899
4900	rocker_port_internal_vlan_id_put(rocker_port,
4901					 rocker_port->dev->ifindex);
4902
4903	rocker_port->bridge_dev = bridge;
4904
4905	/* Use bridge internal VLAN ID for untagged pkts */
4906	err = rocker_port_vlan(rocker_port, ROCKER_OP_FLAG_REMOVE, 0);
4907	if (err)
4908		return err;
4909	rocker_port->internal_vlan_id =
4910		rocker_port_internal_vlan_id_get(rocker_port,
4911						 bridge->ifindex);
4912	return rocker_port_vlan(rocker_port, 0, 0);
4913}
4914
4915static int rocker_port_bridge_leave(struct rocker_port *rocker_port)
4916{
4917	int err;
4918
4919	rocker_port_internal_vlan_id_put(rocker_port,
4920					 rocker_port->bridge_dev->ifindex);
4921
4922	rocker_port->bridge_dev = NULL;
4923
4924	/* Use port internal VLAN ID for untagged pkts */
4925	err = rocker_port_vlan(rocker_port, ROCKER_OP_FLAG_REMOVE, 0);
4926	if (err)
4927		return err;
4928	rocker_port->internal_vlan_id =
4929		rocker_port_internal_vlan_id_get(rocker_port,
4930						 rocker_port->dev->ifindex);
4931	err = rocker_port_vlan(rocker_port, 0, 0);
4932	if (err)
4933		return err;
4934
4935	if (rocker_port->dev->flags & IFF_UP)
4936		err = rocker_port_fwd_enable(rocker_port);
4937
4938	return err;
4939}
4940
4941static int rocker_port_master_changed(struct net_device *dev)
4942{
4943	struct rocker_port *rocker_port = netdev_priv(dev);
4944	struct net_device *master = netdev_master_upper_dev_get(dev);
4945	int err = 0;
4946
4947	/* There are currently three cases handled here:
4948	 * 1. Joining a bridge
4949	 * 2. Leaving a previously joined bridge
4950	 * 3. Other, e.g. being added to or removed from a bond or openvswitch,
4951	 *    in which case nothing is done
4952	 */
4953	if (master && master->rtnl_link_ops &&
4954	    !strcmp(master->rtnl_link_ops->kind, "bridge"))
4955		err = rocker_port_bridge_join(rocker_port, master);
4956	else if (rocker_port_is_bridged(rocker_port))
4957		err = rocker_port_bridge_leave(rocker_port);
4958
4959	return err;
4960}
4961
4962static int rocker_netdevice_event(struct notifier_block *unused,
4963				  unsigned long event, void *ptr)
4964{
4965	struct net_device *dev;
4966	int err;
4967
4968	switch (event) {
4969	case NETDEV_CHANGEUPPER:
4970		dev = netdev_notifier_info_to_dev(ptr);
4971		if (!rocker_port_dev_check(dev))
4972			return NOTIFY_DONE;
4973		err = rocker_port_master_changed(dev);
4974		if (err)
4975			netdev_warn(dev,
4976				    "failed to reflect master change (err %d)\n",
4977				    err);
4978		break;
4979	}
4980
4981	return NOTIFY_DONE;
4982}
4983
4984static struct notifier_block rocker_netdevice_nb __read_mostly = {
4985	.notifier_call = rocker_netdevice_event,
4986};
4987
4988/************************************
4989 * Net event notifier event handler
4990 ************************************/
4991
4992static int rocker_neigh_update(struct net_device *dev, struct neighbour *n)
4993{
4994	struct rocker_port *rocker_port = netdev_priv(dev);
4995	int flags = (n->nud_state & NUD_VALID) ? 0 : ROCKER_OP_FLAG_REMOVE;
4996	__be32 ip_addr = *(__be32 *)n->primary_key;
4997
4998	return rocker_port_ipv4_neigh(rocker_port, flags, ip_addr, n->ha);
4999}
5000
5001static int rocker_netevent_event(struct notifier_block *unused,
5002				 unsigned long event, void *ptr)
5003{
5004	struct net_device *dev;
5005	struct neighbour *n = ptr;
5006	int err;
5007
5008	switch (event) {
5009	case NETEVENT_NEIGH_UPDATE:
5010		if (n->tbl != &arp_tbl)
5011			return NOTIFY_DONE;
5012		dev = n->dev;
5013		if (!rocker_port_dev_check(dev))
5014			return NOTIFY_DONE;
5015		err = rocker_neigh_update(dev, n);
5016		if (err)
5017			netdev_warn(dev,
5018				    "failed to handle neigh update (err %d)\n",
5019				    err);
5020		break;
5021	}
5022
5023	return NOTIFY_DONE;
5024}
5025
5026static struct notifier_block rocker_netevent_nb __read_mostly = {
5027	.notifier_call = rocker_netevent_event,
5028};
5029
5030/***********************
5031 * Module init and exit
5032 ***********************/
5033
5034static int __init rocker_module_init(void)
5035{
5036	int err;
5037
5038	register_netdevice_notifier(&rocker_netdevice_nb);
5039	register_netevent_notifier(&rocker_netevent_nb);
5040	err = pci_register_driver(&rocker_pci_driver);
5041	if (err)
5042		goto err_pci_register_driver;
5043	return 0;
5044
5045err_pci_register_driver:
5046	unregister_netdevice_notifier(&rocker_netevent_nb);
5047	unregister_netdevice_notifier(&rocker_netdevice_nb);
5048	return err;
5049}
5050
5051static void __exit rocker_module_exit(void)
5052{
5053	unregister_netevent_notifier(&rocker_netevent_nb);
5054	unregister_netdevice_notifier(&rocker_netdevice_nb);
5055	pci_unregister_driver(&rocker_pci_driver);
5056}
5057
5058module_init(rocker_module_init);
5059module_exit(rocker_module_exit);
5060
5061MODULE_LICENSE("GPL v2");
5062MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
5063MODULE_AUTHOR("Scott Feldman <sfeldma@gmail.com>");
5064MODULE_DESCRIPTION("Rocker switch device driver");
5065MODULE_DEVICE_TABLE(pci, rocker_pci_id_table);
5066