1 /*
2 * drivers/net/ethernet/rocker/rocker.c - Rocker switch device driver
3 * Copyright (c) 2014 Jiri Pirko <jiri@resnulli.us>
4 * Copyright (c) 2014 Scott Feldman <sfeldma@gmail.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 */
11
12 #include <linux/kernel.h>
13 #include <linux/module.h>
14 #include <linux/pci.h>
15 #include <linux/interrupt.h>
16 #include <linux/sched.h>
17 #include <linux/wait.h>
18 #include <linux/spinlock.h>
19 #include <linux/hashtable.h>
20 #include <linux/crc32.h>
21 #include <linux/sort.h>
22 #include <linux/random.h>
23 #include <linux/netdevice.h>
24 #include <linux/inetdevice.h>
25 #include <linux/skbuff.h>
26 #include <linux/socket.h>
27 #include <linux/etherdevice.h>
28 #include <linux/ethtool.h>
29 #include <linux/if_ether.h>
30 #include <linux/if_vlan.h>
31 #include <linux/if_bridge.h>
32 #include <linux/bitops.h>
33 #include <linux/ctype.h>
34 #include <net/switchdev.h>
35 #include <net/rtnetlink.h>
36 #include <net/ip_fib.h>
37 #include <net/netevent.h>
38 #include <net/arp.h>
39 #include <linux/io-64-nonatomic-lo-hi.h>
40 #include <generated/utsrelease.h>
41
42 #include "rocker.h"
43
44 static const char rocker_driver_name[] = "rocker";
45
46 static const struct pci_device_id rocker_pci_id_table[] = {
47 {PCI_VDEVICE(REDHAT, PCI_DEVICE_ID_REDHAT_ROCKER), 0},
48 {0, }
49 };
50
51 struct rocker_flow_tbl_key {
52 u32 priority;
53 enum rocker_of_dpa_table_id tbl_id;
54 union {
55 struct {
56 u32 in_pport;
57 u32 in_pport_mask;
58 enum rocker_of_dpa_table_id goto_tbl;
59 } ig_port;
60 struct {
61 u32 in_pport;
62 __be16 vlan_id;
63 __be16 vlan_id_mask;
64 enum rocker_of_dpa_table_id goto_tbl;
65 bool untagged;
66 __be16 new_vlan_id;
67 } vlan;
68 struct {
69 u32 in_pport;
70 u32 in_pport_mask;
71 __be16 eth_type;
72 u8 eth_dst[ETH_ALEN];
73 u8 eth_dst_mask[ETH_ALEN];
74 __be16 vlan_id;
75 __be16 vlan_id_mask;
76 enum rocker_of_dpa_table_id goto_tbl;
77 bool copy_to_cpu;
78 } term_mac;
79 struct {
80 __be16 eth_type;
81 __be32 dst4;
82 __be32 dst4_mask;
83 enum rocker_of_dpa_table_id goto_tbl;
84 u32 group_id;
85 } ucast_routing;
86 struct {
87 u8 eth_dst[ETH_ALEN];
88 u8 eth_dst_mask[ETH_ALEN];
89 int has_eth_dst;
90 int has_eth_dst_mask;
91 __be16 vlan_id;
92 u32 tunnel_id;
93 enum rocker_of_dpa_table_id goto_tbl;
94 u32 group_id;
95 bool copy_to_cpu;
96 } bridge;
97 struct {
98 u32 in_pport;
99 u32 in_pport_mask;
100 u8 eth_src[ETH_ALEN];
101 u8 eth_src_mask[ETH_ALEN];
102 u8 eth_dst[ETH_ALEN];
103 u8 eth_dst_mask[ETH_ALEN];
104 __be16 eth_type;
105 __be16 vlan_id;
106 __be16 vlan_id_mask;
107 u8 ip_proto;
108 u8 ip_proto_mask;
109 u8 ip_tos;
110 u8 ip_tos_mask;
111 u32 group_id;
112 } acl;
113 };
114 };
115
116 struct rocker_flow_tbl_entry {
117 struct hlist_node entry;
118 u32 cmd;
119 u64 cookie;
120 struct rocker_flow_tbl_key key;
121 size_t key_len;
122 u32 key_crc32; /* key */
123 };
124
125 struct rocker_group_tbl_entry {
126 struct hlist_node entry;
127 u32 cmd;
128 u32 group_id; /* key */
129 u16 group_count;
130 u32 *group_ids;
131 union {
132 struct {
133 u8 pop_vlan;
134 } l2_interface;
135 struct {
136 u8 eth_src[ETH_ALEN];
137 u8 eth_dst[ETH_ALEN];
138 __be16 vlan_id;
139 u32 group_id;
140 } l2_rewrite;
141 struct {
142 u8 eth_src[ETH_ALEN];
143 u8 eth_dst[ETH_ALEN];
144 __be16 vlan_id;
145 bool ttl_check;
146 u32 group_id;
147 } l3_unicast;
148 };
149 };
150
151 struct rocker_fdb_tbl_entry {
152 struct hlist_node entry;
153 u32 key_crc32; /* key */
154 bool learned;
155 unsigned long touched;
156 struct rocker_fdb_tbl_key {
157 struct rocker_port *rocker_port;
158 u8 addr[ETH_ALEN];
159 __be16 vlan_id;
160 } key;
161 };
162
163 struct rocker_internal_vlan_tbl_entry {
164 struct hlist_node entry;
165 int ifindex; /* key */
166 u32 ref_count;
167 __be16 vlan_id;
168 };
169
170 struct rocker_neigh_tbl_entry {
171 struct hlist_node entry;
172 __be32 ip_addr; /* key */
173 struct net_device *dev;
174 u32 ref_count;
175 u32 index;
176 u8 eth_dst[ETH_ALEN];
177 bool ttl_check;
178 };
179
180 struct rocker_desc_info {
181 char *data; /* mapped */
182 size_t data_size;
183 size_t tlv_size;
184 struct rocker_desc *desc;
185 dma_addr_t mapaddr;
186 };
187
188 struct rocker_dma_ring_info {
189 size_t size;
190 u32 head;
191 u32 tail;
192 struct rocker_desc *desc; /* mapped */
193 dma_addr_t mapaddr;
194 struct rocker_desc_info *desc_info;
195 unsigned int type;
196 };
197
198 struct rocker;
199
200 enum {
201 ROCKER_CTRL_LINK_LOCAL_MCAST,
202 ROCKER_CTRL_LOCAL_ARP,
203 ROCKER_CTRL_IPV4_MCAST,
204 ROCKER_CTRL_IPV6_MCAST,
205 ROCKER_CTRL_DFLT_BRIDGING,
206 ROCKER_CTRL_DFLT_OVS,
207 ROCKER_CTRL_MAX,
208 };
209
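/* Untagged traffic on a port is mapped to a per-port internal VLAN ID drawn
 * from the range starting at ROCKER_INTERNAL_VLAN_ID_BASE; see
 * rocker_port_vid_to_vlan() and rocker_vlan_id_is_internal() below.
 */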
210 #define ROCKER_INTERNAL_VLAN_ID_BASE 0x0f00
211 #define ROCKER_N_INTERNAL_VLANS 255
212 #define ROCKER_VLAN_BITMAP_LEN BITS_TO_LONGS(VLAN_N_VID)
213 #define ROCKER_INTERNAL_VLAN_BITMAP_LEN BITS_TO_LONGS(ROCKER_N_INTERNAL_VLANS)
214
215 struct rocker_port {
216 struct net_device *dev;
217 struct net_device *bridge_dev;
218 struct rocker *rocker;
219 unsigned int port_number;
220 u32 pport;
221 __be16 internal_vlan_id;
222 int stp_state;
223 u32 brport_flags;
224 unsigned long ageing_time;
225 bool ctrls[ROCKER_CTRL_MAX];
226 unsigned long vlan_bitmap[ROCKER_VLAN_BITMAP_LEN];
227 struct napi_struct napi_tx;
228 struct napi_struct napi_rx;
229 struct rocker_dma_ring_info tx_ring;
230 struct rocker_dma_ring_info rx_ring;
231 };
232
233 struct rocker {
234 struct pci_dev *pdev;
235 u8 __iomem *hw_addr;
236 struct msix_entry *msix_entries;
237 unsigned int port_count;
238 struct rocker_port **ports;
239 struct {
240 u64 id;
241 } hw;
242 unsigned long ageing_time;
243 spinlock_t cmd_ring_lock; /* for cmd ring accesses */
244 struct rocker_dma_ring_info cmd_ring;
245 struct rocker_dma_ring_info event_ring;
246 DECLARE_HASHTABLE(flow_tbl, 16);
247 spinlock_t flow_tbl_lock; /* for flow tbl accesses */
248 u64 flow_tbl_next_cookie;
249 DECLARE_HASHTABLE(group_tbl, 16);
250 spinlock_t group_tbl_lock; /* for group tbl accesses */
251 struct timer_list fdb_cleanup_timer;
252 DECLARE_HASHTABLE(fdb_tbl, 16);
253 spinlock_t fdb_tbl_lock; /* for fdb tbl accesses */
254 unsigned long internal_vlan_bitmap[ROCKER_INTERNAL_VLAN_BITMAP_LEN];
255 DECLARE_HASHTABLE(internal_vlan_tbl, 8);
256 spinlock_t internal_vlan_tbl_lock; /* for vlan tbl accesses */
257 DECLARE_HASHTABLE(neigh_tbl, 16);
258 spinlock_t neigh_tbl_lock; /* for neigh tbl accesses */
259 u32 neigh_tbl_next_index;
260 };
261
262 static const u8 zero_mac[ETH_ALEN] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
263 static const u8 ff_mac[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
264 static const u8 ll_mac[ETH_ALEN] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
265 static const u8 ll_mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0 };
266 static const u8 mcast_mac[ETH_ALEN] = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 };
267 static const u8 ipv4_mcast[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x00 };
268 static const u8 ipv4_mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0x80, 0x00, 0x00 };
269 static const u8 ipv6_mcast[ETH_ALEN] = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x00 };
270 static const u8 ipv6_mask[ETH_ALEN] = { 0xff, 0xff, 0x00, 0x00, 0x00, 0x00 };
271
272 /* Rocker priority levels for flow table entries. Higher
273 * priority match takes precedence over lower priority match.
274 */
275
276 enum {
277 ROCKER_PRIORITY_UNKNOWN = 0,
278 ROCKER_PRIORITY_IG_PORT = 1,
279 ROCKER_PRIORITY_VLAN = 1,
280 ROCKER_PRIORITY_TERM_MAC_UCAST = 0,
281 ROCKER_PRIORITY_TERM_MAC_MCAST = 1,
282 ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_EXACT = 1,
283 ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_WILD = 2,
284 ROCKER_PRIORITY_BRIDGING_VLAN = 3,
285 ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_EXACT = 1,
286 ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_WILD = 2,
287 ROCKER_PRIORITY_BRIDGING_TENANT = 3,
288 ROCKER_PRIORITY_ACL_CTRL = 3,
289 ROCKER_PRIORITY_ACL_NORMAL = 2,
290 ROCKER_PRIORITY_ACL_DFLT = 1,
291 };
292
293 static bool rocker_vlan_id_is_internal(__be16 vlan_id)
294 {
295 u16 start = ROCKER_INTERNAL_VLAN_ID_BASE;
296 u16 end = 0xffe;
297 u16 _vlan_id = ntohs(vlan_id);
298
299 return (_vlan_id >= start && _vlan_id <= end);
300 }
301
302 static __be16 rocker_port_vid_to_vlan(const struct rocker_port *rocker_port,
303 u16 vid, bool *pop_vlan)
304 {
305 __be16 vlan_id;
306
307 if (pop_vlan)
308 *pop_vlan = false;
309 vlan_id = htons(vid);
310 if (!vlan_id) {
311 vlan_id = rocker_port->internal_vlan_id;
312 if (pop_vlan)
313 *pop_vlan = true;
314 }
315
316 return vlan_id;
317 }
318
319 static u16 rocker_port_vlan_to_vid(const struct rocker_port *rocker_port,
320 __be16 vlan_id)
321 {
322 if (rocker_vlan_id_is_internal(vlan_id))
323 return 0;
324
325 return ntohs(vlan_id);
326 }
327
328 static bool rocker_port_is_bridged(const struct rocker_port *rocker_port)
329 {
330 return rocker_port->bridge_dev &&
331 netif_is_bridge_master(rocker_port->bridge_dev);
332 }
333
334 static bool rocker_port_is_ovsed(const struct rocker_port *rocker_port)
335 {
336 return rocker_port->bridge_dev &&
337 netif_is_ovs_master(rocker_port->bridge_dev);
338 }
339
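/* Flags threaded through the helpers below: REMOVE turns an add into a
 * delete, NOWAIT makes the operation fire-and-forget (GFP_ATOMIC allocations,
 * no wait for command completion), LEARNED and REFRESH qualify FDB updates.
 */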
340 #define ROCKER_OP_FLAG_REMOVE BIT(0)
341 #define ROCKER_OP_FLAG_NOWAIT BIT(1)
342 #define ROCKER_OP_FLAG_LEARNED BIT(2)
343 #define ROCKER_OP_FLAG_REFRESH BIT(3)
344
345 static void *__rocker_port_mem_alloc(struct rocker_port *rocker_port,
346 struct switchdev_trans *trans, int flags,
347 size_t size)
348 {
349 struct switchdev_trans_item *elem = NULL;
350 gfp_t gfp_flags = (flags & ROCKER_OP_FLAG_NOWAIT) ?
351 GFP_ATOMIC : GFP_KERNEL;
352
353 /* If in transaction prepare phase, allocate the memory
354 * and enqueue it on a transaction. If in transaction
355 * commit phase, dequeue the memory from the transaction
356 * rather than re-allocating the memory. The idea is the
357 * driver code paths for prepare and commit are identical
358 * so the memory allocated in the prepare phase is the
359 * memory used in the commit phase.
360 */
361
362 if (!trans) {
363 elem = kzalloc(size + sizeof(*elem), gfp_flags);
364 } else if (switchdev_trans_ph_prepare(trans)) {
365 elem = kzalloc(size + sizeof(*elem), gfp_flags);
366 if (!elem)
367 return NULL;
368 switchdev_trans_item_enqueue(trans, elem, kfree, elem);
369 } else {
370 elem = switchdev_trans_item_dequeue(trans);
371 }
372
373 return elem ? elem + 1 : NULL;
374 }
375
376 static void *rocker_port_kzalloc(struct rocker_port *rocker_port,
377 struct switchdev_trans *trans, int flags,
378 size_t size)
379 {
380 return __rocker_port_mem_alloc(rocker_port, trans, flags, size);
381 }
382
383 static void *rocker_port_kcalloc(struct rocker_port *rocker_port,
384 struct switchdev_trans *trans, int flags,
385 size_t n, size_t size)
386 {
387 return __rocker_port_mem_alloc(rocker_port, trans, flags, n * size);
388 }
389
390 static void rocker_port_kfree(struct switchdev_trans *trans, const void *mem)
391 {
392 struct switchdev_trans_item *elem;
393
394 /* Frees are ignored if in transaction prepare phase. The
395 * memory remains on the per-port list until freed in the
396 * commit phase.
397 */
398
399 if (switchdev_trans_ph_prepare(trans))
400 return;
401
402 elem = (struct switchdev_trans_item *) mem - 1;
403 kfree(elem);
404 }
405
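/* Completion object attached to a command descriptor's cookie. The cmd ring
 * IRQ handler either wakes the waiter or, for nowait commands, frees the
 * wait object itself.
 */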
406 struct rocker_wait {
407 wait_queue_head_t wait;
408 bool done;
409 bool nowait;
410 };
411
412 static void rocker_wait_reset(struct rocker_wait *wait)
413 {
414 wait->done = false;
415 wait->nowait = false;
416 }
417
418 static void rocker_wait_init(struct rocker_wait *wait)
419 {
420 init_waitqueue_head(&wait->wait);
421 rocker_wait_reset(wait);
422 }
423
424 static struct rocker_wait *rocker_wait_create(struct rocker_port *rocker_port,
425 struct switchdev_trans *trans,
426 int flags)
427 {
428 struct rocker_wait *wait;
429
430 wait = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*wait));
431 if (!wait)
432 return NULL;
433 rocker_wait_init(wait);
434 return wait;
435 }
436
437 static void rocker_wait_destroy(struct switchdev_trans *trans,
438 struct rocker_wait *wait)
439 {
440 rocker_port_kfree(trans, wait);
441 }
442
443 static bool rocker_wait_event_timeout(struct rocker_wait *wait,
444 unsigned long timeout)
445 {
446 wait_event_timeout(wait->wait, wait->done, timeout);
447 if (!wait->done)
448 return false;
449 return true;
450 }
451
452 static void rocker_wait_wake_up(struct rocker_wait *wait)
453 {
454 wait->done = true;
455 wake_up(&wait->wait);
456 }
457
458 static u32 rocker_msix_vector(const struct rocker *rocker, unsigned int vector)
459 {
460 return rocker->msix_entries[vector].vector;
461 }
462
463 static u32 rocker_msix_tx_vector(const struct rocker_port *rocker_port)
464 {
465 return rocker_msix_vector(rocker_port->rocker,
466 ROCKER_MSIX_VEC_TX(rocker_port->port_number));
467 }
468
469 static u32 rocker_msix_rx_vector(const struct rocker_port *rocker_port)
470 {
471 return rocker_msix_vector(rocker_port->rocker,
472 ROCKER_MSIX_VEC_RX(rocker_port->port_number));
473 }
474
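/* MMIO accessors for the device register block; reg is the suffix of a
 * ROCKER_<reg> register offset define.
 */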
475 #define rocker_write32(rocker, reg, val) \
476 writel((val), (rocker)->hw_addr + (ROCKER_ ## reg))
477 #define rocker_read32(rocker, reg) \
478 readl((rocker)->hw_addr + (ROCKER_ ## reg))
479 #define rocker_write64(rocker, reg, val) \
480 writeq((val), (rocker)->hw_addr + (ROCKER_ ## reg))
481 #define rocker_read64(rocker, reg) \
482 readq((rocker)->hw_addr + (ROCKER_ ## reg))
483
484 /*****************************
485 * HW basic testing functions
486 *****************************/
487
488 static int rocker_reg_test(const struct rocker *rocker)
489 {
490 const struct pci_dev *pdev = rocker->pdev;
491 u64 test_reg;
492 u64 rnd;
493
494 rnd = prandom_u32();
495 rnd >>= 1;
496 rocker_write32(rocker, TEST_REG, rnd);
497 test_reg = rocker_read32(rocker, TEST_REG);
498 if (test_reg != rnd * 2) {
499 dev_err(&pdev->dev, "unexpected 32bit register value %08llx, expected %08llx\n",
500 test_reg, rnd * 2);
501 return -EIO;
502 }
503
504 rnd = prandom_u32();
505 rnd <<= 31;
506 rnd |= prandom_u32();
507 rocker_write64(rocker, TEST_REG64, rnd);
508 test_reg = rocker_read64(rocker, TEST_REG64);
509 if (test_reg != rnd * 2) {
510 dev_err(&pdev->dev, "unexpected 64bit register value %16llx, expected %16llx\n",
511 test_reg, rnd * 2);
512 return -EIO;
513 }
514
515 return 0;
516 }
517
518 static int rocker_dma_test_one(const struct rocker *rocker,
519 struct rocker_wait *wait, u32 test_type,
520 dma_addr_t dma_handle, const unsigned char *buf,
521 const unsigned char *expect, size_t size)
522 {
523 const struct pci_dev *pdev = rocker->pdev;
524 int i;
525
526 rocker_wait_reset(wait);
527 rocker_write32(rocker, TEST_DMA_CTRL, test_type);
528
529 if (!rocker_wait_event_timeout(wait, HZ / 10)) {
530 dev_err(&pdev->dev, "no interrupt received within a timeout\n");
531 return -EIO;
532 }
533
534 for (i = 0; i < size; i++) {
535 if (buf[i] != expect[i]) {
536 dev_err(&pdev->dev, "unexpected memory content %02x at byte %x, expected %02x\n",
537 buf[i], i, expect[i]);
538 return -EIO;
539 }
540 }
541 return 0;
542 }
543
544 #define ROCKER_TEST_DMA_BUF_SIZE (PAGE_SIZE * 4)
545 #define ROCKER_TEST_DMA_FILL_PATTERN 0x96
546
547 static int rocker_dma_test_offset(const struct rocker *rocker,
548 struct rocker_wait *wait, int offset)
549 {
550 struct pci_dev *pdev = rocker->pdev;
551 unsigned char *alloc;
552 unsigned char *buf;
553 unsigned char *expect;
554 dma_addr_t dma_handle;
555 int i;
556 int err;
557
558 alloc = kzalloc(ROCKER_TEST_DMA_BUF_SIZE * 2 + offset,
559 GFP_KERNEL | GFP_DMA);
560 if (!alloc)
561 return -ENOMEM;
562 buf = alloc + offset;
563 expect = buf + ROCKER_TEST_DMA_BUF_SIZE;
564
565 dma_handle = pci_map_single(pdev, buf, ROCKER_TEST_DMA_BUF_SIZE,
566 PCI_DMA_BIDIRECTIONAL);
567 if (pci_dma_mapping_error(pdev, dma_handle)) {
568 err = -EIO;
569 goto free_alloc;
570 }
571
572 rocker_write64(rocker, TEST_DMA_ADDR, dma_handle);
573 rocker_write32(rocker, TEST_DMA_SIZE, ROCKER_TEST_DMA_BUF_SIZE);
574
575 memset(expect, ROCKER_TEST_DMA_FILL_PATTERN, ROCKER_TEST_DMA_BUF_SIZE);
576 err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_FILL,
577 dma_handle, buf, expect,
578 ROCKER_TEST_DMA_BUF_SIZE);
579 if (err)
580 goto unmap;
581
582 memset(expect, 0, ROCKER_TEST_DMA_BUF_SIZE);
583 err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_CLEAR,
584 dma_handle, buf, expect,
585 ROCKER_TEST_DMA_BUF_SIZE);
586 if (err)
587 goto unmap;
588
589 prandom_bytes(buf, ROCKER_TEST_DMA_BUF_SIZE);
590 for (i = 0; i < ROCKER_TEST_DMA_BUF_SIZE; i++)
591 expect[i] = ~buf[i];
592 err = rocker_dma_test_one(rocker, wait, ROCKER_TEST_DMA_CTRL_INVERT,
593 dma_handle, buf, expect,
594 ROCKER_TEST_DMA_BUF_SIZE);
595 if (err)
596 goto unmap;
597
598 unmap:
599 pci_unmap_single(pdev, dma_handle, ROCKER_TEST_DMA_BUF_SIZE,
600 PCI_DMA_BIDIRECTIONAL);
601 free_alloc:
602 kfree(alloc);
603
604 return err;
605 }
606
607 static int rocker_dma_test(const struct rocker *rocker,
608 struct rocker_wait *wait)
609 {
610 int i;
611 int err;
612
613 for (i = 0; i < 8; i++) {
614 err = rocker_dma_test_offset(rocker, wait, i);
615 if (err)
616 return err;
617 }
618 return 0;
619 }
620
621 static irqreturn_t rocker_test_irq_handler(int irq, void *dev_id)
622 {
623 struct rocker_wait *wait = dev_id;
624
625 rocker_wait_wake_up(wait);
626
627 return IRQ_HANDLED;
628 }
629
630 static int rocker_basic_hw_test(const struct rocker *rocker)
631 {
632 const struct pci_dev *pdev = rocker->pdev;
633 struct rocker_wait wait;
634 int err;
635
636 err = rocker_reg_test(rocker);
637 if (err) {
638 dev_err(&pdev->dev, "reg test failed\n");
639 return err;
640 }
641
642 err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_TEST),
643 rocker_test_irq_handler, 0,
644 rocker_driver_name, &wait);
645 if (err) {
646 dev_err(&pdev->dev, "cannot assign test irq\n");
647 return err;
648 }
649
650 rocker_wait_init(&wait);
651 rocker_write32(rocker, TEST_IRQ, ROCKER_MSIX_VEC_TEST);
652
653 if (!rocker_wait_event_timeout(&wait, HZ / 10)) {
654 dev_err(&pdev->dev, "no interrupt received within a timeout\n");
655 err = -EIO;
656 goto free_irq;
657 }
658
659 err = rocker_dma_test(rocker, &wait);
660 if (err)
661 dev_err(&pdev->dev, "dma test failed\n");
662
663 free_irq:
664 free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_TEST), &wait);
665 return err;
666 }
667
668 /******
669 * TLV
670 ******/
671
672 #define ROCKER_TLV_ALIGNTO 8U
673 #define ROCKER_TLV_ALIGN(len) \
674 (((len) + ROCKER_TLV_ALIGNTO - 1) & ~(ROCKER_TLV_ALIGNTO - 1))
675 #define ROCKER_TLV_HDRLEN ROCKER_TLV_ALIGN(sizeof(struct rocker_tlv))
676
677 /* <------- ROCKER_TLV_HDRLEN -------> <--- ROCKER_TLV_ALIGN(payload) --->
678 * +-----------------------------+- - -+- - - - - - - - - - - - - - -+- - -+
679 * | Header | Pad | Payload | Pad |
680 * | (struct rocker_tlv) | ing | | ing |
681 * +-----------------------------+- - -+- - - - - - - - - - - - - - -+- - -+
682 * <--------------------------- tlv->len -------------------------->
683 */
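/* Example of attribute construction with the helpers below (sketch only,
 * error handling omitted): open a nest, add attributes, then close the nest
 * so its length gets fixed up:
 *
 *	cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
 *	rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT, pport);
 *	rocker_tlv_nest_end(desc_info, cmd_info);
 */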
684
685 static struct rocker_tlv *rocker_tlv_next(const struct rocker_tlv *tlv,
686 int *remaining)
687 {
688 int totlen = ROCKER_TLV_ALIGN(tlv->len);
689
690 *remaining -= totlen;
691 return (struct rocker_tlv *) ((char *) tlv + totlen);
692 }
693
694 static int rocker_tlv_ok(const struct rocker_tlv *tlv, int remaining)
695 {
696 return remaining >= (int) ROCKER_TLV_HDRLEN &&
697 tlv->len >= ROCKER_TLV_HDRLEN &&
698 tlv->len <= remaining;
699 }
700
701 #define rocker_tlv_for_each(pos, head, len, rem) \
702 for (pos = head, rem = len; \
703 rocker_tlv_ok(pos, rem); \
704 pos = rocker_tlv_next(pos, &(rem)))
705
706 #define rocker_tlv_for_each_nested(pos, tlv, rem) \
707 rocker_tlv_for_each(pos, rocker_tlv_data(tlv), \
708 rocker_tlv_len(tlv), rem)
709
710 static int rocker_tlv_attr_size(int payload)
711 {
712 return ROCKER_TLV_HDRLEN + payload;
713 }
714
715 static int rocker_tlv_total_size(int payload)
716 {
717 return ROCKER_TLV_ALIGN(rocker_tlv_attr_size(payload));
718 }
719
720 static int rocker_tlv_padlen(int payload)
721 {
722 return rocker_tlv_total_size(payload) - rocker_tlv_attr_size(payload);
723 }
724
725 static int rocker_tlv_type(const struct rocker_tlv *tlv)
726 {
727 return tlv->type;
728 }
729
730 static void *rocker_tlv_data(const struct rocker_tlv *tlv)
731 {
732 return (char *) tlv + ROCKER_TLV_HDRLEN;
733 }
734
735 static int rocker_tlv_len(const struct rocker_tlv *tlv)
736 {
737 return tlv->len - ROCKER_TLV_HDRLEN;
738 }
739
740 static u8 rocker_tlv_get_u8(const struct rocker_tlv *tlv)
741 {
742 return *(u8 *) rocker_tlv_data(tlv);
743 }
744
745 static u16 rocker_tlv_get_u16(const struct rocker_tlv *tlv)
746 {
747 return *(u16 *) rocker_tlv_data(tlv);
748 }
749
750 static __be16 rocker_tlv_get_be16(const struct rocker_tlv *tlv)
751 {
752 return *(__be16 *) rocker_tlv_data(tlv);
753 }
754
755 static u32 rocker_tlv_get_u32(const struct rocker_tlv *tlv)
756 {
757 return *(u32 *) rocker_tlv_data(tlv);
758 }
759
760 static u64 rocker_tlv_get_u64(const struct rocker_tlv *tlv)
761 {
762 return *(u64 *) rocker_tlv_data(tlv);
763 }
764
765 static void rocker_tlv_parse(const struct rocker_tlv **tb, int maxtype,
766 const char *buf, int buf_len)
767 {
768 const struct rocker_tlv *tlv;
769 const struct rocker_tlv *head = (const struct rocker_tlv *) buf;
770 int rem;
771
772 memset(tb, 0, sizeof(struct rocker_tlv *) * (maxtype + 1));
773
774 rocker_tlv_for_each(tlv, head, buf_len, rem) {
775 u32 type = rocker_tlv_type(tlv);
776
777 if (type > 0 && type <= maxtype)
778 tb[type] = tlv;
779 }
780 }
781
782 static void rocker_tlv_parse_nested(const struct rocker_tlv **tb, int maxtype,
783 const struct rocker_tlv *tlv)
784 {
785 rocker_tlv_parse(tb, maxtype, rocker_tlv_data(tlv),
786 rocker_tlv_len(tlv));
787 }
788
789 static void rocker_tlv_parse_desc(const struct rocker_tlv **tb, int maxtype,
790 const struct rocker_desc_info *desc_info)
791 {
792 rocker_tlv_parse(tb, maxtype, desc_info->data,
793 desc_info->desc->tlv_size);
794 }
795
796 static struct rocker_tlv *rocker_tlv_start(struct rocker_desc_info *desc_info)
797 {
798 return (struct rocker_tlv *) ((char *) desc_info->data +
799 desc_info->tlv_size);
800 }
801
802 static int rocker_tlv_put(struct rocker_desc_info *desc_info,
803 int attrtype, int attrlen, const void *data)
804 {
805 int tail_room = desc_info->data_size - desc_info->tlv_size;
806 int total_size = rocker_tlv_total_size(attrlen);
807 struct rocker_tlv *tlv;
808
809 if (unlikely(tail_room < total_size))
810 return -EMSGSIZE;
811
812 tlv = rocker_tlv_start(desc_info);
813 desc_info->tlv_size += total_size;
814 tlv->type = attrtype;
815 tlv->len = rocker_tlv_attr_size(attrlen);
816 memcpy(rocker_tlv_data(tlv), data, attrlen);
817 memset((char *) tlv + tlv->len, 0, rocker_tlv_padlen(attrlen));
818 return 0;
819 }
820
821 static int rocker_tlv_put_u8(struct rocker_desc_info *desc_info,
822 int attrtype, u8 value)
823 {
824 return rocker_tlv_put(desc_info, attrtype, sizeof(u8), &value);
825 }
826
827 static int rocker_tlv_put_u16(struct rocker_desc_info *desc_info,
828 int attrtype, u16 value)
829 {
830 return rocker_tlv_put(desc_info, attrtype, sizeof(u16), &value);
831 }
832
833 static int rocker_tlv_put_be16(struct rocker_desc_info *desc_info,
834 int attrtype, __be16 value)
835 {
836 return rocker_tlv_put(desc_info, attrtype, sizeof(__be16), &value);
837 }
838
839 static int rocker_tlv_put_u32(struct rocker_desc_info *desc_info,
840 int attrtype, u32 value)
841 {
842 return rocker_tlv_put(desc_info, attrtype, sizeof(u32), &value);
843 }
844
845 static int rocker_tlv_put_be32(struct rocker_desc_info *desc_info,
846 int attrtype, __be32 value)
847 {
848 return rocker_tlv_put(desc_info, attrtype, sizeof(__be32), &value);
849 }
850
851 static int rocker_tlv_put_u64(struct rocker_desc_info *desc_info,
852 int attrtype, u64 value)
853 {
854 return rocker_tlv_put(desc_info, attrtype, sizeof(u64), &value);
855 }
856
857 static struct rocker_tlv *
858 rocker_tlv_nest_start(struct rocker_desc_info *desc_info, int attrtype)
859 {
860 struct rocker_tlv *start = rocker_tlv_start(desc_info);
861
862 if (rocker_tlv_put(desc_info, attrtype, 0, NULL) < 0)
863 return NULL;
864
865 return start;
866 }
867
868 static void rocker_tlv_nest_end(struct rocker_desc_info *desc_info,
869 struct rocker_tlv *start)
870 {
871 start->len = (char *) rocker_tlv_start(desc_info) - (char *) start;
872 }
873
874 static void rocker_tlv_nest_cancel(struct rocker_desc_info *desc_info,
875 const struct rocker_tlv *start)
876 {
877 desc_info->tlv_size = (const char *) start - desc_info->data;
878 }
879
880 /******************************************
881 * DMA rings and descriptors manipulations
882 ******************************************/
883
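/* Advance a ring position by one, wrapping back to zero at the ring size. */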
884 static u32 __pos_inc(u32 pos, size_t limit)
885 {
886 return ++pos == limit ? 0 : pos;
887 }
888
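/* Translate the device completion code carried in desc->comp_err into a
 * negative errno.
 */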
889 static int rocker_desc_err(const struct rocker_desc_info *desc_info)
890 {
891 int err = desc_info->desc->comp_err & ~ROCKER_DMA_DESC_COMP_ERR_GEN;
892
893 switch (err) {
894 case ROCKER_OK:
895 return 0;
896 case -ROCKER_ENOENT:
897 return -ENOENT;
898 case -ROCKER_ENXIO:
899 return -ENXIO;
900 case -ROCKER_ENOMEM:
901 return -ENOMEM;
902 case -ROCKER_EEXIST:
903 return -EEXIST;
904 case -ROCKER_EINVAL:
905 return -EINVAL;
906 case -ROCKER_EMSGSIZE:
907 return -EMSGSIZE;
908 case -ROCKER_ENOTSUP:
909 return -EOPNOTSUPP;
910 case -ROCKER_ENOBUFS:
911 return -ENOBUFS;
912 }
913
914 return -EINVAL;
915 }
916
917 static void rocker_desc_gen_clear(const struct rocker_desc_info *desc_info)
918 {
919 desc_info->desc->comp_err &= ~ROCKER_DMA_DESC_COMP_ERR_GEN;
920 }
921
922 static bool rocker_desc_gen(const struct rocker_desc_info *desc_info)
923 {
924 u32 comp_err = desc_info->desc->comp_err;
925
926 return comp_err & ROCKER_DMA_DESC_COMP_ERR_GEN ? true : false;
927 }
928
929 static void *rocker_desc_cookie_ptr_get(const struct rocker_desc_info *desc_info)
930 {
931 return (void *)(uintptr_t)desc_info->desc->cookie;
932 }
933
934 static void rocker_desc_cookie_ptr_set(const struct rocker_desc_info *desc_info,
935 void *ptr)
936 {
937 desc_info->desc->cookie = (uintptr_t) ptr;
938 }
939
940 static struct rocker_desc_info *
941 rocker_desc_head_get(const struct rocker_dma_ring_info *info)
942 {
943 struct rocker_desc_info *desc_info;
944 u32 head = __pos_inc(info->head, info->size);
945
946 desc_info = &info->desc_info[info->head];
947 if (head == info->tail)
948 return NULL; /* ring full */
949 desc_info->tlv_size = 0;
950 return desc_info;
951 }
952
953 static void rocker_desc_commit(const struct rocker_desc_info *desc_info)
954 {
955 desc_info->desc->buf_size = desc_info->data_size;
956 desc_info->desc->tlv_size = desc_info->tlv_size;
957 }
958
959 static void rocker_desc_head_set(const struct rocker *rocker,
960 struct rocker_dma_ring_info *info,
961 const struct rocker_desc_info *desc_info)
962 {
963 u32 head = __pos_inc(info->head, info->size);
964
965 BUG_ON(head == info->tail);
966 rocker_desc_commit(desc_info);
967 info->head = head;
968 rocker_write32(rocker, DMA_DESC_HEAD(info->type), head);
969 }
970
971 static struct rocker_desc_info *
972 rocker_desc_tail_get(struct rocker_dma_ring_info *info)
973 {
974 struct rocker_desc_info *desc_info;
975
976 if (info->tail == info->head)
977 return NULL; /* nothing to be done between head and tail */
978 desc_info = &info->desc_info[info->tail];
979 if (!rocker_desc_gen(desc_info))
980 return NULL; /* gen bit not set, desc is not ready yet */
981 info->tail = __pos_inc(info->tail, info->size);
982 desc_info->tlv_size = desc_info->desc->tlv_size;
983 return desc_info;
984 }
985
986 static void rocker_dma_ring_credits_set(const struct rocker *rocker,
987 const struct rocker_dma_ring_info *info,
988 u32 credits)
989 {
990 if (credits)
991 rocker_write32(rocker, DMA_DESC_CREDITS(info->type), credits);
992 }
993
994 static unsigned long rocker_dma_ring_size_fix(size_t size)
995 {
996 return max(ROCKER_DMA_SIZE_MIN,
997 min(roundup_pow_of_two(size), ROCKER_DMA_SIZE_MAX));
998 }
999
1000 static int rocker_dma_ring_create(const struct rocker *rocker,
1001 unsigned int type,
1002 size_t size,
1003 struct rocker_dma_ring_info *info)
1004 {
1005 int i;
1006
1007 BUG_ON(size != rocker_dma_ring_size_fix(size));
1008 info->size = size;
1009 info->type = type;
1010 info->head = 0;
1011 info->tail = 0;
1012 info->desc_info = kcalloc(info->size, sizeof(*info->desc_info),
1013 GFP_KERNEL);
1014 if (!info->desc_info)
1015 return -ENOMEM;
1016
1017 info->desc = pci_alloc_consistent(rocker->pdev,
1018 info->size * sizeof(*info->desc),
1019 &info->mapaddr);
1020 if (!info->desc) {
1021 kfree(info->desc_info);
1022 return -ENOMEM;
1023 }
1024
1025 for (i = 0; i < info->size; i++)
1026 info->desc_info[i].desc = &info->desc[i];
1027
1028 rocker_write32(rocker, DMA_DESC_CTRL(info->type),
1029 ROCKER_DMA_DESC_CTRL_RESET);
1030 rocker_write64(rocker, DMA_DESC_ADDR(info->type), info->mapaddr);
1031 rocker_write32(rocker, DMA_DESC_SIZE(info->type), info->size);
1032
1033 return 0;
1034 }
1035
1036 static void rocker_dma_ring_destroy(const struct rocker *rocker,
1037 const struct rocker_dma_ring_info *info)
1038 {
1039 rocker_write64(rocker, DMA_DESC_ADDR(info->type), 0);
1040
1041 pci_free_consistent(rocker->pdev,
1042 info->size * sizeof(struct rocker_desc),
1043 info->desc, info->mapaddr);
1044 kfree(info->desc_info);
1045 }
1046
1047 static void rocker_dma_ring_pass_to_producer(const struct rocker *rocker,
1048 struct rocker_dma_ring_info *info)
1049 {
1050 int i;
1051
1052 BUG_ON(info->head || info->tail);
1053
1054 /* When we are the consumer of this ring (event and rx rings), advance
1055 * head for each desc. That tells the hw that the desc is ready to be used by it.
1056 */
1057 for (i = 0; i < info->size - 1; i++)
1058 rocker_desc_head_set(rocker, info, &info->desc_info[i]);
1059 rocker_desc_commit(&info->desc_info[i]);
1060 }
1061
1062 static int rocker_dma_ring_bufs_alloc(const struct rocker *rocker,
1063 const struct rocker_dma_ring_info *info,
1064 int direction, size_t buf_size)
1065 {
1066 struct pci_dev *pdev = rocker->pdev;
1067 int i;
1068 int err;
1069
1070 for (i = 0; i < info->size; i++) {
1071 struct rocker_desc_info *desc_info = &info->desc_info[i];
1072 struct rocker_desc *desc = &info->desc[i];
1073 dma_addr_t dma_handle;
1074 char *buf;
1075
1076 buf = kzalloc(buf_size, GFP_KERNEL | GFP_DMA);
1077 if (!buf) {
1078 err = -ENOMEM;
1079 goto rollback;
1080 }
1081
1082 dma_handle = pci_map_single(pdev, buf, buf_size, direction);
1083 if (pci_dma_mapping_error(pdev, dma_handle)) {
1084 kfree(buf);
1085 err = -EIO;
1086 goto rollback;
1087 }
1088
1089 desc_info->data = buf;
1090 desc_info->data_size = buf_size;
1091 dma_unmap_addr_set(desc_info, mapaddr, dma_handle);
1092
1093 desc->buf_addr = dma_handle;
1094 desc->buf_size = buf_size;
1095 }
1096 return 0;
1097
1098 rollback:
1099 for (i--; i >= 0; i--) {
1100 const struct rocker_desc_info *desc_info = &info->desc_info[i];
1101
1102 pci_unmap_single(pdev, dma_unmap_addr(desc_info, mapaddr),
1103 desc_info->data_size, direction);
1104 kfree(desc_info->data);
1105 }
1106 return err;
1107 }
1108
1109 static void rocker_dma_ring_bufs_free(const struct rocker *rocker,
1110 const struct rocker_dma_ring_info *info,
1111 int direction)
1112 {
1113 struct pci_dev *pdev = rocker->pdev;
1114 int i;
1115
1116 for (i = 0; i < info->size; i++) {
1117 const struct rocker_desc_info *desc_info = &info->desc_info[i];
1118 struct rocker_desc *desc = &info->desc[i];
1119
1120 desc->buf_addr = 0;
1121 desc->buf_size = 0;
1122 pci_unmap_single(pdev, dma_unmap_addr(desc_info, mapaddr),
1123 desc_info->data_size, direction);
1124 kfree(desc_info->data);
1125 }
1126 }
1127
1128 static int rocker_dma_rings_init(struct rocker *rocker)
1129 {
1130 const struct pci_dev *pdev = rocker->pdev;
1131 int err;
1132
1133 err = rocker_dma_ring_create(rocker, ROCKER_DMA_CMD,
1134 ROCKER_DMA_CMD_DEFAULT_SIZE,
1135 &rocker->cmd_ring);
1136 if (err) {
1137 dev_err(&pdev->dev, "failed to create command dma ring\n");
1138 return err;
1139 }
1140
1141 spin_lock_init(&rocker->cmd_ring_lock);
1142
1143 err = rocker_dma_ring_bufs_alloc(rocker, &rocker->cmd_ring,
1144 PCI_DMA_BIDIRECTIONAL, PAGE_SIZE);
1145 if (err) {
1146 dev_err(&pdev->dev, "failed to alloc command dma ring buffers\n");
1147 goto err_dma_cmd_ring_bufs_alloc;
1148 }
1149
1150 err = rocker_dma_ring_create(rocker, ROCKER_DMA_EVENT,
1151 ROCKER_DMA_EVENT_DEFAULT_SIZE,
1152 &rocker->event_ring);
1153 if (err) {
1154 dev_err(&pdev->dev, "failed to create event dma ring\n");
1155 goto err_dma_event_ring_create;
1156 }
1157
1158 err = rocker_dma_ring_bufs_alloc(rocker, &rocker->event_ring,
1159 PCI_DMA_FROMDEVICE, PAGE_SIZE);
1160 if (err) {
1161 dev_err(&pdev->dev, "failed to alloc event dma ring buffers\n");
1162 goto err_dma_event_ring_bufs_alloc;
1163 }
1164 rocker_dma_ring_pass_to_producer(rocker, &rocker->event_ring);
1165 return 0;
1166
1167 err_dma_event_ring_bufs_alloc:
1168 rocker_dma_ring_destroy(rocker, &rocker->event_ring);
1169 err_dma_event_ring_create:
1170 rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring,
1171 PCI_DMA_BIDIRECTIONAL);
1172 err_dma_cmd_ring_bufs_alloc:
1173 rocker_dma_ring_destroy(rocker, &rocker->cmd_ring);
1174 return err;
1175 }
1176
1177 static void rocker_dma_rings_fini(struct rocker *rocker)
1178 {
1179 rocker_dma_ring_bufs_free(rocker, &rocker->event_ring,
1180 PCI_DMA_BIDIRECTIONAL);
1181 rocker_dma_ring_destroy(rocker, &rocker->event_ring);
1182 rocker_dma_ring_bufs_free(rocker, &rocker->cmd_ring,
1183 PCI_DMA_BIDIRECTIONAL);
1184 rocker_dma_ring_destroy(rocker, &rocker->cmd_ring);
1185 }
1186
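/* Map the skb data buffer for DMA and advertise its address and maximum
 * length to the device via RX_FRAG TLVs in the rx descriptor.
 */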
1187 static int rocker_dma_rx_ring_skb_map(const struct rocker_port *rocker_port,
1188 struct rocker_desc_info *desc_info,
1189 struct sk_buff *skb, size_t buf_len)
1190 {
1191 const struct rocker *rocker = rocker_port->rocker;
1192 struct pci_dev *pdev = rocker->pdev;
1193 dma_addr_t dma_handle;
1194
1195 dma_handle = pci_map_single(pdev, skb->data, buf_len,
1196 PCI_DMA_FROMDEVICE);
1197 if (pci_dma_mapping_error(pdev, dma_handle))
1198 return -EIO;
1199 if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_RX_FRAG_ADDR, dma_handle))
1200 goto tlv_put_failure;
1201 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_RX_FRAG_MAX_LEN, buf_len))
1202 goto tlv_put_failure;
1203 return 0;
1204
1205 tlv_put_failure:
1206 pci_unmap_single(pdev, dma_handle, buf_len, PCI_DMA_FROMDEVICE);
1207 desc_info->tlv_size = 0;
1208 return -EMSGSIZE;
1209 }
1210
1211 static size_t rocker_port_rx_buf_len(const struct rocker_port *rocker_port)
1212 {
1213 return rocker_port->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
1214 }
1215
1216 static int rocker_dma_rx_ring_skb_alloc(const struct rocker_port *rocker_port,
1217 struct rocker_desc_info *desc_info)
1218 {
1219 struct net_device *dev = rocker_port->dev;
1220 struct sk_buff *skb;
1221 size_t buf_len = rocker_port_rx_buf_len(rocker_port);
1222 int err;
1223
1224 /* Ensure that hw will see tlv_size zero in case of an error.
1225 * That tells hw to use another descriptor.
1226 */
1227 rocker_desc_cookie_ptr_set(desc_info, NULL);
1228 desc_info->tlv_size = 0;
1229
1230 skb = netdev_alloc_skb_ip_align(dev, buf_len);
1231 if (!skb)
1232 return -ENOMEM;
1233 err = rocker_dma_rx_ring_skb_map(rocker_port, desc_info, skb, buf_len);
1234 if (err) {
1235 dev_kfree_skb_any(skb);
1236 return err;
1237 }
1238 rocker_desc_cookie_ptr_set(desc_info, skb);
1239 return 0;
1240 }
1241
1242 static void rocker_dma_rx_ring_skb_unmap(const struct rocker *rocker,
1243 const struct rocker_tlv **attrs)
1244 {
1245 struct pci_dev *pdev = rocker->pdev;
1246 dma_addr_t dma_handle;
1247 size_t len;
1248
1249 if (!attrs[ROCKER_TLV_RX_FRAG_ADDR] ||
1250 !attrs[ROCKER_TLV_RX_FRAG_MAX_LEN])
1251 return;
1252 dma_handle = rocker_tlv_get_u64(attrs[ROCKER_TLV_RX_FRAG_ADDR]);
1253 len = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FRAG_MAX_LEN]);
1254 pci_unmap_single(pdev, dma_handle, len, PCI_DMA_FROMDEVICE);
1255 }
1256
1257 static void rocker_dma_rx_ring_skb_free(const struct rocker *rocker,
1258 const struct rocker_desc_info *desc_info)
1259 {
1260 const struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1];
1261 struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info);
1262
1263 if (!skb)
1264 return;
1265 rocker_tlv_parse_desc(attrs, ROCKER_TLV_RX_MAX, desc_info);
1266 rocker_dma_rx_ring_skb_unmap(rocker, attrs);
1267 dev_kfree_skb_any(skb);
1268 }
1269
1270 static int rocker_dma_rx_ring_skbs_alloc(const struct rocker_port *rocker_port)
1271 {
1272 const struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring;
1273 const struct rocker *rocker = rocker_port->rocker;
1274 int i;
1275 int err;
1276
1277 for (i = 0; i < rx_ring->size; i++) {
1278 err = rocker_dma_rx_ring_skb_alloc(rocker_port,
1279 &rx_ring->desc_info[i]);
1280 if (err)
1281 goto rollback;
1282 }
1283 return 0;
1284
1285 rollback:
1286 for (i--; i >= 0; i--)
1287 rocker_dma_rx_ring_skb_free(rocker, &rx_ring->desc_info[i]);
1288 return err;
1289 }
1290
1291 static void rocker_dma_rx_ring_skbs_free(const struct rocker_port *rocker_port)
1292 {
1293 const struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring;
1294 const struct rocker *rocker = rocker_port->rocker;
1295 int i;
1296
1297 for (i = 0; i < rx_ring->size; i++)
1298 rocker_dma_rx_ring_skb_free(rocker, &rx_ring->desc_info[i]);
1299 }
1300
1301 static int rocker_port_dma_rings_init(struct rocker_port *rocker_port)
1302 {
1303 struct rocker *rocker = rocker_port->rocker;
1304 int err;
1305
1306 err = rocker_dma_ring_create(rocker,
1307 ROCKER_DMA_TX(rocker_port->port_number),
1308 ROCKER_DMA_TX_DEFAULT_SIZE,
1309 &rocker_port->tx_ring);
1310 if (err) {
1311 netdev_err(rocker_port->dev, "failed to create tx dma ring\n");
1312 return err;
1313 }
1314
1315 err = rocker_dma_ring_bufs_alloc(rocker, &rocker_port->tx_ring,
1316 PCI_DMA_TODEVICE,
1317 ROCKER_DMA_TX_DESC_SIZE);
1318 if (err) {
1319 netdev_err(rocker_port->dev, "failed to alloc tx dma ring buffers\n");
1320 goto err_dma_tx_ring_bufs_alloc;
1321 }
1322
1323 err = rocker_dma_ring_create(rocker,
1324 ROCKER_DMA_RX(rocker_port->port_number),
1325 ROCKER_DMA_RX_DEFAULT_SIZE,
1326 &rocker_port->rx_ring);
1327 if (err) {
1328 netdev_err(rocker_port->dev, "failed to create rx dma ring\n");
1329 goto err_dma_rx_ring_create;
1330 }
1331
1332 err = rocker_dma_ring_bufs_alloc(rocker, &rocker_port->rx_ring,
1333 PCI_DMA_BIDIRECTIONAL,
1334 ROCKER_DMA_RX_DESC_SIZE);
1335 if (err) {
1336 netdev_err(rocker_port->dev, "failed to alloc rx dma ring buffers\n");
1337 goto err_dma_rx_ring_bufs_alloc;
1338 }
1339
1340 err = rocker_dma_rx_ring_skbs_alloc(rocker_port);
1341 if (err) {
1342 netdev_err(rocker_port->dev, "failed to alloc rx dma ring skbs\n");
1343 goto err_dma_rx_ring_skbs_alloc;
1344 }
1345 rocker_dma_ring_pass_to_producer(rocker, &rocker_port->rx_ring);
1346
1347 return 0;
1348
1349 err_dma_rx_ring_skbs_alloc:
1350 rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring,
1351 PCI_DMA_BIDIRECTIONAL);
1352 err_dma_rx_ring_bufs_alloc:
1353 rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring);
1354 err_dma_rx_ring_create:
1355 rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring,
1356 PCI_DMA_TODEVICE);
1357 err_dma_tx_ring_bufs_alloc:
1358 rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring);
1359 return err;
1360 }
1361
1362 static void rocker_port_dma_rings_fini(struct rocker_port *rocker_port)
1363 {
1364 struct rocker *rocker = rocker_port->rocker;
1365
1366 rocker_dma_rx_ring_skbs_free(rocker_port);
1367 rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring,
1368 PCI_DMA_BIDIRECTIONAL);
1369 rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring);
1370 rocker_dma_ring_bufs_free(rocker, &rocker_port->tx_ring,
1371 PCI_DMA_TODEVICE);
1372 rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring);
1373 }
1374
1375 static void rocker_port_set_enable(const struct rocker_port *rocker_port,
1376 bool enable)
1377 {
1378 u64 val = rocker_read64(rocker_port->rocker, PORT_PHYS_ENABLE);
1379
1380 if (enable)
1381 val |= 1ULL << rocker_port->pport;
1382 else
1383 val &= ~(1ULL << rocker_port->pport);
1384 rocker_write64(rocker_port->rocker, PORT_PHYS_ENABLE, val);
1385 }
1386
1387 /********************************
1388 * Interrupt handler and helpers
1389 ********************************/
1390
1391 static irqreturn_t rocker_cmd_irq_handler(int irq, void *dev_id)
1392 {
1393 struct rocker *rocker = dev_id;
1394 const struct rocker_desc_info *desc_info;
1395 struct rocker_wait *wait;
1396 u32 credits = 0;
1397
1398 spin_lock(&rocker->cmd_ring_lock);
1399 while ((desc_info = rocker_desc_tail_get(&rocker->cmd_ring))) {
1400 wait = rocker_desc_cookie_ptr_get(desc_info);
1401 if (wait->nowait) {
1402 rocker_desc_gen_clear(desc_info);
1403 rocker_wait_destroy(NULL, wait);
1404 } else {
1405 rocker_wait_wake_up(wait);
1406 }
1407 credits++;
1408 }
1409 spin_unlock(&rocker->cmd_ring_lock);
1410 rocker_dma_ring_credits_set(rocker, &rocker->cmd_ring, credits);
1411
1412 return IRQ_HANDLED;
1413 }
1414
1415 static void rocker_port_link_up(const struct rocker_port *rocker_port)
1416 {
1417 netif_carrier_on(rocker_port->dev);
1418 netdev_info(rocker_port->dev, "Link is up\n");
1419 }
1420
1421 static void rocker_port_link_down(const struct rocker_port *rocker_port)
1422 {
1423 netif_carrier_off(rocker_port->dev);
1424 netdev_info(rocker_port->dev, "Link is down\n");
1425 }
1426
1427 static int rocker_event_link_change(const struct rocker *rocker,
1428 const struct rocker_tlv *info)
1429 {
1430 const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_LINK_CHANGED_MAX + 1];
1431 unsigned int port_number;
1432 bool link_up;
1433 struct rocker_port *rocker_port;
1434
1435 rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_LINK_CHANGED_MAX, info);
1436 if (!attrs[ROCKER_TLV_EVENT_LINK_CHANGED_PPORT] ||
1437 !attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP])
1438 return -EIO;
1439 port_number =
1440 rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_PPORT]) - 1;
1441 link_up = rocker_tlv_get_u8(attrs[ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP]);
1442
1443 if (port_number >= rocker->port_count)
1444 return -EINVAL;
1445
1446 rocker_port = rocker->ports[port_number];
1447 if (netif_carrier_ok(rocker_port->dev) != link_up) {
1448 if (link_up)
1449 rocker_port_link_up(rocker_port);
1450 else
1451 rocker_port_link_down(rocker_port);
1452 }
1453
1454 return 0;
1455 }
1456
1457 static int rocker_port_fdb(struct rocker_port *rocker_port,
1458 struct switchdev_trans *trans,
1459 const unsigned char *addr,
1460 __be16 vlan_id, int flags);
1461
1462 static int rocker_event_mac_vlan_seen(const struct rocker *rocker,
1463 const struct rocker_tlv *info)
1464 {
1465 const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAX + 1];
1466 unsigned int port_number;
1467 struct rocker_port *rocker_port;
1468 const unsigned char *addr;
1469 int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_LEARNED;
1470 __be16 vlan_id;
1471
1472 rocker_tlv_parse_nested(attrs, ROCKER_TLV_EVENT_MAC_VLAN_MAX, info);
1473 if (!attrs[ROCKER_TLV_EVENT_MAC_VLAN_PPORT] ||
1474 !attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC] ||
1475 !attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID])
1476 return -EIO;
1477 port_number =
1478 rocker_tlv_get_u32(attrs[ROCKER_TLV_EVENT_MAC_VLAN_PPORT]) - 1;
1479 addr = rocker_tlv_data(attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAC]);
1480 vlan_id = rocker_tlv_get_be16(attrs[ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID]);
1481
1482 if (port_number >= rocker->port_count)
1483 return -EINVAL;
1484
1485 rocker_port = rocker->ports[port_number];
1486
1487 if (rocker_port->stp_state != BR_STATE_LEARNING &&
1488 rocker_port->stp_state != BR_STATE_FORWARDING)
1489 return 0;
1490
1491 return rocker_port_fdb(rocker_port, NULL, addr, vlan_id, flags);
1492 }
1493
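/* Dispatch a received event descriptor by type: link state change or
 * MAC/VLAN seen (FDB learning) notification.
 */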
1494 static int rocker_event_process(const struct rocker *rocker,
1495 const struct rocker_desc_info *desc_info)
1496 {
1497 const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAX + 1];
1498 const struct rocker_tlv *info;
1499 u16 type;
1500
1501 rocker_tlv_parse_desc(attrs, ROCKER_TLV_EVENT_MAX, desc_info);
1502 if (!attrs[ROCKER_TLV_EVENT_TYPE] ||
1503 !attrs[ROCKER_TLV_EVENT_INFO])
1504 return -EIO;
1505
1506 type = rocker_tlv_get_u16(attrs[ROCKER_TLV_EVENT_TYPE]);
1507 info = attrs[ROCKER_TLV_EVENT_INFO];
1508
1509 switch (type) {
1510 case ROCKER_TLV_EVENT_TYPE_LINK_CHANGED:
1511 return rocker_event_link_change(rocker, info);
1512 case ROCKER_TLV_EVENT_TYPE_MAC_VLAN_SEEN:
1513 return rocker_event_mac_vlan_seen(rocker, info);
1514 }
1515
1516 return -EOPNOTSUPP;
1517 }
1518
1519 static irqreturn_t rocker_event_irq_handler(int irq, void *dev_id)
1520 {
1521 struct rocker *rocker = dev_id;
1522 const struct pci_dev *pdev = rocker->pdev;
1523 const struct rocker_desc_info *desc_info;
1524 u32 credits = 0;
1525 int err;
1526
1527 while ((desc_info = rocker_desc_tail_get(&rocker->event_ring))) {
1528 err = rocker_desc_err(desc_info);
1529 if (err) {
1530 dev_err(&pdev->dev, "event desc received with err %d\n",
1531 err);
1532 } else {
1533 err = rocker_event_process(rocker, desc_info);
1534 if (err)
1535 dev_err(&pdev->dev, "event processing failed with err %d\n",
1536 err);
1537 }
1538 rocker_desc_gen_clear(desc_info);
1539 rocker_desc_head_set(rocker, &rocker->event_ring, desc_info);
1540 credits++;
1541 }
1542 rocker_dma_ring_credits_set(rocker, &rocker->event_ring, credits);
1543
1544 return IRQ_HANDLED;
1545 }
1546
1547 static irqreturn_t rocker_tx_irq_handler(int irq, void *dev_id)
1548 {
1549 struct rocker_port *rocker_port = dev_id;
1550
1551 napi_schedule(&rocker_port->napi_tx);
1552 return IRQ_HANDLED;
1553 }
1554
1555 static irqreturn_t rocker_rx_irq_handler(int irq, void *dev_id)
1556 {
1557 struct rocker_port *rocker_port = dev_id;
1558
1559 napi_schedule(&rocker_port->napi_rx);
1560 return IRQ_HANDLED;
1561 }
1562
1563 /********************
1564 * Command interface
1565 ********************/
1566
1567 typedef int (*rocker_cmd_prep_cb_t)(const struct rocker_port *rocker_port,
1568 struct rocker_desc_info *desc_info,
1569 void *priv);
1570
1571 typedef int (*rocker_cmd_proc_cb_t)(const struct rocker_port *rocker_port,
1572 const struct rocker_desc_info *desc_info,
1573 void *priv);
1574
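/* Generic command execution: allocate a wait object, build the command in a
 * cmd ring descriptor via the prepare callback, hand the descriptor to the
 * hw, wait for the completion interrupt (unless NOWAIT or in switchdev
 * prepare phase), then run the optional process callback on the response.
 */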
1575 static int rocker_cmd_exec(struct rocker_port *rocker_port,
1576 struct switchdev_trans *trans, int flags,
1577 rocker_cmd_prep_cb_t prepare, void *prepare_priv,
1578 rocker_cmd_proc_cb_t process, void *process_priv)
1579 {
1580 struct rocker *rocker = rocker_port->rocker;
1581 struct rocker_desc_info *desc_info;
1582 struct rocker_wait *wait;
1583 bool nowait = !!(flags & ROCKER_OP_FLAG_NOWAIT);
1584 unsigned long lock_flags;
1585 int err;
1586
1587 wait = rocker_wait_create(rocker_port, trans, flags);
1588 if (!wait)
1589 return -ENOMEM;
1590 wait->nowait = nowait;
1591
1592 spin_lock_irqsave(&rocker->cmd_ring_lock, lock_flags);
1593
1594 desc_info = rocker_desc_head_get(&rocker->cmd_ring);
1595 if (!desc_info) {
1596 spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags);
1597 err = -EAGAIN;
1598 goto out;
1599 }
1600
1601 err = prepare(rocker_port, desc_info, prepare_priv);
1602 if (err) {
1603 spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags);
1604 goto out;
1605 }
1606
1607 rocker_desc_cookie_ptr_set(desc_info, wait);
1608
1609 if (!switchdev_trans_ph_prepare(trans))
1610 rocker_desc_head_set(rocker, &rocker->cmd_ring, desc_info);
1611
1612 spin_unlock_irqrestore(&rocker->cmd_ring_lock, lock_flags);
1613
1614 if (nowait)
1615 return 0;
1616
1617 if (!switchdev_trans_ph_prepare(trans))
1618 if (!rocker_wait_event_timeout(wait, HZ / 10))
1619 return -EIO;
1620
1621 err = rocker_desc_err(desc_info);
1622 if (err)
1623 return err;
1624
1625 if (process)
1626 err = process(rocker_port, desc_info, process_priv);
1627
1628 rocker_desc_gen_clear(desc_info);
1629 out:
1630 rocker_wait_destroy(trans, wait);
1631 return err;
1632 }
1633
1634 static int
1635 rocker_cmd_get_port_settings_prep(const struct rocker_port *rocker_port,
1636 struct rocker_desc_info *desc_info,
1637 void *priv)
1638 {
1639 struct rocker_tlv *cmd_info;
1640
1641 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
1642 ROCKER_TLV_CMD_TYPE_GET_PORT_SETTINGS))
1643 return -EMSGSIZE;
1644 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1645 if (!cmd_info)
1646 return -EMSGSIZE;
1647 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
1648 rocker_port->pport))
1649 return -EMSGSIZE;
1650 rocker_tlv_nest_end(desc_info, cmd_info);
1651 return 0;
1652 }
1653
1654 static int
1655 rocker_cmd_get_port_settings_ethtool_proc(const struct rocker_port *rocker_port,
1656 const struct rocker_desc_info *desc_info,
1657 void *priv)
1658 {
1659 struct ethtool_cmd *ecmd = priv;
1660 const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
1661 const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
1662 u32 speed;
1663 u8 duplex;
1664 u8 autoneg;
1665
1666 rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
1667 if (!attrs[ROCKER_TLV_CMD_INFO])
1668 return -EIO;
1669
1670 rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
1671 attrs[ROCKER_TLV_CMD_INFO]);
1672 if (!info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED] ||
1673 !info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX] ||
1674 !info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG])
1675 return -EIO;
1676
1677 speed = rocker_tlv_get_u32(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED]);
1678 duplex = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX]);
1679 autoneg = rocker_tlv_get_u8(info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG]);
1680
1681 ecmd->transceiver = XCVR_INTERNAL;
1682 ecmd->supported = SUPPORTED_TP;
1683 ecmd->phy_address = 0xff;
1684 ecmd->port = PORT_TP;
1685 ethtool_cmd_speed_set(ecmd, speed);
1686 ecmd->duplex = duplex ? DUPLEX_FULL : DUPLEX_HALF;
1687 ecmd->autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
1688
1689 return 0;
1690 }
1691
1692 static int
1693 rocker_cmd_get_port_settings_macaddr_proc(const struct rocker_port *rocker_port,
1694 const struct rocker_desc_info *desc_info,
1695 void *priv)
1696 {
1697 unsigned char *macaddr = priv;
1698 const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
1699 const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
1700 const struct rocker_tlv *attr;
1701
1702 rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
1703 if (!attrs[ROCKER_TLV_CMD_INFO])
1704 return -EIO;
1705
1706 rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
1707 attrs[ROCKER_TLV_CMD_INFO]);
1708 attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR];
1709 if (!attr)
1710 return -EIO;
1711
1712 if (rocker_tlv_len(attr) != ETH_ALEN)
1713 return -EINVAL;
1714
1715 ether_addr_copy(macaddr, rocker_tlv_data(attr));
1716 return 0;
1717 }
1718
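/* Scratch buffer handed to rocker_cmd_get_port_settings_phys_name_proc();
 * len is the size of buf, including room for the terminating NUL.
 */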
1719 struct port_name {
1720 char *buf;
1721 size_t len;
1722 };
1723
1724 static int
1725 rocker_cmd_get_port_settings_phys_name_proc(const struct rocker_port *rocker_port,
1726 const struct rocker_desc_info *desc_info,
1727 void *priv)
1728 {
1729 const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
1730 const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
1731 struct port_name *name = priv;
1732 const struct rocker_tlv *attr;
1733 size_t i, j, len;
1734 const char *str;
1735
1736 rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
1737 if (!attrs[ROCKER_TLV_CMD_INFO])
1738 return -EIO;
1739
1740 rocker_tlv_parse_nested(info_attrs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
1741 attrs[ROCKER_TLV_CMD_INFO]);
1742 attr = info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_PHYS_NAME];
1743 if (!attr)
1744 return -EIO;
1745
1746 len = min_t(size_t, rocker_tlv_len(attr), name->len - 1);
1747 str = rocker_tlv_data(attr);
1748
1749 /* make sure name only contains alphanumeric characters */
1750 for (i = j = 0; i < len; ++i) {
1751 if (isalnum(str[i])) {
1752 name->buf[j] = str[i];
1753 j++;
1754 }
1755 }
1756
1757 if (j == 0)
1758 return -EIO;
1759
1760 name->buf[j] = '\0';
1761
1762 return 0;
1763 }
1764
1765 static int
1766 rocker_cmd_set_port_settings_ethtool_prep(const struct rocker_port *rocker_port,
1767 struct rocker_desc_info *desc_info,
1768 void *priv)
1769 {
1770 struct ethtool_cmd *ecmd = priv;
1771 struct rocker_tlv *cmd_info;
1772
1773 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
1774 ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
1775 return -EMSGSIZE;
1776 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1777 if (!cmd_info)
1778 return -EMSGSIZE;
1779 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
1780 rocker_port->pport))
1781 return -EMSGSIZE;
1782 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_SPEED,
1783 ethtool_cmd_speed(ecmd)))
1784 return -EMSGSIZE;
1785 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX,
1786 ecmd->duplex))
1787 return -EMSGSIZE;
1788 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG,
1789 ecmd->autoneg))
1790 return -EMSGSIZE;
1791 rocker_tlv_nest_end(desc_info, cmd_info);
1792 return 0;
1793 }
1794
1795 static int
1796 rocker_cmd_set_port_settings_macaddr_prep(const struct rocker_port *rocker_port,
1797 struct rocker_desc_info *desc_info,
1798 void *priv)
1799 {
1800 const unsigned char *macaddr = priv;
1801 struct rocker_tlv *cmd_info;
1802
1803 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
1804 ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
1805 return -EMSGSIZE;
1806 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1807 if (!cmd_info)
1808 return -EMSGSIZE;
1809 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
1810 rocker_port->pport))
1811 return -EMSGSIZE;
1812 if (rocker_tlv_put(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR,
1813 ETH_ALEN, macaddr))
1814 return -EMSGSIZE;
1815 rocker_tlv_nest_end(desc_info, cmd_info);
1816 return 0;
1817 }
1818
1819 static int
1820 rocker_cmd_set_port_settings_mtu_prep(const struct rocker_port *rocker_port,
1821 struct rocker_desc_info *desc_info,
1822 void *priv)
1823 {
1824 int mtu = *(int *)priv;
1825 struct rocker_tlv *cmd_info;
1826
1827 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
1828 ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
1829 return -EMSGSIZE;
1830 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1831 if (!cmd_info)
1832 return -EMSGSIZE;
1833 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
1834 rocker_port->pport))
1835 return -EMSGSIZE;
1836 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_MTU,
1837 mtu))
1838 return -EMSGSIZE;
1839 rocker_tlv_nest_end(desc_info, cmd_info);
1840 return 0;
1841 }
1842
1843 static int
1844 rocker_cmd_set_port_learning_prep(const struct rocker_port *rocker_port,
1845 struct rocker_desc_info *desc_info,
1846 void *priv)
1847 {
1848 struct rocker_tlv *cmd_info;
1849
1850 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
1851 ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS))
1852 return -EMSGSIZE;
1853 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
1854 if (!cmd_info)
1855 return -EMSGSIZE;
1856 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT,
1857 rocker_port->pport))
1858 return -EMSGSIZE;
1859 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_CMD_PORT_SETTINGS_LEARNING,
1860 !!(rocker_port->brport_flags & BR_LEARNING)))
1861 return -EMSGSIZE;
1862 rocker_tlv_nest_end(desc_info, cmd_info);
1863 return 0;
1864 }
1865
1866 static int rocker_cmd_get_port_settings_ethtool(struct rocker_port *rocker_port,
1867 struct ethtool_cmd *ecmd)
1868 {
1869 return rocker_cmd_exec(rocker_port, NULL, 0,
1870 rocker_cmd_get_port_settings_prep, NULL,
1871 rocker_cmd_get_port_settings_ethtool_proc,
1872 ecmd);
1873 }
1874
1875 static int rocker_cmd_get_port_settings_macaddr(struct rocker_port *rocker_port,
1876 unsigned char *macaddr)
1877 {
1878 return rocker_cmd_exec(rocker_port, NULL, 0,
1879 rocker_cmd_get_port_settings_prep, NULL,
1880 rocker_cmd_get_port_settings_macaddr_proc,
1881 macaddr);
1882 }
1883
1884 static int rocker_cmd_set_port_settings_ethtool(struct rocker_port *rocker_port,
1885 struct ethtool_cmd *ecmd)
1886 {
1887 return rocker_cmd_exec(rocker_port, NULL, 0,
1888 rocker_cmd_set_port_settings_ethtool_prep,
1889 ecmd, NULL, NULL);
1890 }
1891
1892 static int rocker_cmd_set_port_settings_macaddr(struct rocker_port *rocker_port,
1893 unsigned char *macaddr)
1894 {
1895 return rocker_cmd_exec(rocker_port, NULL, 0,
1896 rocker_cmd_set_port_settings_macaddr_prep,
1897 macaddr, NULL, NULL);
1898 }
1899
1900 static int rocker_cmd_set_port_settings_mtu(struct rocker_port *rocker_port,
1901 int mtu)
1902 {
1903 return rocker_cmd_exec(rocker_port, NULL, 0,
1904 rocker_cmd_set_port_settings_mtu_prep,
1905 &mtu, NULL, NULL);
1906 }
1907
1908 static int rocker_port_set_learning(struct rocker_port *rocker_port,
1909 struct switchdev_trans *trans)
1910 {
1911 return rocker_cmd_exec(rocker_port, trans, 0,
1912 rocker_cmd_set_port_learning_prep,
1913 NULL, NULL, NULL);
1914 }
1915
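/*
 * OF-DPA flow table encoders. Each rocker_cmd_flow_tbl_add_*() helper
 * TLV-encodes the match/action fields of one OF-DPA table (ingress
 * port, VLAN, termination MAC, unicast routing, bridging, ACL);
 * rocker_cmd_flow_tbl_add() dispatches on entry->key.tbl_id.
 */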
1916 static int
1917 rocker_cmd_flow_tbl_add_ig_port(struct rocker_desc_info *desc_info,
1918 const struct rocker_flow_tbl_entry *entry)
1919 {
1920 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
1921 entry->key.ig_port.in_pport))
1922 return -EMSGSIZE;
1923 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
1924 entry->key.ig_port.in_pport_mask))
1925 return -EMSGSIZE;
1926 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
1927 entry->key.ig_port.goto_tbl))
1928 return -EMSGSIZE;
1929
1930 return 0;
1931 }
1932
1933 static int
1934 rocker_cmd_flow_tbl_add_vlan(struct rocker_desc_info *desc_info,
1935 const struct rocker_flow_tbl_entry *entry)
1936 {
1937 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
1938 entry->key.vlan.in_pport))
1939 return -EMSGSIZE;
1940 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
1941 entry->key.vlan.vlan_id))
1942 return -EMSGSIZE;
1943 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
1944 entry->key.vlan.vlan_id_mask))
1945 return -EMSGSIZE;
1946 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
1947 entry->key.vlan.goto_tbl))
1948 return -EMSGSIZE;
1949 if (entry->key.vlan.untagged &&
1950 rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_NEW_VLAN_ID,
1951 entry->key.vlan.new_vlan_id))
1952 return -EMSGSIZE;
1953
1954 return 0;
1955 }
1956
1957 static int
1958 rocker_cmd_flow_tbl_add_term_mac(struct rocker_desc_info *desc_info,
1959 const struct rocker_flow_tbl_entry *entry)
1960 {
1961 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
1962 entry->key.term_mac.in_pport))
1963 return -EMSGSIZE;
1964 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
1965 entry->key.term_mac.in_pport_mask))
1966 return -EMSGSIZE;
1967 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
1968 entry->key.term_mac.eth_type))
1969 return -EMSGSIZE;
1970 if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
1971 ETH_ALEN, entry->key.term_mac.eth_dst))
1972 return -EMSGSIZE;
1973 if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
1974 ETH_ALEN, entry->key.term_mac.eth_dst_mask))
1975 return -EMSGSIZE;
1976 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
1977 entry->key.term_mac.vlan_id))
1978 return -EMSGSIZE;
1979 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
1980 entry->key.term_mac.vlan_id_mask))
1981 return -EMSGSIZE;
1982 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
1983 entry->key.term_mac.goto_tbl))
1984 return -EMSGSIZE;
1985 if (entry->key.term_mac.copy_to_cpu &&
1986 rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION,
1987 entry->key.term_mac.copy_to_cpu))
1988 return -EMSGSIZE;
1989
1990 return 0;
1991 }
1992
1993 static int
1994 rocker_cmd_flow_tbl_add_ucast_routing(struct rocker_desc_info *desc_info,
1995 const struct rocker_flow_tbl_entry *entry)
1996 {
1997 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
1998 entry->key.ucast_routing.eth_type))
1999 return -EMSGSIZE;
2000 if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP,
2001 entry->key.ucast_routing.dst4))
2002 return -EMSGSIZE;
2003 if (rocker_tlv_put_be32(desc_info, ROCKER_TLV_OF_DPA_DST_IP_MASK,
2004 entry->key.ucast_routing.dst4_mask))
2005 return -EMSGSIZE;
2006 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
2007 entry->key.ucast_routing.goto_tbl))
2008 return -EMSGSIZE;
2009 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
2010 entry->key.ucast_routing.group_id))
2011 return -EMSGSIZE;
2012
2013 return 0;
2014 }
2015
2016 static int
2017 rocker_cmd_flow_tbl_add_bridge(struct rocker_desc_info *desc_info,
2018 const struct rocker_flow_tbl_entry *entry)
2019 {
2020 if (entry->key.bridge.has_eth_dst &&
2021 rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
2022 ETH_ALEN, entry->key.bridge.eth_dst))
2023 return -EMSGSIZE;
2024 if (entry->key.bridge.has_eth_dst_mask &&
2025 rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
2026 ETH_ALEN, entry->key.bridge.eth_dst_mask))
2027 return -EMSGSIZE;
2028 if (entry->key.bridge.vlan_id &&
2029 rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
2030 entry->key.bridge.vlan_id))
2031 return -EMSGSIZE;
2032 if (entry->key.bridge.tunnel_id &&
2033 rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_TUNNEL_ID,
2034 entry->key.bridge.tunnel_id))
2035 return -EMSGSIZE;
2036 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GOTO_TABLE_ID,
2037 entry->key.bridge.goto_tbl))
2038 return -EMSGSIZE;
2039 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
2040 entry->key.bridge.group_id))
2041 return -EMSGSIZE;
2042 if (entry->key.bridge.copy_to_cpu &&
2043 rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_COPY_CPU_ACTION,
2044 entry->key.bridge.copy_to_cpu))
2045 return -EMSGSIZE;
2046
2047 return 0;
2048 }
2049
2050 static int
2051 rocker_cmd_flow_tbl_add_acl(struct rocker_desc_info *desc_info,
2052 const struct rocker_flow_tbl_entry *entry)
2053 {
2054 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
2055 entry->key.acl.in_pport))
2056 return -EMSGSIZE;
2057 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT_MASK,
2058 entry->key.acl.in_pport_mask))
2059 return -EMSGSIZE;
2060 if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
2061 ETH_ALEN, entry->key.acl.eth_src))
2062 return -EMSGSIZE;
2063 if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC_MASK,
2064 ETH_ALEN, entry->key.acl.eth_src_mask))
2065 return -EMSGSIZE;
2066 if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
2067 ETH_ALEN, entry->key.acl.eth_dst))
2068 return -EMSGSIZE;
2069 if (rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC_MASK,
2070 ETH_ALEN, entry->key.acl.eth_dst_mask))
2071 return -EMSGSIZE;
2072 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
2073 entry->key.acl.eth_type))
2074 return -EMSGSIZE;
2075 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
2076 entry->key.acl.vlan_id))
2077 return -EMSGSIZE;
2078 if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID_MASK,
2079 entry->key.acl.vlan_id_mask))
2080 return -EMSGSIZE;
2081
2082 switch (ntohs(entry->key.acl.eth_type)) {
2083 case ETH_P_IP:
2084 case ETH_P_IPV6:
2085 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_PROTO,
2086 entry->key.acl.ip_proto))
2087 return -EMSGSIZE;
2088 if (rocker_tlv_put_u8(desc_info,
2089 ROCKER_TLV_OF_DPA_IP_PROTO_MASK,
2090 entry->key.acl.ip_proto_mask))
2091 return -EMSGSIZE;
2092 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_DSCP,
2093 entry->key.acl.ip_tos & 0x3f))
2094 return -EMSGSIZE;
2095 if (rocker_tlv_put_u8(desc_info,
2096 ROCKER_TLV_OF_DPA_IP_DSCP_MASK,
2097 entry->key.acl.ip_tos_mask & 0x3f))
2098 return -EMSGSIZE;
2099 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_IP_ECN,
2100 (entry->key.acl.ip_tos & 0xc0) >> 6))
2101 return -EMSGSIZE;
2102 if (rocker_tlv_put_u8(desc_info,
2103 ROCKER_TLV_OF_DPA_IP_ECN_MASK,
2104 (entry->key.acl.ip_tos_mask & 0xc0) >> 6))
2105 return -EMSGSIZE;
2106 break;
2107 }
2108
2109 if (entry->key.acl.group_id != ROCKER_GROUP_NONE &&
2110 rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
2111 entry->key.acl.group_id))
2112 return -EMSGSIZE;
2113
2114 return 0;
2115 }
2116
2117 static int rocker_cmd_flow_tbl_add(const struct rocker_port *rocker_port,
2118 struct rocker_desc_info *desc_info,
2119 void *priv)
2120 {
2121 const struct rocker_flow_tbl_entry *entry = priv;
2122 struct rocker_tlv *cmd_info;
2123 int err = 0;
2124
2125 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
2126 return -EMSGSIZE;
2127 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
2128 if (!cmd_info)
2129 return -EMSGSIZE;
2130 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_TABLE_ID,
2131 entry->key.tbl_id))
2132 return -EMSGSIZE;
2133 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_PRIORITY,
2134 entry->key.priority))
2135 return -EMSGSIZE;
2136 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_HARDTIME, 0))
2137 return -EMSGSIZE;
2138 if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE,
2139 entry->cookie))
2140 return -EMSGSIZE;
2141
2142 switch (entry->key.tbl_id) {
2143 case ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT:
2144 err = rocker_cmd_flow_tbl_add_ig_port(desc_info, entry);
2145 break;
2146 case ROCKER_OF_DPA_TABLE_ID_VLAN:
2147 err = rocker_cmd_flow_tbl_add_vlan(desc_info, entry);
2148 break;
2149 case ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC:
2150 err = rocker_cmd_flow_tbl_add_term_mac(desc_info, entry);
2151 break;
2152 case ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING:
2153 err = rocker_cmd_flow_tbl_add_ucast_routing(desc_info, entry);
2154 break;
2155 case ROCKER_OF_DPA_TABLE_ID_BRIDGING:
2156 err = rocker_cmd_flow_tbl_add_bridge(desc_info, entry);
2157 break;
2158 case ROCKER_OF_DPA_TABLE_ID_ACL_POLICY:
2159 err = rocker_cmd_flow_tbl_add_acl(desc_info, entry);
2160 break;
2161 default:
2162 err = -ENOTSUPP;
2163 break;
2164 }
2165
2166 if (err)
2167 return err;
2168
2169 rocker_tlv_nest_end(desc_info, cmd_info);
2170
2171 return 0;
2172 }
2173
2174 static int rocker_cmd_flow_tbl_del(const struct rocker_port *rocker_port,
2175 struct rocker_desc_info *desc_info,
2176 void *priv)
2177 {
2178 const struct rocker_flow_tbl_entry *entry = priv;
2179 struct rocker_tlv *cmd_info;
2180
2181 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
2182 return -EMSGSIZE;
2183 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
2184 if (!cmd_info)
2185 return -EMSGSIZE;
2186 if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_OF_DPA_COOKIE,
2187 entry->cookie))
2188 return -EMSGSIZE;
2189 rocker_tlv_nest_end(desc_info, cmd_info);
2190
2191 return 0;
2192 }
2193
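/*
 * OF-DPA group table encoders, dispatched by rocker_cmd_group_tbl_add()
 * on ROCKER_GROUP_TYPE_GET(entry->group_id): L2 interface, L2 rewrite,
 * L2 flood/mcast (lists of member group IDs) and L3 unicast.
 */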
2194 static int
2195 rocker_cmd_group_tbl_add_l2_interface(struct rocker_desc_info *desc_info,
2196 struct rocker_group_tbl_entry *entry)
2197 {
2198 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_OUT_PPORT,
2199 ROCKER_GROUP_PORT_GET(entry->group_id)))
2200 return -EMSGSIZE;
2201 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_POP_VLAN,
2202 entry->l2_interface.pop_vlan))
2203 return -EMSGSIZE;
2204
2205 return 0;
2206 }
2207
2208 static int
2209 rocker_cmd_group_tbl_add_l2_rewrite(struct rocker_desc_info *desc_info,
2210 const struct rocker_group_tbl_entry *entry)
2211 {
2212 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,
2213 entry->l2_rewrite.group_id))
2214 return -EMSGSIZE;
2215 if (!is_zero_ether_addr(entry->l2_rewrite.eth_src) &&
2216 rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
2217 ETH_ALEN, entry->l2_rewrite.eth_src))
2218 return -EMSGSIZE;
2219 if (!is_zero_ether_addr(entry->l2_rewrite.eth_dst) &&
2220 rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
2221 ETH_ALEN, entry->l2_rewrite.eth_dst))
2222 return -EMSGSIZE;
2223 if (entry->l2_rewrite.vlan_id &&
2224 rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
2225 entry->l2_rewrite.vlan_id))
2226 return -EMSGSIZE;
2227
2228 return 0;
2229 }
2230
2231 static int
2232 rocker_cmd_group_tbl_add_group_ids(struct rocker_desc_info *desc_info,
2233 const struct rocker_group_tbl_entry *entry)
2234 {
2235 int i;
2236 struct rocker_tlv *group_ids;
2237
2238 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_OF_DPA_GROUP_COUNT,
2239 entry->group_count))
2240 return -EMSGSIZE;
2241
2242 group_ids = rocker_tlv_nest_start(desc_info,
2243 ROCKER_TLV_OF_DPA_GROUP_IDS);
2244 if (!group_ids)
2245 return -EMSGSIZE;
2246
2247 for (i = 0; i < entry->group_count; i++)
2248 /* Note TLV array is 1-based */
2249 if (rocker_tlv_put_u32(desc_info, i + 1, entry->group_ids[i]))
2250 return -EMSGSIZE;
2251
2252 rocker_tlv_nest_end(desc_info, group_ids);
2253
2254 return 0;
2255 }
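
/* Example: a flood group over pports 1 and 2 in VLAN 10 would be
 * encoded by the helper above as (sketch; nested group IDs are
 * indexed from 1):
 *   [GROUP_COUNT = 2]
 *   [GROUP_IDS (nest)] [1] = L2 interface group (vlan 10, pport 1)
 *                      [2] = L2 interface group (vlan 10, pport 2)
 */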
2256
2257 static int
2258 rocker_cmd_group_tbl_add_l3_unicast(struct rocker_desc_info *desc_info,
2259 const struct rocker_group_tbl_entry *entry)
2260 {
2261 if (!is_zero_ether_addr(entry->l3_unicast.eth_src) &&
2262 rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
2263 ETH_ALEN, entry->l3_unicast.eth_src))
2264 return -EMSGSIZE;
2265 if (!is_zero_ether_addr(entry->l3_unicast.eth_dst) &&
2266 rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
2267 ETH_ALEN, entry->l3_unicast.eth_dst))
2268 return -EMSGSIZE;
2269 if (entry->l3_unicast.vlan_id &&
2270 rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_VLAN_ID,
2271 entry->l3_unicast.vlan_id))
2272 return -EMSGSIZE;
2273 if (rocker_tlv_put_u8(desc_info, ROCKER_TLV_OF_DPA_TTL_CHECK,
2274 entry->l3_unicast.ttl_check))
2275 return -EMSGSIZE;
2276 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,
2277 entry->l3_unicast.group_id))
2278 return -EMSGSIZE;
2279
2280 return 0;
2281 }
2282
2283 static int rocker_cmd_group_tbl_add(const struct rocker_port *rocker_port,
2284 struct rocker_desc_info *desc_info,
2285 void *priv)
2286 {
2287 struct rocker_group_tbl_entry *entry = priv;
2288 struct rocker_tlv *cmd_info;
2289 int err = 0;
2290
2291 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
2292 return -EMSGSIZE;
2293 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
2294 if (!cmd_info)
2295 return -EMSGSIZE;
2296
2297 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
2298 entry->group_id))
2299 return -EMSGSIZE;
2300
2301 switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
2302 case ROCKER_OF_DPA_GROUP_TYPE_L2_INTERFACE:
2303 err = rocker_cmd_group_tbl_add_l2_interface(desc_info, entry);
2304 break;
2305 case ROCKER_OF_DPA_GROUP_TYPE_L2_REWRITE:
2306 err = rocker_cmd_group_tbl_add_l2_rewrite(desc_info, entry);
2307 break;
2308 case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
2309 case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
2310 err = rocker_cmd_group_tbl_add_group_ids(desc_info, entry);
2311 break;
2312 case ROCKER_OF_DPA_GROUP_TYPE_L3_UCAST:
2313 err = rocker_cmd_group_tbl_add_l3_unicast(desc_info, entry);
2314 break;
2315 default:
2316 err = -ENOTSUPP;
2317 break;
2318 }
2319
2320 if (err)
2321 return err;
2322
2323 rocker_tlv_nest_end(desc_info, cmd_info);
2324
2325 return 0;
2326 }
2327
2328 static int rocker_cmd_group_tbl_del(const struct rocker_port *rocker_port,
2329 struct rocker_desc_info *desc_info,
2330 void *priv)
2331 {
2332 const struct rocker_group_tbl_entry *entry = priv;
2333 struct rocker_tlv *cmd_info;
2334
2335 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE, entry->cmd))
2336 return -EMSGSIZE;
2337 cmd_info = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
2338 if (!cmd_info)
2339 return -EMSGSIZE;
2340 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID,
2341 entry->group_id))
2342 return -EMSGSIZE;
2343 rocker_tlv_nest_end(desc_info, cmd_info);
2344
2345 return 0;
2346 }
2347
2348 /***************************************************
2349 * Flow, group, FDB, internal VLAN and neigh tables
2350 ***************************************************/
2351
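/* The driver shadows the device's flow and group tables in hashtables
 * so that a second add of the same key becomes a MOD command reusing
 * the original cookie (or group_id), and deletes can hand the device
 * the cookie it was given at add time.
 */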
2352 static int rocker_init_tbls(struct rocker *rocker)
2353 {
2354 hash_init(rocker->flow_tbl);
2355 spin_lock_init(&rocker->flow_tbl_lock);
2356
2357 hash_init(rocker->group_tbl);
2358 spin_lock_init(&rocker->group_tbl_lock);
2359
2360 hash_init(rocker->fdb_tbl);
2361 spin_lock_init(&rocker->fdb_tbl_lock);
2362
2363 hash_init(rocker->internal_vlan_tbl);
2364 spin_lock_init(&rocker->internal_vlan_tbl_lock);
2365
2366 hash_init(rocker->neigh_tbl);
2367 spin_lock_init(&rocker->neigh_tbl_lock);
2368
2369 return 0;
2370 }
2371
2372 static void rocker_free_tbls(struct rocker *rocker)
2373 {
2374 unsigned long flags;
2375 struct rocker_flow_tbl_entry *flow_entry;
2376 struct rocker_group_tbl_entry *group_entry;
2377 struct rocker_fdb_tbl_entry *fdb_entry;
2378 struct rocker_internal_vlan_tbl_entry *internal_vlan_entry;
2379 struct rocker_neigh_tbl_entry *neigh_entry;
2380 struct hlist_node *tmp;
2381 int bkt;
2382
2383 spin_lock_irqsave(&rocker->flow_tbl_lock, flags);
2384 hash_for_each_safe(rocker->flow_tbl, bkt, tmp, flow_entry, entry)
2385 hash_del(&flow_entry->entry);
2386 spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags);
2387
2388 spin_lock_irqsave(&rocker->group_tbl_lock, flags);
2389 hash_for_each_safe(rocker->group_tbl, bkt, tmp, group_entry, entry)
2390 hash_del(&group_entry->entry);
2391 spin_unlock_irqrestore(&rocker->group_tbl_lock, flags);
2392
2393 spin_lock_irqsave(&rocker->fdb_tbl_lock, flags);
2394 hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, fdb_entry, entry)
2395 hash_del(&fdb_entry->entry);
2396 spin_unlock_irqrestore(&rocker->fdb_tbl_lock, flags);
2397
2398 spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, flags);
2399 hash_for_each_safe(rocker->internal_vlan_tbl, bkt,
2400 tmp, internal_vlan_entry, entry)
2401 hash_del(&internal_vlan_entry->entry);
2402 spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, flags);
2403
2404 spin_lock_irqsave(&rocker->neigh_tbl_lock, flags);
2405 hash_for_each_safe(rocker->neigh_tbl, bkt, tmp, neigh_entry, entry)
2406 hash_del(&neigh_entry->entry);
2407 spin_unlock_irqrestore(&rocker->neigh_tbl_lock, flags);
2408 }
2409
2410 static struct rocker_flow_tbl_entry *
2411 rocker_flow_tbl_find(const struct rocker *rocker,
2412 const struct rocker_flow_tbl_entry *match)
2413 {
2414 struct rocker_flow_tbl_entry *found;
2415 size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
2416
2417 hash_for_each_possible(rocker->flow_tbl, found,
2418 entry, match->key_crc32) {
2419 if (memcmp(&found->key, &match->key, key_len) == 0)
2420 return found;
2421 }
2422
2423 return NULL;
2424 }
2425
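/* Flow entries hash on a CRC-32 of the match key. key_len may stop
 * short of the full key (see rocker_flow_tbl_ucast4_routing()) so
 * that action fields such as the group ID don't affect the lookup.
 */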
2426 static int rocker_flow_tbl_add(struct rocker_port *rocker_port,
2427 struct switchdev_trans *trans, int flags,
2428 struct rocker_flow_tbl_entry *match)
2429 {
2430 struct rocker *rocker = rocker_port->rocker;
2431 struct rocker_flow_tbl_entry *found;
2432 size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
2433 unsigned long lock_flags;
2434
2435 match->key_crc32 = crc32(~0, &match->key, key_len);
2436
2437 spin_lock_irqsave(&rocker->flow_tbl_lock, lock_flags);
2438
2439 found = rocker_flow_tbl_find(rocker, match);
2440
2441 if (found) {
2442 match->cookie = found->cookie;
2443 if (!switchdev_trans_ph_prepare(trans))
2444 hash_del(&found->entry);
2445 rocker_port_kfree(trans, found);
2446 found = match;
2447 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD;
2448 } else {
2449 found = match;
2450 found->cookie = rocker->flow_tbl_next_cookie++;
2451 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD;
2452 }
2453
2454 if (!switchdev_trans_ph_prepare(trans))
2455 hash_add(rocker->flow_tbl, &found->entry, found->key_crc32);
2456
2457 spin_unlock_irqrestore(&rocker->flow_tbl_lock, lock_flags);
2458
2459 return rocker_cmd_exec(rocker_port, trans, flags,
2460 rocker_cmd_flow_tbl_add, found, NULL, NULL);
2461 }
2462
2463 static int rocker_flow_tbl_del(struct rocker_port *rocker_port,
2464 struct switchdev_trans *trans, int flags,
2465 struct rocker_flow_tbl_entry *match)
2466 {
2467 struct rocker *rocker = rocker_port->rocker;
2468 struct rocker_flow_tbl_entry *found;
2469 size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
2470 unsigned long lock_flags;
2471 int err = 0;
2472
2473 match->key_crc32 = crc32(~0, &match->key, key_len);
2474
2475 spin_lock_irqsave(&rocker->flow_tbl_lock, lock_flags);
2476
2477 found = rocker_flow_tbl_find(rocker, match);
2478
2479 if (found) {
2480 if (!switchdev_trans_ph_prepare(trans))
2481 hash_del(&found->entry);
2482 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL;
2483 }
2484
2485 spin_unlock_irqrestore(&rocker->flow_tbl_lock, lock_flags);
2486
2487 rocker_port_kfree(trans, match);
2488
2489 if (found) {
2490 err = rocker_cmd_exec(rocker_port, trans, flags,
2491 rocker_cmd_flow_tbl_del,
2492 found, NULL, NULL);
2493 rocker_port_kfree(trans, found);
2494 }
2495
2496 return err;
2497 }
2498
2499 static int rocker_flow_tbl_do(struct rocker_port *rocker_port,
2500 struct switchdev_trans *trans, int flags,
2501 struct rocker_flow_tbl_entry *entry)
2502 {
2503 if (flags & ROCKER_OP_FLAG_REMOVE)
2504 return rocker_flow_tbl_del(rocker_port, trans, flags, entry);
2505 else
2506 return rocker_flow_tbl_add(rocker_port, trans, flags, entry);
2507 }
2508
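/* The rocker_flow_tbl_*() helpers below allocate an entry with
 * rocker_port_kzalloc(), fill in one table's key and hand ownership
 * to rocker_flow_tbl_do(), which adds or deletes based on
 * ROCKER_OP_FLAG_REMOVE.
 */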
2509 static int rocker_flow_tbl_ig_port(struct rocker_port *rocker_port,
2510 struct switchdev_trans *trans, int flags,
2511 u32 in_pport, u32 in_pport_mask,
2512 enum rocker_of_dpa_table_id goto_tbl)
2513 {
2514 struct rocker_flow_tbl_entry *entry;
2515
2516 entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
2517 if (!entry)
2518 return -ENOMEM;
2519
2520 entry->key.priority = ROCKER_PRIORITY_IG_PORT;
2521 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT;
2522 entry->key.ig_port.in_pport = in_pport;
2523 entry->key.ig_port.in_pport_mask = in_pport_mask;
2524 entry->key.ig_port.goto_tbl = goto_tbl;
2525
2526 return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
2527 }
2528
2529 static int rocker_flow_tbl_vlan(struct rocker_port *rocker_port,
2530 struct switchdev_trans *trans, int flags,
2531 u32 in_pport, __be16 vlan_id,
2532 __be16 vlan_id_mask,
2533 enum rocker_of_dpa_table_id goto_tbl,
2534 bool untagged, __be16 new_vlan_id)
2535 {
2536 struct rocker_flow_tbl_entry *entry;
2537
2538 entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
2539 if (!entry)
2540 return -ENOMEM;
2541
2542 entry->key.priority = ROCKER_PRIORITY_VLAN;
2543 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_VLAN;
2544 entry->key.vlan.in_pport = in_pport;
2545 entry->key.vlan.vlan_id = vlan_id;
2546 entry->key.vlan.vlan_id_mask = vlan_id_mask;
2547 entry->key.vlan.goto_tbl = goto_tbl;
2548
2549 entry->key.vlan.untagged = untagged;
2550 entry->key.vlan.new_vlan_id = new_vlan_id;
2551
2552 return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
2553 }
2554
2555 static int rocker_flow_tbl_term_mac(struct rocker_port *rocker_port,
2556 struct switchdev_trans *trans,
2557 u32 in_pport, u32 in_pport_mask,
2558 __be16 eth_type, const u8 *eth_dst,
2559 const u8 *eth_dst_mask, __be16 vlan_id,
2560 __be16 vlan_id_mask, bool copy_to_cpu,
2561 int flags)
2562 {
2563 struct rocker_flow_tbl_entry *entry;
2564
2565 entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
2566 if (!entry)
2567 return -ENOMEM;
2568
2569 if (is_multicast_ether_addr(eth_dst)) {
2570 entry->key.priority = ROCKER_PRIORITY_TERM_MAC_MCAST;
2571 entry->key.term_mac.goto_tbl =
2572 ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING;
2573 } else {
2574 entry->key.priority = ROCKER_PRIORITY_TERM_MAC_UCAST;
2575 entry->key.term_mac.goto_tbl =
2576 ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
2577 }
2578
2579 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
2580 entry->key.term_mac.in_pport = in_pport;
2581 entry->key.term_mac.in_pport_mask = in_pport_mask;
2582 entry->key.term_mac.eth_type = eth_type;
2583 ether_addr_copy(entry->key.term_mac.eth_dst, eth_dst);
2584 ether_addr_copy(entry->key.term_mac.eth_dst_mask, eth_dst_mask);
2585 entry->key.term_mac.vlan_id = vlan_id;
2586 entry->key.term_mac.vlan_id_mask = vlan_id_mask;
2587 entry->key.term_mac.copy_to_cpu = copy_to_cpu;
2588
2589 return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
2590 }
2591
2592 static int rocker_flow_tbl_bridge(struct rocker_port *rocker_port,
2593 struct switchdev_trans *trans, int flags,
2594 const u8 *eth_dst, const u8 *eth_dst_mask,
2595 __be16 vlan_id, u32 tunnel_id,
2596 enum rocker_of_dpa_table_id goto_tbl,
2597 u32 group_id, bool copy_to_cpu)
2598 {
2599 struct rocker_flow_tbl_entry *entry;
2600 u32 priority;
2601 bool vlan_bridging = !!vlan_id;
2602 bool dflt = !eth_dst || eth_dst_mask;
2603 bool wild = false;
2604
2605 entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
2606 if (!entry)
2607 return -ENOMEM;
2608
2609 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_BRIDGING;
2610
2611 if (eth_dst) {
2612 entry->key.bridge.has_eth_dst = 1;
2613 ether_addr_copy(entry->key.bridge.eth_dst, eth_dst);
2614 }
2615 if (eth_dst_mask) {
2616 entry->key.bridge.has_eth_dst_mask = 1;
2617 ether_addr_copy(entry->key.bridge.eth_dst_mask, eth_dst_mask);
2618 if (!ether_addr_equal(eth_dst_mask, ff_mac))
2619 wild = true;
2620 }
2621
2622 priority = ROCKER_PRIORITY_UNKNOWN;
2623 if (vlan_bridging && dflt && wild)
2624 priority = ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_WILD;
2625 else if (vlan_bridging && dflt && !wild)
2626 priority = ROCKER_PRIORITY_BRIDGING_VLAN_DFLT_EXACT;
2627 else if (vlan_bridging && !dflt)
2628 priority = ROCKER_PRIORITY_BRIDGING_VLAN;
2629 else if (!vlan_bridging && dflt && wild)
2630 priority = ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_WILD;
2631 else if (!vlan_bridging && dflt && !wild)
2632 priority = ROCKER_PRIORITY_BRIDGING_TENANT_DFLT_EXACT;
2633 else if (!vlan_bridging && !dflt)
2634 priority = ROCKER_PRIORITY_BRIDGING_TENANT;
2635
2636 entry->key.priority = priority;
2637 entry->key.bridge.vlan_id = vlan_id;
2638 entry->key.bridge.tunnel_id = tunnel_id;
2639 entry->key.bridge.goto_tbl = goto_tbl;
2640 entry->key.bridge.group_id = group_id;
2641 entry->key.bridge.copy_to_cpu = copy_to_cpu;
2642
2643 return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
2644 }
2645
2646 static int rocker_flow_tbl_ucast4_routing(struct rocker_port *rocker_port,
2647 struct switchdev_trans *trans,
2648 __be16 eth_type, __be32 dst,
2649 __be32 dst_mask, u32 priority,
2650 enum rocker_of_dpa_table_id goto_tbl,
2651 u32 group_id, int flags)
2652 {
2653 struct rocker_flow_tbl_entry *entry;
2654
2655 entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
2656 if (!entry)
2657 return -ENOMEM;
2658
2659 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
2660 entry->key.priority = priority;
2661 entry->key.ucast_routing.eth_type = eth_type;
2662 entry->key.ucast_routing.dst4 = dst;
2663 entry->key.ucast_routing.dst4_mask = dst_mask;
2664 entry->key.ucast_routing.goto_tbl = goto_tbl;
2665 entry->key.ucast_routing.group_id = group_id;
2666 entry->key_len = offsetof(struct rocker_flow_tbl_key,
2667 ucast_routing.group_id);
2668
2669 return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
2670 }
2671
2672 static int rocker_flow_tbl_acl(struct rocker_port *rocker_port,
2673 struct switchdev_trans *trans, int flags,
2674 u32 in_pport, u32 in_pport_mask,
2675 const u8 *eth_src, const u8 *eth_src_mask,
2676 const u8 *eth_dst, const u8 *eth_dst_mask,
2677 __be16 eth_type, __be16 vlan_id,
2678 __be16 vlan_id_mask, u8 ip_proto,
2679 u8 ip_proto_mask, u8 ip_tos, u8 ip_tos_mask,
2680 u32 group_id)
2681 {
2682 u32 priority;
2683 struct rocker_flow_tbl_entry *entry;
2684
2685 entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
2686 if (!entry)
2687 return -ENOMEM;
2688
2689 priority = ROCKER_PRIORITY_ACL_NORMAL;
2690 if (eth_dst && eth_dst_mask) {
2691 if (ether_addr_equal(eth_dst_mask, mcast_mac))
2692 priority = ROCKER_PRIORITY_ACL_DFLT;
2693 else if (is_link_local_ether_addr(eth_dst))
2694 priority = ROCKER_PRIORITY_ACL_CTRL;
2695 }
2696
2697 entry->key.priority = priority;
2698 entry->key.tbl_id = ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
2699 entry->key.acl.in_pport = in_pport;
2700 entry->key.acl.in_pport_mask = in_pport_mask;
2701
2702 if (eth_src)
2703 ether_addr_copy(entry->key.acl.eth_src, eth_src);
2704 if (eth_src_mask)
2705 ether_addr_copy(entry->key.acl.eth_src_mask, eth_src_mask);
2706 if (eth_dst)
2707 ether_addr_copy(entry->key.acl.eth_dst, eth_dst);
2708 if (eth_dst_mask)
2709 ether_addr_copy(entry->key.acl.eth_dst_mask, eth_dst_mask);
2710
2711 entry->key.acl.eth_type = eth_type;
2712 entry->key.acl.vlan_id = vlan_id;
2713 entry->key.acl.vlan_id_mask = vlan_id_mask;
2714 entry->key.acl.ip_proto = ip_proto;
2715 entry->key.acl.ip_proto_mask = ip_proto_mask;
2716 entry->key.acl.ip_tos = ip_tos;
2717 entry->key.acl.ip_tos_mask = ip_tos_mask;
2718 entry->key.acl.group_id = group_id;
2719
2720 return rocker_flow_tbl_do(rocker_port, trans, flags, entry);
2721 }
2722
2723 static struct rocker_group_tbl_entry *
2724 rocker_group_tbl_find(const struct rocker *rocker,
2725 const struct rocker_group_tbl_entry *match)
2726 {
2727 struct rocker_group_tbl_entry *found;
2728
2729 hash_for_each_possible(rocker->group_tbl, found,
2730 entry, match->group_id) {
2731 if (found->group_id == match->group_id)
2732 return found;
2733 }
2734
2735 return NULL;
2736 }
2737
2738 static void rocker_group_tbl_entry_free(struct switchdev_trans *trans,
2739 struct rocker_group_tbl_entry *entry)
2740 {
2741 switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
2742 case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
2743 case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
2744 rocker_port_kfree(trans, entry->group_ids);
2745 break;
2746 default:
2747 break;
2748 }
2749 rocker_port_kfree(trans, entry);
2750 }
2751
2752 static int rocker_group_tbl_add(struct rocker_port *rocker_port,
2753 struct switchdev_trans *trans, int flags,
2754 struct rocker_group_tbl_entry *match)
2755 {
2756 struct rocker *rocker = rocker_port->rocker;
2757 struct rocker_group_tbl_entry *found;
2758 unsigned long lock_flags;
2759
2760 spin_lock_irqsave(&rocker->group_tbl_lock, lock_flags);
2761
2762 found = rocker_group_tbl_find(rocker, match);
2763
2764 if (found) {
2765 if (!switchdev_trans_ph_prepare(trans))
2766 hash_del(&found->entry);
2767 rocker_group_tbl_entry_free(trans, found);
2768 found = match;
2769 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD;
2770 } else {
2771 found = match;
2772 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD;
2773 }
2774
2775 if (!switchdev_trans_ph_prepare(trans))
2776 hash_add(rocker->group_tbl, &found->entry, found->group_id);
2777
2778 spin_unlock_irqrestore(&rocker->group_tbl_lock, lock_flags);
2779
2780 return rocker_cmd_exec(rocker_port, trans, flags,
2781 rocker_cmd_group_tbl_add, found, NULL, NULL);
2782 }
2783
2784 static int rocker_group_tbl_del(struct rocker_port *rocker_port,
2785 struct switchdev_trans *trans, int flags,
2786 struct rocker_group_tbl_entry *match)
2787 {
2788 struct rocker *rocker = rocker_port->rocker;
2789 struct rocker_group_tbl_entry *found;
2790 unsigned long lock_flags;
2791 int err = 0;
2792
2793 spin_lock_irqsave(&rocker->group_tbl_lock, lock_flags);
2794
2795 found = rocker_group_tbl_find(rocker, match);
2796
2797 if (found) {
2798 if (!switchdev_trans_ph_prepare(trans))
2799 hash_del(&found->entry);
2800 found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL;
2801 }
2802
2803 spin_unlock_irqrestore(&rocker->group_tbl_lock, lock_flags);
2804
2805 rocker_group_tbl_entry_free(trans, match);
2806
2807 if (found) {
2808 err = rocker_cmd_exec(rocker_port, trans, flags,
2809 rocker_cmd_group_tbl_del,
2810 found, NULL, NULL);
2811 rocker_group_tbl_entry_free(trans, found);
2812 }
2813
2814 return err;
2815 }
2816
2817 static int rocker_group_tbl_do(struct rocker_port *rocker_port,
2818 struct switchdev_trans *trans, int flags,
2819 struct rocker_group_tbl_entry *entry)
2820 {
2821 if (flags & ROCKER_OP_FLAG_REMOVE)
2822 return rocker_group_tbl_del(rocker_port, trans, flags, entry);
2823 else
2824 return rocker_group_tbl_add(rocker_port, trans, flags, entry);
2825 }
2826
2827 static int rocker_group_l2_interface(struct rocker_port *rocker_port,
2828 struct switchdev_trans *trans, int flags,
2829 __be16 vlan_id, u32 out_pport,
2830 int pop_vlan)
2831 {
2832 struct rocker_group_tbl_entry *entry;
2833
2834 entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
2835 if (!entry)
2836 return -ENOMEM;
2837
2838 entry->group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
2839 entry->l2_interface.pop_vlan = pop_vlan;
2840
2841 return rocker_group_tbl_do(rocker_port, trans, flags, entry);
2842 }
2843
2844 static int rocker_group_l2_fan_out(struct rocker_port *rocker_port,
2845 struct switchdev_trans *trans,
2846 int flags, u8 group_count,
2847 const u32 *group_ids, u32 group_id)
2848 {
2849 struct rocker_group_tbl_entry *entry;
2850
2851 entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
2852 if (!entry)
2853 return -ENOMEM;
2854
2855 entry->group_id = group_id;
2856 entry->group_count = group_count;
2857
2858 entry->group_ids = rocker_port_kcalloc(rocker_port, trans, flags,
2859 group_count, sizeof(u32));
2860 if (!entry->group_ids) {
2861 rocker_port_kfree(trans, entry);
2862 return -ENOMEM;
2863 }
2864 memcpy(entry->group_ids, group_ids, group_count * sizeof(u32));
2865
2866 return rocker_group_tbl_do(rocker_port, trans, flags, entry);
2867 }
2868
2869 static int rocker_group_l2_flood(struct rocker_port *rocker_port,
2870 struct switchdev_trans *trans, int flags,
2871 __be16 vlan_id, u8 group_count,
2872 const u32 *group_ids, u32 group_id)
2873 {
2874 return rocker_group_l2_fan_out(rocker_port, trans, flags,
2875 group_count, group_ids,
2876 group_id);
2877 }
2878
2879 static int rocker_group_l3_unicast(struct rocker_port *rocker_port,
2880 struct switchdev_trans *trans, int flags,
2881 u32 index, const u8 *src_mac, const u8 *dst_mac,
2882 __be16 vlan_id, bool ttl_check, u32 pport)
2883 {
2884 struct rocker_group_tbl_entry *entry;
2885
2886 entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
2887 if (!entry)
2888 return -ENOMEM;
2889
2890 entry->group_id = ROCKER_GROUP_L3_UNICAST(index);
2891 if (src_mac)
2892 ether_addr_copy(entry->l3_unicast.eth_src, src_mac);
2893 if (dst_mac)
2894 ether_addr_copy(entry->l3_unicast.eth_dst, dst_mac);
2895 entry->l3_unicast.vlan_id = vlan_id;
2896 entry->l3_unicast.ttl_check = ttl_check;
2897 entry->l3_unicast.group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, pport);
2898
2899 return rocker_group_tbl_do(rocker_port, trans, flags, entry);
2900 }
2901
2902 static struct rocker_neigh_tbl_entry *
2903 rocker_neigh_tbl_find(const struct rocker *rocker, __be32 ip_addr)
2904 {
2905 struct rocker_neigh_tbl_entry *found;
2906
2907 hash_for_each_possible(rocker->neigh_tbl, found,
2908 entry, be32_to_cpu(ip_addr))
2909 if (found->ip_addr == ip_addr)
2910 return found;
2911
2912 return NULL;
2913 }
2914
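/* The _rocker_neigh_*() helpers below expect rocker->neigh_tbl_lock
 * to be held. Entries are reference-counted so a neighbor's L3
 * unicast group can be shared by its /32 route and by other routes'
 * nexthops.
 */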
2915 static void _rocker_neigh_add(struct rocker *rocker,
2916 struct switchdev_trans *trans,
2917 struct rocker_neigh_tbl_entry *entry)
2918 {
2919 if (!switchdev_trans_ph_commit(trans))
2920 entry->index = rocker->neigh_tbl_next_index++;
2921 if (switchdev_trans_ph_prepare(trans))
2922 return;
2923 entry->ref_count++;
2924 hash_add(rocker->neigh_tbl, &entry->entry,
2925 be32_to_cpu(entry->ip_addr));
2926 }
2927
2928 static void _rocker_neigh_del(struct switchdev_trans *trans,
2929 struct rocker_neigh_tbl_entry *entry)
2930 {
2931 if (switchdev_trans_ph_prepare(trans))
2932 return;
2933 if (--entry->ref_count == 0) {
2934 hash_del(&entry->entry);
2935 rocker_port_kfree(trans, entry);
2936 }
2937 }
2938
2939 static void _rocker_neigh_update(struct rocker_neigh_tbl_entry *entry,
2940 struct switchdev_trans *trans,
2941 const u8 *eth_dst, bool ttl_check)
2942 {
2943 if (eth_dst) {
2944 ether_addr_copy(entry->eth_dst, eth_dst);
2945 entry->ttl_check = ttl_check;
2946 } else if (!switchdev_trans_ph_prepare(trans)) {
2947 entry->ref_count++;
2948 }
2949 }
2950
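/* Install, update or remove the L3 unicast group and /32 route for an
 * IPv4 neighbor. Whether this is an add, update or remove is derived
 * from ROCKER_OP_FLAG_REMOVE and from whether the neighbor already
 * has an entry in neigh_tbl.
 */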
2951 static int rocker_port_ipv4_neigh(struct rocker_port *rocker_port,
2952 struct switchdev_trans *trans,
2953 int flags, __be32 ip_addr, const u8 *eth_dst)
2954 {
2955 struct rocker *rocker = rocker_port->rocker;
2956 struct rocker_neigh_tbl_entry *entry;
2957 struct rocker_neigh_tbl_entry *found;
2958 unsigned long lock_flags;
2959 __be16 eth_type = htons(ETH_P_IP);
2960 enum rocker_of_dpa_table_id goto_tbl =
2961 ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
2962 u32 group_id;
2963 u32 priority = 0;
2964 bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
2965 bool updating;
2966 bool removing;
2967 int err = 0;
2968
2969 entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
2970 if (!entry)
2971 return -ENOMEM;
2972
2973 spin_lock_irqsave(&rocker->neigh_tbl_lock, lock_flags);
2974
2975 found = rocker_neigh_tbl_find(rocker, ip_addr);
2976
2977 updating = found && adding;
2978 removing = found && !adding;
2979 adding = !found && adding;
2980
2981 if (adding) {
2982 entry->ip_addr = ip_addr;
2983 entry->dev = rocker_port->dev;
2984 ether_addr_copy(entry->eth_dst, eth_dst);
2985 entry->ttl_check = true;
2986 _rocker_neigh_add(rocker, trans, entry);
2987 } else if (removing) {
2988 memcpy(entry, found, sizeof(*entry));
2989 _rocker_neigh_del(trans, found);
2990 } else if (updating) {
2991 _rocker_neigh_update(found, trans, eth_dst, true);
2992 memcpy(entry, found, sizeof(*entry));
2993 } else {
2994 err = -ENOENT;
2995 }
2996
2997 spin_unlock_irqrestore(&rocker->neigh_tbl_lock, lock_flags);
2998
2999 if (err)
3000 goto err_out;
3001
3002 /* For each active neighbor, we have an L3 unicast group and
3003 * a /32 route to the neighbor, which uses the L3 unicast
3004 * group. The L3 unicast group can also be referred to by
3005 * other routes' nexthops.
3006 */
3007
3008 err = rocker_group_l3_unicast(rocker_port, trans, flags,
3009 entry->index,
3010 rocker_port->dev->dev_addr,
3011 entry->eth_dst,
3012 rocker_port->internal_vlan_id,
3013 entry->ttl_check,
3014 rocker_port->pport);
3015 if (err) {
3016 netdev_err(rocker_port->dev,
3017 "Error (%d) L3 unicast group index %d\n",
3018 err, entry->index);
3019 goto err_out;
3020 }
3021
3022 if (adding || removing) {
3023 group_id = ROCKER_GROUP_L3_UNICAST(entry->index);
3024 err = rocker_flow_tbl_ucast4_routing(rocker_port, trans,
3025 eth_type, ip_addr,
3026 inet_make_mask(32),
3027 priority, goto_tbl,
3028 group_id, flags);
3029
3030 if (err)
3031 netdev_err(rocker_port->dev,
3032 "Error (%d) /32 unicast route %pI4 group 0x%08x\n",
3033 err, &entry->ip_addr, group_id);
3034 }
3035
3036 err_out:
3037 if (!adding)
3038 rocker_port_kfree(trans, entry);
3039
3040 return err;
3041 }
3042
3043 static int rocker_port_ipv4_resolve(struct rocker_port *rocker_port,
3044 struct switchdev_trans *trans,
3045 __be32 ip_addr)
3046 {
3047 struct net_device *dev = rocker_port->dev;
3048 struct neighbour *n = __ipv4_neigh_lookup(dev, (__force u32)ip_addr);
3049 int err = 0;
3050
3051 if (!n) {
3052 n = neigh_create(&arp_tbl, &ip_addr, dev);
3053 if (IS_ERR(n))
3054 return PTR_ERR(n);
3055 }
3056
3057 /* If the neigh is already resolved, then go ahead and
3058 * install the entry, otherwise start the ARP process to
3059 * resolve the neigh.
3060 */
3061
3062 if (n->nud_state & NUD_VALID)
3063 err = rocker_port_ipv4_neigh(rocker_port, trans, 0,
3064 ip_addr, n->ha);
3065 else
3066 neigh_event_send(n, NULL);
3067
3068 neigh_release(n);
3069 return err;
3070 }
3071
3072 static int rocker_port_ipv4_nh(struct rocker_port *rocker_port,
3073 struct switchdev_trans *trans, int flags,
3074 __be32 ip_addr, u32 *index)
3075 {
3076 struct rocker *rocker = rocker_port->rocker;
3077 struct rocker_neigh_tbl_entry *entry;
3078 struct rocker_neigh_tbl_entry *found;
3079 unsigned long lock_flags;
3080 bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
3081 bool updating;
3082 bool removing;
3083 bool resolved = true;
3084 int err = 0;
3085
3086 entry = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*entry));
3087 if (!entry)
3088 return -ENOMEM;
3089
3090 spin_lock_irqsave(&rocker->neigh_tbl_lock, lock_flags);
3091
3092 found = rocker_neigh_tbl_find(rocker, ip_addr);
3093 if (found)
3094 *index = found->index;
3095
3096 updating = found && adding;
3097 removing = found && !adding;
3098 adding = !found && adding;
3099
3100 if (adding) {
3101 entry->ip_addr = ip_addr;
3102 entry->dev = rocker_port->dev;
3103 _rocker_neigh_add(rocker, trans, entry);
3104 *index = entry->index;
3105 resolved = false;
3106 } else if (removing) {
3107 _rocker_neigh_del(trans, found);
3108 } else if (updating) {
3109 _rocker_neigh_update(found, trans, NULL, false);
3110 resolved = !is_zero_ether_addr(found->eth_dst);
3111 } else {
3112 err = -ENOENT;
3113 }
3114
3115 spin_unlock_irqrestore(&rocker->neigh_tbl_lock, lock_flags);
3116
3117 if (!adding)
3118 rocker_port_kfree(trans, entry);
3119
3120 if (err)
3121 return err;
3122
3123 /* Resolved means neigh ip_addr is resolved to neigh mac. */
3124
3125 if (!resolved)
3126 err = rocker_port_ipv4_resolve(rocker_port, trans, ip_addr);
3127
3128 return err;
3129 }
3130
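/* VLAN membership changes rebuild the per-VLAN L2 flood group, which
 * references one L2 interface group for each bridged member port.
 */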
3131 static int rocker_port_vlan_flood_group(struct rocker_port *rocker_port,
3132 struct switchdev_trans *trans,
3133 int flags, __be16 vlan_id)
3134 {
3135 struct rocker_port *p;
3136 const struct rocker *rocker = rocker_port->rocker;
3137 u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
3138 u32 *group_ids;
3139 u8 group_count = 0;
3140 int err = 0;
3141 int i;
3142
3143 group_ids = rocker_port_kcalloc(rocker_port, trans, flags,
3144 rocker->port_count, sizeof(u32));
3145 if (!group_ids)
3146 return -ENOMEM;
3147
3148 /* Adjust the flood group for this VLAN. The flood group
3149 * references an L2 interface group for each port in this
3150 * VLAN.
3151 */
3152
3153 for (i = 0; i < rocker->port_count; i++) {
3154 p = rocker->ports[i];
3155 if (!p)
3156 continue;
3157 if (!rocker_port_is_bridged(p))
3158 continue;
3159 if (test_bit(ntohs(vlan_id), p->vlan_bitmap)) {
3160 group_ids[group_count++] =
3161 ROCKER_GROUP_L2_INTERFACE(vlan_id, p->pport);
3162 }
3163 }
3164
3165 /* If there are no bridged ports in this VLAN, we're done */
3166 if (group_count == 0)
3167 goto no_ports_in_vlan;
3168
3169 err = rocker_group_l2_flood(rocker_port, trans, flags, vlan_id,
3170 group_count, group_ids, group_id);
3171 if (err)
3172 netdev_err(rocker_port->dev,
3173 "Error (%d) port VLAN l2 flood group\n", err);
3174
3175 no_ports_in_vlan:
3176 rocker_port_kfree(trans, group_ids);
3177 return err;
3178 }
3179
3180 static int rocker_port_vlan_l2_groups(struct rocker_port *rocker_port,
3181 struct switchdev_trans *trans, int flags,
3182 __be16 vlan_id, bool pop_vlan)
3183 {
3184 const struct rocker *rocker = rocker_port->rocker;
3185 struct rocker_port *p;
3186 bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
3187 u32 out_pport;
3188 int ref = 0;
3189 int err;
3190 int i;
3191
3192 /* An L2 interface group for this port in this VLAN, but
3193 * only when port STP state is LEARNING|FORWARDING.
3194 */
3195
3196 if (rocker_port->stp_state == BR_STATE_LEARNING ||
3197 rocker_port->stp_state == BR_STATE_FORWARDING) {
3198 out_pport = rocker_port->pport;
3199 err = rocker_group_l2_interface(rocker_port, trans, flags,
3200 vlan_id, out_pport, pop_vlan);
3201 if (err) {
3202 netdev_err(rocker_port->dev,
3203 "Error (%d) port VLAN l2 group for pport %d\n",
3204 err, out_pport);
3205 return err;
3206 }
3207 }
3208
3209 /* An L2 interface group for this VLAN to CPU port.
3210 * Add when first port joins this VLAN and destroy when
3211 * last port leaves this VLAN.
3212 */
3213
3214 for (i = 0; i < rocker->port_count; i++) {
3215 p = rocker->ports[i];
3216 if (p && test_bit(ntohs(vlan_id), p->vlan_bitmap))
3217 ref++;
3218 }
3219
3220 if ((!adding || ref != 1) && (adding || ref != 0))
3221 return 0;
3222
3223 out_pport = 0;
3224 err = rocker_group_l2_interface(rocker_port, trans, flags,
3225 vlan_id, out_pport, pop_vlan);
3226 if (err) {
3227 netdev_err(rocker_port->dev,
3228 "Error (%d) port VLAN l2 group for CPU port\n", err);
3229 return err;
3230 }
3231
3232 return 0;
3233 }
3234
3235 static struct rocker_ctrl {
3236 const u8 *eth_dst;
3237 const u8 *eth_dst_mask;
3238 __be16 eth_type;
3239 bool acl;
3240 bool bridge;
3241 bool term;
3242 bool copy_to_cpu;
3243 } rocker_ctrls[] = {
3244 [ROCKER_CTRL_LINK_LOCAL_MCAST] = {
3245 /* pass link local multicast pkts up to CPU for filtering */
3246 .eth_dst = ll_mac,
3247 .eth_dst_mask = ll_mask,
3248 .acl = true,
3249 },
3250 [ROCKER_CTRL_LOCAL_ARP] = {
3251 /* pass local ARP pkts up to CPU */
3252 .eth_dst = zero_mac,
3253 .eth_dst_mask = zero_mac,
3254 .eth_type = htons(ETH_P_ARP),
3255 .acl = true,
3256 },
3257 [ROCKER_CTRL_IPV4_MCAST] = {
3258 /* pass IPv4 mcast pkts up to CPU, RFC 1112 */
3259 .eth_dst = ipv4_mcast,
3260 .eth_dst_mask = ipv4_mask,
3261 .eth_type = htons(ETH_P_IP),
3262 .term = true,
3263 .copy_to_cpu = true,
3264 },
3265 [ROCKER_CTRL_IPV6_MCAST] = {
3266 /* pass IPv6 mcast pkts up to CPU, RFC 2464 */
3267 .eth_dst = ipv6_mcast,
3268 .eth_dst_mask = ipv6_mask,
3269 .eth_type = htons(ETH_P_IPV6),
3270 .term = true,
3271 .copy_to_cpu = true,
3272 },
3273 [ROCKER_CTRL_DFLT_BRIDGING] = {
3274 /* flood any pkts on vlan */
3275 .bridge = true,
3276 .copy_to_cpu = true,
3277 },
3278 [ROCKER_CTRL_DFLT_OVS] = {
3279 /* pass all pkts up to CPU */
3280 .eth_dst = zero_mac,
3281 .eth_dst_mask = zero_mac,
3282 .acl = true,
3283 },
3284 };
3285
3286 static int rocker_port_ctrl_vlan_acl(struct rocker_port *rocker_port,
3287 struct switchdev_trans *trans, int flags,
3288 const struct rocker_ctrl *ctrl, __be16 vlan_id)
3289 {
3290 u32 in_pport = rocker_port->pport;
3291 u32 in_pport_mask = 0xffffffff;
3292 u32 out_pport = 0;
3293 const u8 *eth_src = NULL;
3294 const u8 *eth_src_mask = NULL;
3295 __be16 vlan_id_mask = htons(0xffff);
3296 u8 ip_proto = 0;
3297 u8 ip_proto_mask = 0;
3298 u8 ip_tos = 0;
3299 u8 ip_tos_mask = 0;
3300 u32 group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
3301 int err;
3302
3303 err = rocker_flow_tbl_acl(rocker_port, trans, flags,
3304 in_pport, in_pport_mask,
3305 eth_src, eth_src_mask,
3306 ctrl->eth_dst, ctrl->eth_dst_mask,
3307 ctrl->eth_type,
3308 vlan_id, vlan_id_mask,
3309 ip_proto, ip_proto_mask,
3310 ip_tos, ip_tos_mask,
3311 group_id);
3312
3313 if (err)
3314 netdev_err(rocker_port->dev, "Error (%d) ctrl ACL\n", err);
3315
3316 return err;
3317 }
3318
3319 static int rocker_port_ctrl_vlan_bridge(struct rocker_port *rocker_port,
3320 struct switchdev_trans *trans,
3321 int flags,
3322 const struct rocker_ctrl *ctrl,
3323 __be16 vlan_id)
3324 {
3325 enum rocker_of_dpa_table_id goto_tbl =
3326 ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
3327 u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
3328 u32 tunnel_id = 0;
3329 int err;
3330
3331 if (!rocker_port_is_bridged(rocker_port))
3332 return 0;
3333
3334 err = rocker_flow_tbl_bridge(rocker_port, trans, flags,
3335 ctrl->eth_dst, ctrl->eth_dst_mask,
3336 vlan_id, tunnel_id,
3337 goto_tbl, group_id, ctrl->copy_to_cpu);
3338
3339 if (err)
3340 netdev_err(rocker_port->dev, "Error (%d) ctrl FLOOD\n", err);
3341
3342 return err;
3343 }
3344
3345 static int rocker_port_ctrl_vlan_term(struct rocker_port *rocker_port,
3346 struct switchdev_trans *trans, int flags,
3347 const struct rocker_ctrl *ctrl, __be16 vlan_id)
3348 {
3349 u32 in_pport_mask = 0xffffffff;
3350 __be16 vlan_id_mask = htons(0xffff);
3351 int err;
3352
3353 if (ntohs(vlan_id) == 0)
3354 vlan_id = rocker_port->internal_vlan_id;
3355
3356 err = rocker_flow_tbl_term_mac(rocker_port, trans,
3357 rocker_port->pport, in_pport_mask,
3358 ctrl->eth_type, ctrl->eth_dst,
3359 ctrl->eth_dst_mask, vlan_id,
3360 vlan_id_mask, ctrl->copy_to_cpu,
3361 flags);
3362
3363 if (err)
3364 netdev_err(rocker_port->dev, "Error (%d) ctrl term\n", err);
3365
3366 return err;
3367 }
3368
3369 static int rocker_port_ctrl_vlan(struct rocker_port *rocker_port,
3370 struct switchdev_trans *trans, int flags,
3371 const struct rocker_ctrl *ctrl, __be16 vlan_id)
3372 {
3373 if (ctrl->acl)
3374 return rocker_port_ctrl_vlan_acl(rocker_port, trans, flags,
3375 ctrl, vlan_id);
3376 if (ctrl->bridge)
3377 return rocker_port_ctrl_vlan_bridge(rocker_port, trans, flags,
3378 ctrl, vlan_id);
3379
3380 if (ctrl->term)
3381 return rocker_port_ctrl_vlan_term(rocker_port, trans, flags,
3382 ctrl, vlan_id);
3383
3384 return -EOPNOTSUPP;
3385 }
3386
3387 static int rocker_port_ctrl_vlan_add(struct rocker_port *rocker_port,
3388 struct switchdev_trans *trans, int flags,
3389 __be16 vlan_id)
3390 {
3391 int err = 0;
3392 int i;
3393
3394 for (i = 0; i < ROCKER_CTRL_MAX; i++) {
3395 if (rocker_port->ctrls[i]) {
3396 err = rocker_port_ctrl_vlan(rocker_port, trans, flags,
3397 &rocker_ctrls[i], vlan_id);
3398 if (err)
3399 return err;
3400 }
3401 }
3402
3403 return err;
3404 }
3405
3406 static int rocker_port_ctrl(struct rocker_port *rocker_port,
3407 struct switchdev_trans *trans, int flags,
3408 const struct rocker_ctrl *ctrl)
3409 {
3410 u16 vid;
3411 int err = 0;
3412
3413 for (vid = 1; vid < VLAN_N_VID; vid++) {
3414 if (!test_bit(vid, rocker_port->vlan_bitmap))
3415 continue;
3416 err = rocker_port_ctrl_vlan(rocker_port, trans, flags,
3417 ctrl, htons(vid));
3418 if (err)
3419 break;
3420 }
3421
3422 return err;
3423 }
3424
3425 static int rocker_port_vlan(struct rocker_port *rocker_port,
3426 struct switchdev_trans *trans, int flags, u16 vid)
3427 {
3428 enum rocker_of_dpa_table_id goto_tbl =
3429 ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
3430 u32 in_pport = rocker_port->pport;
3431 __be16 vlan_id = htons(vid);
3432 __be16 vlan_id_mask = htons(0xffff);
3433 __be16 internal_vlan_id;
3434 bool untagged;
3435 bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
3436 int err;
3437
3438 internal_vlan_id = rocker_port_vid_to_vlan(rocker_port, vid, &untagged);
3439
3440 if (adding && test_bit(ntohs(internal_vlan_id),
3441 rocker_port->vlan_bitmap))
3442 return 0; /* already added */
3443 else if (!adding && !test_bit(ntohs(internal_vlan_id),
3444 rocker_port->vlan_bitmap))
3445 return 0; /* already removed */
3446
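 /* Toggle membership up front so the group/flood helpers below see
  * the new state; during the switchdev prepare phase the bit is
  * flipped back at err_out, since nothing may persist until commit.
  */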
3447 change_bit(ntohs(internal_vlan_id), rocker_port->vlan_bitmap);
3448
3449 if (adding) {
3450 err = rocker_port_ctrl_vlan_add(rocker_port, trans, flags,
3451 internal_vlan_id);
3452 if (err) {
3453 netdev_err(rocker_port->dev,
3454 "Error (%d) port ctrl vlan add\n", err);
3455 goto err_out;
3456 }
3457 }
3458
3459 err = rocker_port_vlan_l2_groups(rocker_port, trans, flags,
3460 internal_vlan_id, untagged);
3461 if (err) {
3462 netdev_err(rocker_port->dev,
3463 "Error (%d) port VLAN l2 groups\n", err);
3464 goto err_out;
3465 }
3466
3467 err = rocker_port_vlan_flood_group(rocker_port, trans, flags,
3468 internal_vlan_id);
3469 if (err) {
3470 netdev_err(rocker_port->dev,
3471 "Error (%d) port VLAN l2 flood group\n", err);
3472 goto err_out;
3473 }
3474
3475 err = rocker_flow_tbl_vlan(rocker_port, trans, flags,
3476 in_pport, vlan_id, vlan_id_mask,
3477 goto_tbl, untagged, internal_vlan_id);
3478 if (err)
3479 netdev_err(rocker_port->dev,
3480 "Error (%d) port VLAN table\n", err);
3481
3482 err_out:
3483 if (switchdev_trans_ph_prepare(trans))
3484 change_bit(ntohs(internal_vlan_id), rocker_port->vlan_bitmap);
3485
3486 return err;
3487 }
3488
3489 static int rocker_port_ig_tbl(struct rocker_port *rocker_port,
3490 struct switchdev_trans *trans, int flags)
3491 {
3492 enum rocker_of_dpa_table_id goto_tbl;
3493 u32 in_pport;
3494 u32 in_pport_mask;
3495 int err;
3496
3497 /* Normal Ethernet Frames. Matches pkts from any local physical
3498 * ports. Goto VLAN tbl.
3499 */
3500
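 /* A zero in_pport with a mask covering only the upper 16 bits
  * wildcards the low bits, so this single entry matches every
  * physical pport (assuming pport numbers fit in the low 16 bits).
  */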
3501 in_pport = 0;
3502 in_pport_mask = 0xffff0000;
3503 goto_tbl = ROCKER_OF_DPA_TABLE_ID_VLAN;
3504
3505 err = rocker_flow_tbl_ig_port(rocker_port, trans, flags,
3506 in_pport, in_pport_mask,
3507 goto_tbl);
3508 if (err)
3509 netdev_err(rocker_port->dev,
3510 "Error (%d) ingress port table entry\n", err);
3511
3512 return err;
3513 }
3514
3515 struct rocker_fdb_learn_work {
3516 struct work_struct work;
3517 struct rocker_port *rocker_port;
3518 struct switchdev_trans *trans;
3519 int flags;
3520 u8 addr[ETH_ALEN];
3521 u16 vid;
3522 };
3523
3524 static void rocker_port_fdb_learn_work(struct work_struct *work)
3525 {
3526 const struct rocker_fdb_learn_work *lw =
3527 container_of(work, struct rocker_fdb_learn_work, work);
3528 bool removing = (lw->flags & ROCKER_OP_FLAG_REMOVE);
3529 bool learned = (lw->flags & ROCKER_OP_FLAG_LEARNED);
3530 struct switchdev_notifier_fdb_info info;
3531
3532 info.addr = lw->addr;
3533 info.vid = lw->vid;
3534
3535 rtnl_lock();
3536 if (learned && removing)
3537 call_switchdev_notifiers(SWITCHDEV_FDB_DEL,
3538 lw->rocker_port->dev, &info.info);
3539 else if (learned && !removing)
3540 call_switchdev_notifiers(SWITCHDEV_FDB_ADD,
3541 lw->rocker_port->dev, &info.info);
3542 rtnl_unlock();
3543
3544 rocker_port_kfree(lw->trans, work);
3545 }
3546
3547 static int rocker_port_fdb_learn(struct rocker_port *rocker_port,
3548 struct switchdev_trans *trans, int flags,
3549 const u8 *addr, __be16 vlan_id)
3550 {
3551 struct rocker_fdb_learn_work *lw;
3552 enum rocker_of_dpa_table_id goto_tbl =
3553 ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
3554 u32 out_pport = rocker_port->pport;
3555 u32 tunnel_id = 0;
3556 u32 group_id = ROCKER_GROUP_NONE;
3557 bool syncing = !!(rocker_port->brport_flags & BR_LEARNING_SYNC);
3558 bool copy_to_cpu = false;
3559 int err;
3560
3561 if (rocker_port_is_bridged(rocker_port))
3562 group_id = ROCKER_GROUP_L2_INTERFACE(vlan_id, out_pport);
3563
3564 if (!(flags & ROCKER_OP_FLAG_REFRESH)) {
3565 err = rocker_flow_tbl_bridge(rocker_port, trans, flags, addr,
3566 NULL, vlan_id, tunnel_id, goto_tbl,
3567 group_id, copy_to_cpu);
3568 if (err)
3569 return err;
3570 }
3571
3572 if (!syncing)
3573 return 0;
3574
3575 if (!rocker_port_is_bridged(rocker_port))
3576 return 0;
3577
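 /* The SWITCHDEV_FDB_ADD/DEL notification is deferred to a work
  * item, presumably because the notifier chain needs rtnl_lock,
  * which cannot be taken in this context.
  */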
3578 lw = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*lw));
3579 if (!lw)
3580 return -ENOMEM;
3581
3582 INIT_WORK(&lw->work, rocker_port_fdb_learn_work);
3583
3584 lw->rocker_port = rocker_port;
3585 lw->trans = trans;
3586 lw->flags = flags;
3587 ether_addr_copy(lw->addr, addr);
3588 lw->vid = rocker_port_vlan_to_vid(rocker_port, vlan_id);
3589
3590 if (switchdev_trans_ph_prepare(trans))
3591 rocker_port_kfree(trans, lw);
3592 else
3593 schedule_work(&lw->work);
3594
3595 return 0;
3596 }
3597
3598 static struct rocker_fdb_tbl_entry *
3599 rocker_fdb_tbl_find(const struct rocker *rocker,
3600 const struct rocker_fdb_tbl_entry *match)
3601 {
3602 struct rocker_fdb_tbl_entry *found;
3603
3604 hash_for_each_possible(rocker->fdb_tbl, found, entry, match->key_crc32)
3605 if (memcmp(&found->key, &match->key, sizeof(found->key)) == 0)
3606 return found;
3607
3608 return NULL;
3609 }
3610
3611 static int rocker_port_fdb(struct rocker_port *rocker_port,
3612 struct switchdev_trans *trans,
3613 const unsigned char *addr,
3614 __be16 vlan_id, int flags)
3615 {
3616 struct rocker *rocker = rocker_port->rocker;
3617 struct rocker_fdb_tbl_entry *fdb;
3618 struct rocker_fdb_tbl_entry *found;
3619 bool removing = (flags & ROCKER_OP_FLAG_REMOVE);
3620 unsigned long lock_flags;
3621
3622 fdb = rocker_port_kzalloc(rocker_port, trans, flags, sizeof(*fdb));
3623 if (!fdb)
3624 return -ENOMEM;
3625
3626 fdb->learned = (flags & ROCKER_OP_FLAG_LEARNED);
3627 fdb->touched = jiffies;
3628 fdb->key.rocker_port = rocker_port;
3629 ether_addr_copy(fdb->key.addr, addr);
3630 fdb->key.vlan_id = vlan_id;
3631 fdb->key_crc32 = crc32(~0, &fdb->key, sizeof(fdb->key));
3632
3633 spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
3634
3635 found = rocker_fdb_tbl_find(rocker, fdb);
3636
3637 if (found) {
3638 found->touched = jiffies;
3639 if (removing) {
3640 rocker_port_kfree(trans, fdb);
3641 if (!switchdev_trans_ph_prepare(trans))
3642 hash_del(&found->entry);
3643 }
3644 } else if (!removing) {
3645 if (!switchdev_trans_ph_prepare(trans))
3646 hash_add(rocker->fdb_tbl, &fdb->entry,
3647 fdb->key_crc32);
3648 }
3649
3650 spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);
3651
3652 /* Check if adding and already exists, or removing and can't find */
3653 if (!found != !removing) {
3654 rocker_port_kfree(trans, fdb);
3655 if (!found && removing)
3656 return 0;
3657 /* Refreshing existing to update aging timers */
3658 flags |= ROCKER_OP_FLAG_REFRESH;
3659 }
3660
3661 return rocker_port_fdb_learn(rocker_port, trans, flags, addr, vlan_id);
3662 }
3663
3664 static int rocker_port_fdb_flush(struct rocker_port *rocker_port,
3665 struct switchdev_trans *trans, int flags)
3666 {
3667 struct rocker *rocker = rocker_port->rocker;
3668 struct rocker_fdb_tbl_entry *found;
3669 unsigned long lock_flags;
3670 struct hlist_node *tmp;
3671 int bkt;
3672 int err = 0;
3673
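 /* The caller has already moved the port to its new STP state, so
  * skip the flush while the port is still learning/forwarding and
  * only flush on transitions towards listening/blocking/disabled.
  */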
3674 if (rocker_port->stp_state == BR_STATE_LEARNING ||
3675 rocker_port->stp_state == BR_STATE_FORWARDING)
3676 return 0;
3677
3678 flags |= ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_REMOVE;
3679
3680 spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
3681
3682 hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) {
3683 if (found->key.rocker_port != rocker_port)
3684 continue;
3685 if (!found->learned)
3686 continue;
3687 err = rocker_port_fdb_learn(rocker_port, trans, flags,
3688 found->key.addr,
3689 found->key.vlan_id);
3690 if (err)
3691 goto err_out;
3692 if (!switchdev_trans_ph_prepare(trans))
3693 hash_del(&found->entry);
3694 }
3695
3696 err_out:
3697 spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);
3698
3699 return err;
3700 }
3701
3702 static void rocker_fdb_cleanup(unsigned long data)
3703 {
3704 struct rocker *rocker = (struct rocker *)data;
3705 struct rocker_port *rocker_port;
3706 struct rocker_fdb_tbl_entry *entry;
3707 struct hlist_node *tmp;
3708 unsigned long next_timer = jiffies + rocker->ageing_time;
3709 unsigned long expires;
3710 unsigned long lock_flags;
3711 int flags = ROCKER_OP_FLAG_NOWAIT | ROCKER_OP_FLAG_REMOVE |
3712 ROCKER_OP_FLAG_LEARNED;
3713 int bkt;
3714
3715 spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
3716
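 /* Age out learned entries whose last-touched time plus the owning
  * port's ageing interval has passed, and track the earliest
  * remaining expiry so the timer is re-armed no later than needed.
  */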
3717 hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, entry, entry) {
3718 if (!entry->learned)
3719 continue;
3720 rocker_port = entry->key.rocker_port;
3721 expires = entry->touched + rocker_port->ageing_time;
3722 if (time_before_eq(expires, jiffies)) {
3723 rocker_port_fdb_learn(rocker_port, NULL,
3724 flags, entry->key.addr,
3725 entry->key.vlan_id);
3726 hash_del(&entry->entry);
3727 } else if (time_before(expires, next_timer)) {
3728 next_timer = expires;
3729 }
3730 }
3731
3732 spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);
3733
3734 mod_timer(&rocker->fdb_cleanup_timer, round_jiffies_up(next_timer));
3735 }
3736
3737 static int rocker_port_router_mac(struct rocker_port *rocker_port,
3738 struct switchdev_trans *trans, int flags,
3739 __be16 vlan_id)
3740 {
3741 u32 in_pport_mask = 0xffffffff;
3742 __be16 eth_type;
3743 const u8 *dst_mac_mask = ff_mac;
3744 __be16 vlan_id_mask = htons(0xffff);
3745 bool copy_to_cpu = false;
3746 int err;
3747
3748 if (ntohs(vlan_id) == 0)
3749 vlan_id = rocker_port->internal_vlan_id;
3750
3751 eth_type = htons(ETH_P_IP);
3752 err = rocker_flow_tbl_term_mac(rocker_port, trans,
3753 rocker_port->pport, in_pport_mask,
3754 eth_type, rocker_port->dev->dev_addr,
3755 dst_mac_mask, vlan_id, vlan_id_mask,
3756 copy_to_cpu, flags);
3757 if (err)
3758 return err;
3759
3760 eth_type = htons(ETH_P_IPV6);
3761 err = rocker_flow_tbl_term_mac(rocker_port, trans,
3762 rocker_port->pport, in_pport_mask,
3763 eth_type, rocker_port->dev->dev_addr,
3764 dst_mac_mask, vlan_id, vlan_id_mask,
3765 copy_to_cpu, flags);
3766
3767 return err;
3768 }
3769
3770 static int rocker_port_fwding(struct rocker_port *rocker_port,
3771 struct switchdev_trans *trans, int flags)
3772 {
3773 bool pop_vlan;
3774 u32 out_pport;
3775 __be16 vlan_id;
3776 u16 vid;
3777 int err;
3778
3779 /* Port will be forwarding-enabled if its STP state is LEARNING
3780 * or FORWARDING. Traffic from CPU can still egress, regardless of
3781 * port STP state. Use L2 interface group on port VLANs as a way
3782 * to toggle port forwarding: if forwarding is disabled, L2
3783 * interface group will not exist.
3784 */
3785
3786 if (rocker_port->stp_state != BR_STATE_LEARNING &&
3787 rocker_port->stp_state != BR_STATE_FORWARDING)
3788 flags |= ROCKER_OP_FLAG_REMOVE;
3789
3790 out_pport = rocker_port->pport;
3791 for (vid = 1; vid < VLAN_N_VID; vid++) {
3792 if (!test_bit(vid, rocker_port->vlan_bitmap))
3793 continue;
3794 vlan_id = htons(vid);
3795 pop_vlan = rocker_vlan_id_is_internal(vlan_id);
3796 err = rocker_group_l2_interface(rocker_port, trans, flags,
3797 vlan_id, out_pport, pop_vlan);
3798 if (err) {
3799 netdev_err(rocker_port->dev,
3800 "Error (%d) port VLAN l2 group for pport %d\n",
3801 err, out_pport);
3802 return err;
3803 }
3804 }
3805
3806 return 0;
3807 }
3808
3809 static int rocker_port_stp_update(struct rocker_port *rocker_port,
3810 struct switchdev_trans *trans, int flags,
3811 u8 state)
3812 {
3813 bool want[ROCKER_CTRL_MAX] = { 0, };
3814 bool prev_ctrls[ROCKER_CTRL_MAX];
3815 u8 uninitialized_var(prev_state);
3816 int err;
3817 int i;
3818
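 /* Snapshot the current ctrls and STP state so a prepare-phase run
  * can be rolled back at err_out below.
  */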
3819 if (switchdev_trans_ph_prepare(trans)) {
3820 memcpy(prev_ctrls, rocker_port->ctrls, sizeof(prev_ctrls));
3821 prev_state = rocker_port->stp_state;
3822 }
3823
3824 if (rocker_port->stp_state == state)
3825 return 0;
3826
3827 rocker_port->stp_state = state;
3828
3829 switch (state) {
3830 case BR_STATE_DISABLED:
3831 /* port is completely disabled */
3832 break;
3833 case BR_STATE_LISTENING:
3834 case BR_STATE_BLOCKING:
3835 want[ROCKER_CTRL_LINK_LOCAL_MCAST] = true;
3836 break;
3837 case BR_STATE_LEARNING:
3838 case BR_STATE_FORWARDING:
3839 if (!rocker_port_is_ovsed(rocker_port))
3840 want[ROCKER_CTRL_LINK_LOCAL_MCAST] = true;
3841 want[ROCKER_CTRL_IPV4_MCAST] = true;
3842 want[ROCKER_CTRL_IPV6_MCAST] = true;
3843 if (rocker_port_is_bridged(rocker_port))
3844 want[ROCKER_CTRL_DFLT_BRIDGING] = true;
3845 else if (rocker_port_is_ovsed(rocker_port))
3846 want[ROCKER_CTRL_DFLT_OVS] = true;
3847 else
3848 want[ROCKER_CTRL_LOCAL_ARP] = true;
3849 break;
3850 }
3851
3852 for (i = 0; i < ROCKER_CTRL_MAX; i++) {
3853 if (want[i] != rocker_port->ctrls[i]) {
3854 int ctrl_flags = flags |
3855 (want[i] ? 0 : ROCKER_OP_FLAG_REMOVE);
3856 err = rocker_port_ctrl(rocker_port, trans, ctrl_flags,
3857 &rocker_ctrls[i]);
3858 if (err)
3859 goto err_out;
3860 rocker_port->ctrls[i] = want[i];
3861 }
3862 }
3863
3864 err = rocker_port_fdb_flush(rocker_port, trans, flags);
3865 if (err)
3866 goto err_out;
3867
3868 err = rocker_port_fwding(rocker_port, trans, flags);
3869
3870 err_out:
3871 if (switchdev_trans_ph_prepare(trans)) {
3872 memcpy(rocker_port->ctrls, prev_ctrls, sizeof(prev_ctrls));
3873 rocker_port->stp_state = prev_state;
3874 }
3875
3876 return err;
3877 }
3878
3879 static int rocker_port_fwd_enable(struct rocker_port *rocker_port,
3880 struct switchdev_trans *trans, int flags)
3881 {
3882 if (rocker_port_is_bridged(rocker_port))
3883 /* bridge STP will enable port */
3884 return 0;
3885
3886 /* port is not bridged, so simulate going to FORWARDING state */
3887 return rocker_port_stp_update(rocker_port, trans, flags,
3888 BR_STATE_FORWARDING);
3889 }
3890
3891 static int rocker_port_fwd_disable(struct rocker_port *rocker_port,
3892 struct switchdev_trans *trans, int flags)
3893 {
3894 if (rocker_port_is_bridged(rocker_port))
3895 /* bridge STP will disable port */
3896 return 0;
3897
3898 /* port is not bridged, so simulate going to DISABLED state */
3899 return rocker_port_stp_update(rocker_port, trans, flags,
3900 BR_STATE_DISABLED);
3901 }
3902
3903 static struct rocker_internal_vlan_tbl_entry *
3904 rocker_internal_vlan_tbl_find(const struct rocker *rocker, int ifindex)
3905 {
3906 struct rocker_internal_vlan_tbl_entry *found;
3907
3908 hash_for_each_possible(rocker->internal_vlan_tbl, found,
3909 entry, ifindex) {
3910 if (found->ifindex == ifindex)
3911 return found;
3912 }
3913
3914 return NULL;
3915 }
3916
3917 static __be16 rocker_port_internal_vlan_id_get(struct rocker_port *rocker_port,
3918 int ifindex)
3919 {
3920 struct rocker *rocker = rocker_port->rocker;
3921 struct rocker_internal_vlan_tbl_entry *entry;
3922 struct rocker_internal_vlan_tbl_entry *found;
3923 unsigned long lock_flags;
3924 int i;
3925
3926 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3927 if (!entry)
3928 return 0;
3929
3930 entry->ifindex = ifindex;
3931
3932 spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, lock_flags);
3933
3934 found = rocker_internal_vlan_tbl_find(rocker, ifindex);
3935 if (found) {
3936 kfree(entry);
3937 goto found;
3938 }
3939
3940 found = entry;
3941 hash_add(rocker->internal_vlan_tbl, &found->entry, found->ifindex);
3942
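 /* Claim the first free internal VLAN ID; if none is left, vlan_id
  * stays 0 from the kzalloc above and that is what gets returned.
  */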
3943 for (i = 0; i < ROCKER_N_INTERNAL_VLANS; i++) {
3944 if (test_and_set_bit(i, rocker->internal_vlan_bitmap))
3945 continue;
3946 found->vlan_id = htons(ROCKER_INTERNAL_VLAN_ID_BASE + i);
3947 goto found;
3948 }
3949
3950 netdev_err(rocker_port->dev, "Out of internal VLAN IDs\n");
3951
3952 found:
3953 found->ref_count++;
3954 spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, lock_flags);
3955
3956 return found->vlan_id;
3957 }
3958
3959 static void
3960 rocker_port_internal_vlan_id_put(const struct rocker_port *rocker_port,
3961 int ifindex)
3962 {
3963 struct rocker *rocker = rocker_port->rocker;
3964 struct rocker_internal_vlan_tbl_entry *found;
3965 unsigned long lock_flags;
3966 unsigned long bit;
3967
3968 spin_lock_irqsave(&rocker->internal_vlan_tbl_lock, lock_flags);
3969
3970 found = rocker_internal_vlan_tbl_find(rocker, ifindex);
3971 if (!found) {
3972 netdev_err(rocker_port->dev,
3973 "ifindex (%d) not found in internal VLAN tbl\n",
3974 ifindex);
3975 goto not_found;
3976 }
3977
3978 if (--found->ref_count <= 0) {
3979 bit = ntohs(found->vlan_id) - ROCKER_INTERNAL_VLAN_ID_BASE;
3980 clear_bit(bit, rocker->internal_vlan_bitmap);
3981 hash_del(&found->entry);
3982 kfree(found);
3983 }
3984
3985 not_found:
3986 spin_unlock_irqrestore(&rocker->internal_vlan_tbl_lock, lock_flags);
3987 }
3988
3989 static int rocker_port_fib_ipv4(struct rocker_port *rocker_port,
3990 struct switchdev_trans *trans, __be32 dst,
3991 int dst_len, const struct fib_info *fi,
3992 u32 tb_id, int flags)
3993 {
3994 const struct fib_nh *nh;
3995 __be16 eth_type = htons(ETH_P_IP);
3996 __be32 dst_mask = inet_make_mask(dst_len);
3997 __be16 internal_vlan_id = rocker_port->internal_vlan_id;
3998 u32 priority = fi->fib_priority;
3999 enum rocker_of_dpa_table_id goto_tbl =
4000 ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
4001 u32 group_id;
4002 bool nh_on_port;
4003 bool has_gw;
4004 u32 index;
4005 int err;
4006
4007 /* XXX support ECMP */
4008
4009 nh = fi->fib_nh;
4010 nh_on_port = (fi->fib_dev == rocker_port->dev);
4011 has_gw = !!nh->nh_gw;
4012
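 /* Routes with a gateway out this port are pointed at an L3 unicast
  * group (programming the nexthop as needed); anything else is sent
  * to the CPU via the internal-VLAN L2 interface group below.
  */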
4013 if (has_gw && nh_on_port) {
4014 err = rocker_port_ipv4_nh(rocker_port, trans, flags,
4015 nh->nh_gw, &index);
4016 if (err)
4017 return err;
4018
4019 group_id = ROCKER_GROUP_L3_UNICAST(index);
4020 } else {
4021 /* Send to CPU for processing */
4022 group_id = ROCKER_GROUP_L2_INTERFACE(internal_vlan_id, 0);
4023 }
4024
4025 err = rocker_flow_tbl_ucast4_routing(rocker_port, trans, eth_type, dst,
4026 dst_mask, priority, goto_tbl,
4027 group_id, flags);
4028 if (err)
4029 netdev_err(rocker_port->dev, "Error (%d) IPv4 route %pI4\n",
4030 err, &dst);
4031
4032 return err;
4033 }
4034
4035 /*****************
4036 * Net device ops
4037 *****************/
4038
4039 static int rocker_port_open(struct net_device *dev)
4040 {
4041 struct rocker_port *rocker_port = netdev_priv(dev);
4042 int err;
4043
4044 err = rocker_port_dma_rings_init(rocker_port);
4045 if (err)
4046 return err;
4047
4048 err = request_irq(rocker_msix_tx_vector(rocker_port),
4049 rocker_tx_irq_handler, 0,
4050 rocker_driver_name, rocker_port);
4051 if (err) {
4052 netdev_err(rocker_port->dev, "cannot assign tx irq\n");
4053 goto err_request_tx_irq;
4054 }
4055
4056 err = request_irq(rocker_msix_rx_vector(rocker_port),
4057 rocker_rx_irq_handler, 0,
4058 rocker_driver_name, rocker_port);
4059 if (err) {
4060 netdev_err(rocker_port->dev, "cannot assign rx irq\n");
4061 goto err_request_rx_irq;
4062 }
4063
4064 err = rocker_port_fwd_enable(rocker_port, NULL, 0);
4065 if (err)
4066 goto err_fwd_enable;
4067
4068 napi_enable(&rocker_port->napi_tx);
4069 napi_enable(&rocker_port->napi_rx);
4070 if (!dev->proto_down)
4071 rocker_port_set_enable(rocker_port, true);
4072 netif_start_queue(dev);
4073 return 0;
4074
4075 err_fwd_enable:
4076 free_irq(rocker_msix_rx_vector(rocker_port), rocker_port);
4077 err_request_rx_irq:
4078 free_irq(rocker_msix_tx_vector(rocker_port), rocker_port);
4079 err_request_tx_irq:
4080 rocker_port_dma_rings_fini(rocker_port);
4081 return err;
4082 }
4083
4084 static int rocker_port_stop(struct net_device *dev)
4085 {
4086 struct rocker_port *rocker_port = netdev_priv(dev);
4087
4088 netif_stop_queue(dev);
4089 rocker_port_set_enable(rocker_port, false);
4090 napi_disable(&rocker_port->napi_rx);
4091 napi_disable(&rocker_port->napi_tx);
4092 rocker_port_fwd_disable(rocker_port, NULL,
4093 ROCKER_OP_FLAG_NOWAIT);
4094 free_irq(rocker_msix_rx_vector(rocker_port), rocker_port);
4095 free_irq(rocker_msix_tx_vector(rocker_port), rocker_port);
4096 rocker_port_dma_rings_fini(rocker_port);
4097
4098 return 0;
4099 }
4100
4101 static void rocker_tx_desc_frags_unmap(const struct rocker_port *rocker_port,
4102 const struct rocker_desc_info *desc_info)
4103 {
4104 const struct rocker *rocker = rocker_port->rocker;
4105 struct pci_dev *pdev = rocker->pdev;
4106 const struct rocker_tlv *attrs[ROCKER_TLV_TX_MAX + 1];
4107 struct rocker_tlv *attr;
4108 int rem;
4109
4110 rocker_tlv_parse_desc(attrs, ROCKER_TLV_TX_MAX, desc_info);
4111 if (!attrs[ROCKER_TLV_TX_FRAGS])
4112 return;
4113 rocker_tlv_for_each_nested(attr, attrs[ROCKER_TLV_TX_FRAGS], rem) {
4114 const struct rocker_tlv *frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_MAX + 1];
4115 dma_addr_t dma_handle;
4116 size_t len;
4117
4118 if (rocker_tlv_type(attr) != ROCKER_TLV_TX_FRAG)
4119 continue;
4120 rocker_tlv_parse_nested(frag_attrs, ROCKER_TLV_TX_FRAG_ATTR_MAX,
4121 attr);
4122 if (!frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_ADDR] ||
4123 !frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_LEN])
4124 continue;
4125 dma_handle = rocker_tlv_get_u64(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_ADDR]);
4126 len = rocker_tlv_get_u16(frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_LEN]);
4127 pci_unmap_single(pdev, dma_handle, len, DMA_TO_DEVICE);
4128 }
4129 }
4130
4131 static int rocker_tx_desc_frag_map_put(const struct rocker_port *rocker_port,
4132 struct rocker_desc_info *desc_info,
4133 char *buf, size_t buf_len)
4134 {
4135 const struct rocker *rocker = rocker_port->rocker;
4136 struct pci_dev *pdev = rocker->pdev;
4137 dma_addr_t dma_handle;
4138 struct rocker_tlv *frag;
4139
4140 dma_handle = pci_map_single(pdev, buf, buf_len, DMA_TO_DEVICE);
4141 if (unlikely(pci_dma_mapping_error(pdev, dma_handle))) {
4142 if (net_ratelimit())
4143 netdev_err(rocker_port->dev, "failed to dma map tx frag\n");
4144 return -EIO;
4145 }
4146 frag = rocker_tlv_nest_start(desc_info, ROCKER_TLV_TX_FRAG);
4147 if (!frag)
4148 goto unmap_frag;
4149 if (rocker_tlv_put_u64(desc_info, ROCKER_TLV_TX_FRAG_ATTR_ADDR,
4150 dma_handle))
4151 goto nest_cancel;
4152 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_TX_FRAG_ATTR_LEN,
4153 buf_len))
4154 goto nest_cancel;
4155 rocker_tlv_nest_end(desc_info, frag);
4156 return 0;
4157
4158 nest_cancel:
4159 rocker_tlv_nest_cancel(desc_info, frag);
4160 unmap_frag:
4161 pci_unmap_single(pdev, dma_handle, buf_len, DMA_TO_DEVICE);
4162 return -EMSGSIZE;
4163 }
4164
4165 static netdev_tx_t rocker_port_xmit(struct sk_buff *skb, struct net_device *dev)
4166 {
4167 struct rocker_port *rocker_port = netdev_priv(dev);
4168 struct rocker *rocker = rocker_port->rocker;
4169 struct rocker_desc_info *desc_info;
4170 struct rocker_tlv *frags;
4171 int i;
4172 int err;
4173
4174 desc_info = rocker_desc_head_get(&rocker_port->tx_ring);
4175 if (unlikely(!desc_info)) {
4176 if (net_ratelimit())
4177 netdev_err(dev, "tx ring full when queue awake\n");
4178 return NETDEV_TX_BUSY;
4179 }
4180
4181 rocker_desc_cookie_ptr_set(desc_info, skb);
4182
4183 frags = rocker_tlv_nest_start(desc_info, ROCKER_TLV_TX_FRAGS);
4184 if (!frags)
4185 goto out;
4186 err = rocker_tx_desc_frag_map_put(rocker_port, desc_info,
4187 skb->data, skb_headlen(skb));
4188 if (err)
4189 goto nest_cancel;
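 /* skbs with more than ROCKER_TX_FRAGS_MAX page fragments are
  * linearized into a single buffer before the per-fragment mapping
  * below.
  */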
4190 if (skb_shinfo(skb)->nr_frags > ROCKER_TX_FRAGS_MAX) {
4191 err = skb_linearize(skb);
4192 if (err)
4193 goto unmap_frags;
4194 }
4195
4196 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4197 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4198
4199 err = rocker_tx_desc_frag_map_put(rocker_port, desc_info,
4200 skb_frag_address(frag),
4201 skb_frag_size(frag));
4202 if (err)
4203 goto unmap_frags;
4204 }
4205 rocker_tlv_nest_end(desc_info, frags);
4206
4207 rocker_desc_gen_clear(desc_info);
4208 rocker_desc_head_set(rocker, &rocker_port->tx_ring, desc_info);
4209
4210 desc_info = rocker_desc_head_get(&rocker_port->tx_ring);
4211 if (!desc_info)
4212 netif_stop_queue(dev);
4213
4214 return NETDEV_TX_OK;
4215
4216 unmap_frags:
4217 rocker_tx_desc_frags_unmap(rocker_port, desc_info);
4218 nest_cancel:
4219 rocker_tlv_nest_cancel(desc_info, frags);
4220 out:
4221 dev_kfree_skb(skb);
4222 dev->stats.tx_dropped++;
4223
4224 return NETDEV_TX_OK;
4225 }
4226
4227 static int rocker_port_set_mac_address(struct net_device *dev, void *p)
4228 {
4229 struct sockaddr *addr = p;
4230 struct rocker_port *rocker_port = netdev_priv(dev);
4231 int err;
4232
4233 if (!is_valid_ether_addr(addr->sa_data))
4234 return -EADDRNOTAVAIL;
4235
4236 err = rocker_cmd_set_port_settings_macaddr(rocker_port, addr->sa_data);
4237 if (err)
4238 return err;
4239 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4240 return 0;
4241 }
4242
4243 static int rocker_port_change_mtu(struct net_device *dev, int new_mtu)
4244 {
4245 struct rocker_port *rocker_port = netdev_priv(dev);
4246 int running = netif_running(dev);
4247 int err;
4248
4249 #define ROCKER_PORT_MIN_MTU 68
4250 #define ROCKER_PORT_MAX_MTU 9000
4251
4252 if (new_mtu < ROCKER_PORT_MIN_MTU || new_mtu > ROCKER_PORT_MAX_MTU)
4253 return -EINVAL;
4254
4255 if (running)
4256 rocker_port_stop(dev);
4257
4258 netdev_info(dev, "MTU change from %d to %d\n", dev->mtu, new_mtu);
4259 dev->mtu = new_mtu;
4260
4261 err = rocker_cmd_set_port_settings_mtu(rocker_port, new_mtu);
4262 if (err)
4263 return err;
4264
4265 if (running)
4266 err = rocker_port_open(dev);
4267
4268 return err;
4269 }
4270
4271 static int rocker_port_get_phys_port_name(struct net_device *dev,
4272 char *buf, size_t len)
4273 {
4274 struct rocker_port *rocker_port = netdev_priv(dev);
4275 struct port_name name = { .buf = buf, .len = len };
4276 int err;
4277
4278 err = rocker_cmd_exec(rocker_port, NULL, 0,
4279 rocker_cmd_get_port_settings_prep, NULL,
4280 rocker_cmd_get_port_settings_phys_name_proc,
4281 &name);
4282
4283 return err ? -EOPNOTSUPP : 0;
4284 }
4285
4286 static int rocker_port_change_proto_down(struct net_device *dev,
4287 bool proto_down)
4288 {
4289 struct rocker_port *rocker_port = netdev_priv(dev);
4290
4291 if (rocker_port->dev->flags & IFF_UP)
4292 rocker_port_set_enable(rocker_port, !proto_down);
4293 rocker_port->dev->proto_down = proto_down;
4294 return 0;
4295 }
4296
4297 static void rocker_port_neigh_destroy(struct neighbour *n)
4298 {
4299 struct rocker_port *rocker_port = netdev_priv(n->dev);
4300 int flags = ROCKER_OP_FLAG_REMOVE | ROCKER_OP_FLAG_NOWAIT;
4301 __be32 ip_addr = *(__be32 *)n->primary_key;
4302
4303 rocker_port_ipv4_neigh(rocker_port, NULL,
4304 flags, ip_addr, n->ha);
4305 }
4306
4307 static const struct net_device_ops rocker_port_netdev_ops = {
4308 .ndo_open = rocker_port_open,
4309 .ndo_stop = rocker_port_stop,
4310 .ndo_start_xmit = rocker_port_xmit,
4311 .ndo_set_mac_address = rocker_port_set_mac_address,
4312 .ndo_change_mtu = rocker_port_change_mtu,
4313 .ndo_bridge_getlink = switchdev_port_bridge_getlink,
4314 .ndo_bridge_setlink = switchdev_port_bridge_setlink,
4315 .ndo_bridge_dellink = switchdev_port_bridge_dellink,
4316 .ndo_fdb_add = switchdev_port_fdb_add,
4317 .ndo_fdb_del = switchdev_port_fdb_del,
4318 .ndo_fdb_dump = switchdev_port_fdb_dump,
4319 .ndo_get_phys_port_name = rocker_port_get_phys_port_name,
4320 .ndo_change_proto_down = rocker_port_change_proto_down,
4321 .ndo_neigh_destroy = rocker_port_neigh_destroy,
4322 };
4323
4324 /********************
4325 * swdev interface
4326 ********************/
4327
4328 static int rocker_port_attr_get(struct net_device *dev,
4329 struct switchdev_attr *attr)
4330 {
4331 const struct rocker_port *rocker_port = netdev_priv(dev);
4332 const struct rocker *rocker = rocker_port->rocker;
4333
4334 switch (attr->id) {
4335 case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
4336 attr->u.ppid.id_len = sizeof(rocker->hw.id);
4337 memcpy(&attr->u.ppid.id, &rocker->hw.id, attr->u.ppid.id_len);
4338 break;
4339 case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
4340 attr->u.brport_flags = rocker_port->brport_flags;
4341 break;
4342 default:
4343 return -EOPNOTSUPP;
4344 }
4345
4346 return 0;
4347 }
4348
4349 static int rocker_port_brport_flags_set(struct rocker_port *rocker_port,
4350 struct switchdev_trans *trans,
4351 unsigned long brport_flags)
4352 {
4353 unsigned long orig_flags;
4354 int err = 0;
4355
4356 orig_flags = rocker_port->brport_flags;
4357 rocker_port->brport_flags = brport_flags;
4358 if ((orig_flags ^ rocker_port->brport_flags) & BR_LEARNING)
4359 err = rocker_port_set_learning(rocker_port, trans);
4360
4361 if (switchdev_trans_ph_prepare(trans))
4362 rocker_port->brport_flags = orig_flags;
4363
4364 return err;
4365 }
4366
4367 static int rocker_port_bridge_ageing_time(struct rocker_port *rocker_port,
4368 struct switchdev_trans *trans,
4369 u32 ageing_time)
4370 {
4371 struct rocker *rocker = rocker_port->rocker;
4372
4373 if (!switchdev_trans_ph_prepare(trans)) {
4374 rocker_port->ageing_time = clock_t_to_jiffies(ageing_time);
4375 if (rocker_port->ageing_time < rocker->ageing_time)
4376 rocker->ageing_time = rocker_port->ageing_time;
4377 mod_timer(&rocker_port->rocker->fdb_cleanup_timer, jiffies);
4378 }
4379
4380 return 0;
4381 }
4382
4383 static int rocker_port_attr_set(struct net_device *dev,
4384 const struct switchdev_attr *attr,
4385 struct switchdev_trans *trans)
4386 {
4387 struct rocker_port *rocker_port = netdev_priv(dev);
4388 int err = 0;
4389
4390 switch (attr->id) {
4391 case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
4392 err = rocker_port_stp_update(rocker_port, trans, 0,
4393 attr->u.stp_state);
4394 break;
4395 case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
4396 err = rocker_port_brport_flags_set(rocker_port, trans,
4397 attr->u.brport_flags);
4398 break;
4399 case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
4400 err = rocker_port_bridge_ageing_time(rocker_port, trans,
4401 attr->u.ageing_time);
4402 break;
4403 default:
4404 err = -EOPNOTSUPP;
4405 break;
4406 }
4407
4408 return err;
4409 }
4410
4411 static int rocker_port_vlan_add(struct rocker_port *rocker_port,
4412 struct switchdev_trans *trans,
4413 u16 vid, u16 flags)
4414 {
4415 int err;
4416
4417 /* XXX deal with flags for PVID and untagged */
4418
4419 err = rocker_port_vlan(rocker_port, trans, 0, vid);
4420 if (err)
4421 return err;
4422
4423 err = rocker_port_router_mac(rocker_port, trans, 0, htons(vid));
4424 if (err)
4425 rocker_port_vlan(rocker_port, trans,
4426 ROCKER_OP_FLAG_REMOVE, vid);
4427
4428 return err;
4429 }
4430
4431 static int rocker_port_vlans_add(struct rocker_port *rocker_port,
4432 struct switchdev_trans *trans,
4433 const struct switchdev_obj_port_vlan *vlan)
4434 {
4435 u16 vid;
4436 int err;
4437
4438 for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
4439 err = rocker_port_vlan_add(rocker_port, trans,
4440 vid, vlan->flags);
4441 if (err)
4442 return err;
4443 }
4444
4445 return 0;
4446 }
4447
4448 static int rocker_port_fdb_add(struct rocker_port *rocker_port,
4449 struct switchdev_trans *trans,
4450 const struct switchdev_obj_port_fdb *fdb)
4451 {
4452 __be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, fdb->vid, NULL);
4453 int flags = 0;
4454
4455 if (!rocker_port_is_bridged(rocker_port))
4456 return -EINVAL;
4457
4458 return rocker_port_fdb(rocker_port, trans, fdb->addr, vlan_id, flags);
4459 }
4460
4461 static int rocker_port_obj_add(struct net_device *dev,
4462 const struct switchdev_obj *obj,
4463 struct switchdev_trans *trans)
4464 {
4465 struct rocker_port *rocker_port = netdev_priv(dev);
4466 const struct switchdev_obj_ipv4_fib *fib4;
4467 int err = 0;
4468
4469 switch (obj->id) {
4470 case SWITCHDEV_OBJ_ID_PORT_VLAN:
4471 err = rocker_port_vlans_add(rocker_port, trans,
4472 SWITCHDEV_OBJ_PORT_VLAN(obj));
4473 break;
4474 case SWITCHDEV_OBJ_ID_IPV4_FIB:
4475 fib4 = SWITCHDEV_OBJ_IPV4_FIB(obj);
4476 err = rocker_port_fib_ipv4(rocker_port, trans,
4477 htonl(fib4->dst), fib4->dst_len,
4478 fib4->fi, fib4->tb_id, 0);
4479 break;
4480 case SWITCHDEV_OBJ_ID_PORT_FDB:
4481 err = rocker_port_fdb_add(rocker_port, trans,
4482 SWITCHDEV_OBJ_PORT_FDB(obj));
4483 break;
4484 default:
4485 err = -EOPNOTSUPP;
4486 break;
4487 }
4488
4489 return err;
4490 }
4491
4492 static int rocker_port_vlan_del(struct rocker_port *rocker_port,
4493 u16 vid, u16 flags)
4494 {
4495 int err;
4496
4497 err = rocker_port_router_mac(rocker_port, NULL,
4498 ROCKER_OP_FLAG_REMOVE, htons(vid));
4499 if (err)
4500 return err;
4501
4502 return rocker_port_vlan(rocker_port, NULL,
4503 ROCKER_OP_FLAG_REMOVE, vid);
4504 }
4505
4506 static int rocker_port_vlans_del(struct rocker_port *rocker_port,
4507 const struct switchdev_obj_port_vlan *vlan)
4508 {
4509 u16 vid;
4510 int err;
4511
4512 for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
4513 err = rocker_port_vlan_del(rocker_port, vid, vlan->flags);
4514 if (err)
4515 return err;
4516 }
4517
4518 return 0;
4519 }
4520
4521 static int rocker_port_fdb_del(struct rocker_port *rocker_port,
4522 struct switchdev_trans *trans,
4523 const struct switchdev_obj_port_fdb *fdb)
4524 {
4525 __be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, fdb->vid, NULL);
4526 int flags = ROCKER_OP_FLAG_REMOVE;
4527
4528 if (!rocker_port_is_bridged(rocker_port))
4529 return -EINVAL;
4530
4531 return rocker_port_fdb(rocker_port, trans, fdb->addr, vlan_id, flags);
4532 }
4533
4534 static int rocker_port_obj_del(struct net_device *dev,
4535 const struct switchdev_obj *obj)
4536 {
4537 struct rocker_port *rocker_port = netdev_priv(dev);
4538 const struct switchdev_obj_ipv4_fib *fib4;
4539 int err = 0;
4540
4541 switch (obj->id) {
4542 case SWITCHDEV_OBJ_ID_PORT_VLAN:
4543 err = rocker_port_vlans_del(rocker_port,
4544 SWITCHDEV_OBJ_PORT_VLAN(obj));
4545 break;
4546 case SWITCHDEV_OBJ_ID_IPV4_FIB:
4547 fib4 = SWITCHDEV_OBJ_IPV4_FIB(obj);
4548 err = rocker_port_fib_ipv4(rocker_port, NULL,
4549 htonl(fib4->dst), fib4->dst_len,
4550 fib4->fi, fib4->tb_id,
4551 ROCKER_OP_FLAG_REMOVE);
4552 break;
4553 case SWITCHDEV_OBJ_ID_PORT_FDB:
4554 err = rocker_port_fdb_del(rocker_port, NULL,
4555 SWITCHDEV_OBJ_PORT_FDB(obj));
4556 break;
4557 default:
4558 err = -EOPNOTSUPP;
4559 break;
4560 }
4561
4562 return err;
4563 }
4564
4565 static int rocker_port_fdb_dump(const struct rocker_port *rocker_port,
4566 struct switchdev_obj_port_fdb *fdb,
4567 switchdev_obj_dump_cb_t *cb)
4568 {
4569 struct rocker *rocker = rocker_port->rocker;
4570 struct rocker_fdb_tbl_entry *found;
4571 struct hlist_node *tmp;
4572 unsigned long lock_flags;
4573 int bkt;
4574 int err = 0;
4575
4576 spin_lock_irqsave(&rocker->fdb_tbl_lock, lock_flags);
4577 hash_for_each_safe(rocker->fdb_tbl, bkt, tmp, found, entry) {
4578 if (found->key.rocker_port != rocker_port)
4579 continue;
4580 ether_addr_copy(fdb->addr, found->key.addr);
4581 fdb->ndm_state = NUD_REACHABLE;
4582 fdb->vid = rocker_port_vlan_to_vid(rocker_port,
4583 found->key.vlan_id);
4584 err = cb(&fdb->obj);
4585 if (err)
4586 break;
4587 }
4588 spin_unlock_irqrestore(&rocker->fdb_tbl_lock, lock_flags);
4589
4590 return err;
4591 }
4592
4593 static int rocker_port_vlan_dump(const struct rocker_port *rocker_port,
4594 struct switchdev_obj_port_vlan *vlan,
4595 switchdev_obj_dump_cb_t *cb)
4596 {
4597 u16 vid;
4598 int err = 0;
4599
4600 for (vid = 1; vid < VLAN_N_VID; vid++) {
4601 if (!test_bit(vid, rocker_port->vlan_bitmap))
4602 continue;
4603 vlan->flags = 0;
4604 if (rocker_vlan_id_is_internal(htons(vid)))
4605 vlan->flags |= BRIDGE_VLAN_INFO_PVID;
4606 vlan->vid_begin = vlan->vid_end = vid;
4607 err = cb(&vlan->obj);
4608 if (err)
4609 break;
4610 }
4611
4612 return err;
4613 }
4614
4615 static int rocker_port_obj_dump(struct net_device *dev,
4616 struct switchdev_obj *obj,
4617 switchdev_obj_dump_cb_t *cb)
4618 {
4619 const struct rocker_port *rocker_port = netdev_priv(dev);
4620 int err = 0;
4621
4622 switch (obj->id) {
4623 case SWITCHDEV_OBJ_ID_PORT_FDB:
4624 err = rocker_port_fdb_dump(rocker_port,
4625 SWITCHDEV_OBJ_PORT_FDB(obj), cb);
4626 break;
4627 case SWITCHDEV_OBJ_ID_PORT_VLAN:
4628 err = rocker_port_vlan_dump(rocker_port,
4629 SWITCHDEV_OBJ_PORT_VLAN(obj), cb);
4630 break;
4631 default:
4632 err = -EOPNOTSUPP;
4633 break;
4634 }
4635
4636 return err;
4637 }
4638
4639 static const struct switchdev_ops rocker_port_switchdev_ops = {
4640 .switchdev_port_attr_get = rocker_port_attr_get,
4641 .switchdev_port_attr_set = rocker_port_attr_set,
4642 .switchdev_port_obj_add = rocker_port_obj_add,
4643 .switchdev_port_obj_del = rocker_port_obj_del,
4644 .switchdev_port_obj_dump = rocker_port_obj_dump,
4645 };
4646
4647 /********************
4648 * ethtool interface
4649 ********************/
4650
4651 static int rocker_port_get_settings(struct net_device *dev,
4652 struct ethtool_cmd *ecmd)
4653 {
4654 struct rocker_port *rocker_port = netdev_priv(dev);
4655
4656 return rocker_cmd_get_port_settings_ethtool(rocker_port, ecmd);
4657 }
4658
4659 static int rocker_port_set_settings(struct net_device *dev,
4660 struct ethtool_cmd *ecmd)
4661 {
4662 struct rocker_port *rocker_port = netdev_priv(dev);
4663
4664 return rocker_cmd_set_port_settings_ethtool(rocker_port, ecmd);
4665 }
4666
4667 static void rocker_port_get_drvinfo(struct net_device *dev,
4668 struct ethtool_drvinfo *drvinfo)
4669 {
4670 strlcpy(drvinfo->driver, rocker_driver_name, sizeof(drvinfo->driver));
4671 strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
4672 }
4673
4674 static struct rocker_port_stats {
4675 char str[ETH_GSTRING_LEN];
4676 int type;
4677 } rocker_port_stats[] = {
4678 { "rx_packets", ROCKER_TLV_CMD_PORT_STATS_RX_PKTS, },
4679 { "rx_bytes", ROCKER_TLV_CMD_PORT_STATS_RX_BYTES, },
4680 { "rx_dropped", ROCKER_TLV_CMD_PORT_STATS_RX_DROPPED, },
4681 { "rx_errors", ROCKER_TLV_CMD_PORT_STATS_RX_ERRORS, },
4682
4683 { "tx_packets", ROCKER_TLV_CMD_PORT_STATS_TX_PKTS, },
4684 { "tx_bytes", ROCKER_TLV_CMD_PORT_STATS_TX_BYTES, },
4685 { "tx_dropped", ROCKER_TLV_CMD_PORT_STATS_TX_DROPPED, },
4686 { "tx_errors", ROCKER_TLV_CMD_PORT_STATS_TX_ERRORS, },
4687 };
4688
4689 #define ROCKER_PORT_STATS_LEN ARRAY_SIZE(rocker_port_stats)
4690
4691 static void rocker_port_get_strings(struct net_device *netdev, u32 stringset,
4692 u8 *data)
4693 {
4694 u8 *p = data;
4695 int i;
4696
4697 switch (stringset) {
4698 case ETH_SS_STATS:
4699 for (i = 0; i < ARRAY_SIZE(rocker_port_stats); i++) {
4700 memcpy(p, rocker_port_stats[i].str, ETH_GSTRING_LEN);
4701 p += ETH_GSTRING_LEN;
4702 }
4703 break;
4704 }
4705 }
4706
4707 static int
4708 rocker_cmd_get_port_stats_prep(const struct rocker_port *rocker_port,
4709 struct rocker_desc_info *desc_info,
4710 void *priv)
4711 {
4712 struct rocker_tlv *cmd_stats;
4713
4714 if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
4715 ROCKER_TLV_CMD_TYPE_GET_PORT_STATS))
4716 return -EMSGSIZE;
4717
4718 cmd_stats = rocker_tlv_nest_start(desc_info, ROCKER_TLV_CMD_INFO);
4719 if (!cmd_stats)
4720 return -EMSGSIZE;
4721
4722 if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_CMD_PORT_STATS_PPORT,
4723 rocker_port->pport))
4724 return -EMSGSIZE;
4725
4726 rocker_tlv_nest_end(desc_info, cmd_stats);
4727
4728 return 0;
4729 }
4730
4731 static int
4732 rocker_cmd_get_port_stats_ethtool_proc(const struct rocker_port *rocker_port,
4733 const struct rocker_desc_info *desc_info,
4734 void *priv)
4735 {
4736 const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
4737 const struct rocker_tlv *stats_attrs[ROCKER_TLV_CMD_PORT_STATS_MAX + 1];
4738 const struct rocker_tlv *pattr;
4739 u32 pport;
4740 u64 *data = priv;
4741 int i;
4742
4743 rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
4744
4745 if (!attrs[ROCKER_TLV_CMD_INFO])
4746 return -EIO;
4747
4748 rocker_tlv_parse_nested(stats_attrs, ROCKER_TLV_CMD_PORT_STATS_MAX,
4749 attrs[ROCKER_TLV_CMD_INFO]);
4750
4751 if (!stats_attrs[ROCKER_TLV_CMD_PORT_STATS_PPORT])
4752 return -EIO;
4753
4754 pport = rocker_tlv_get_u32(stats_attrs[ROCKER_TLV_CMD_PORT_STATS_PPORT]);
4755 if (pport != rocker_port->pport)
4756 return -EIO;
4757
4758 for (i = 0; i < ARRAY_SIZE(rocker_port_stats); i++) {
4759 pattr = stats_attrs[rocker_port_stats[i].type];
4760 if (!pattr)
4761 continue;
4762
4763 data[i] = rocker_tlv_get_u64(pattr);
4764 }
4765
4766 return 0;
4767 }
4768
4769 static int rocker_cmd_get_port_stats_ethtool(struct rocker_port *rocker_port,
4770 void *priv)
4771 {
4772 return rocker_cmd_exec(rocker_port, NULL, 0,
4773 rocker_cmd_get_port_stats_prep, NULL,
4774 rocker_cmd_get_port_stats_ethtool_proc,
4775 priv);
4776 }
4777
4778 static void rocker_port_get_stats(struct net_device *dev,
4779 struct ethtool_stats *stats, u64 *data)
4780 {
4781 struct rocker_port *rocker_port = netdev_priv(dev);
4782
4783 if (rocker_cmd_get_port_stats_ethtool(rocker_port, data) != 0) {
4784 int i;
4785
4786 for (i = 0; i < ARRAY_SIZE(rocker_port_stats); ++i)
4787 data[i] = 0;
4788 }
4789 }
4790
4791 static int rocker_port_get_sset_count(struct net_device *netdev, int sset)
4792 {
4793 switch (sset) {
4794 case ETH_SS_STATS:
4795 return ROCKER_PORT_STATS_LEN;
4796 default:
4797 return -EOPNOTSUPP;
4798 }
4799 }
4800
4801 static const struct ethtool_ops rocker_port_ethtool_ops = {
4802 .get_settings = rocker_port_get_settings,
4803 .set_settings = rocker_port_set_settings,
4804 .get_drvinfo = rocker_port_get_drvinfo,
4805 .get_link = ethtool_op_get_link,
4806 .get_strings = rocker_port_get_strings,
4807 .get_ethtool_stats = rocker_port_get_stats,
4808 .get_sset_count = rocker_port_get_sset_count,
4809 };
4810
4811 /*****************
4812 * NAPI interface
4813 *****************/
4814
4815 static struct rocker_port *rocker_port_napi_tx_get(struct napi_struct *napi)
4816 {
4817 return container_of(napi, struct rocker_port, napi_tx);
4818 }
4819
4820 static int rocker_port_poll_tx(struct napi_struct *napi, int budget)
4821 {
4822 struct rocker_port *rocker_port = rocker_port_napi_tx_get(napi);
4823 const struct rocker *rocker = rocker_port->rocker;
4824 const struct rocker_desc_info *desc_info;
4825 u32 credits = 0;
4826 int err;
4827
4828 /* Cleanup tx descriptors */
4829 while ((desc_info = rocker_desc_tail_get(&rocker_port->tx_ring))) {
4830 struct sk_buff *skb;
4831
4832 err = rocker_desc_err(desc_info);
4833 if (err && net_ratelimit())
4834 netdev_err(rocker_port->dev, "tx desc received with err %d\n",
4835 err);
4836 rocker_tx_desc_frags_unmap(rocker_port, desc_info);
4837
4838 skb = rocker_desc_cookie_ptr_get(desc_info);
4839 if (err == 0) {
4840 rocker_port->dev->stats.tx_packets++;
4841 rocker_port->dev->stats.tx_bytes += skb->len;
4842 } else {
4843 rocker_port->dev->stats.tx_errors++;
4844 }
4845
4846 dev_kfree_skb_any(skb);
4847 credits++;
4848 }
4849
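 /* Descriptors were reclaimed, so there is tx ring space again; wake
  * the queue if rocker_port_xmit() had stopped it.
  */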
4850 if (credits && netif_queue_stopped(rocker_port->dev))
4851 netif_wake_queue(rocker_port->dev);
4852
4853 napi_complete(napi);
4854 rocker_dma_ring_credits_set(rocker, &rocker_port->tx_ring, credits);
4855
4856 return 0;
4857 }
4858
4859 static int rocker_port_rx_proc(const struct rocker *rocker,
4860 const struct rocker_port *rocker_port,
4861 struct rocker_desc_info *desc_info)
4862 {
4863 const struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1];
4864 struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info);
4865 size_t rx_len;
4866 u16 rx_flags = 0;
4867
4868 if (!skb)
4869 return -ENOENT;
4870
4871 rocker_tlv_parse_desc(attrs, ROCKER_TLV_RX_MAX, desc_info);
4872 if (!attrs[ROCKER_TLV_RX_FRAG_LEN])
4873 return -EINVAL;
4874 if (attrs[ROCKER_TLV_RX_FLAGS])
4875 rx_flags = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FLAGS]);
4876
4877 rocker_dma_rx_ring_skb_unmap(rocker, attrs);
4878
4879 rx_len = rocker_tlv_get_u16(attrs[ROCKER_TLV_RX_FRAG_LEN]);
4880 skb_put(skb, rx_len);
4881 skb->protocol = eth_type_trans(skb, rocker_port->dev);
4882
4883 if (rx_flags & ROCKER_RX_FLAGS_FWD_OFFLOAD)
4884 skb->offload_fwd_mark = rocker_port->dev->offload_fwd_mark;
4885
4886 rocker_port->dev->stats.rx_packets++;
4887 rocker_port->dev->stats.rx_bytes += skb->len;
4888
4889 netif_receive_skb(skb);
4890
4891 return rocker_dma_rx_ring_skb_alloc(rocker_port, desc_info);
4892 }
4893
4894 static struct rocker_port *rocker_port_napi_rx_get(struct napi_struct *napi)
4895 {
4896 return container_of(napi, struct rocker_port, napi_rx);
4897 }
4898
4899 static int rocker_port_poll_rx(struct napi_struct *napi, int budget)
4900 {
4901 struct rocker_port *rocker_port = rocker_port_napi_rx_get(napi);
4902 const struct rocker *rocker = rocker_port->rocker;
4903 struct rocker_desc_info *desc_info;
4904 u32 credits = 0;
4905 int err;
4906
4907 /* Process rx descriptors */
4908 while (credits < budget &&
4909 (desc_info = rocker_desc_tail_get(&rocker_port->rx_ring))) {
4910 err = rocker_desc_err(desc_info);
4911 if (err) {
4912 if (net_ratelimit())
4913 netdev_err(rocker_port->dev, "rx desc received with err %d\n",
4914 err);
4915 } else {
4916 err = rocker_port_rx_proc(rocker, rocker_port,
4917 desc_info);
4918 if (err && net_ratelimit())
4919 netdev_err(rocker_port->dev, "rx processing failed with err %d\n",
4920 err);
4921 }
4922 if (err)
4923 rocker_port->dev->stats.rx_errors++;
4924
4925 rocker_desc_gen_clear(desc_info);
4926 rocker_desc_head_set(rocker, &rocker_port->rx_ring, desc_info);
4927 credits++;
4928 }
4929
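 /* Per the usual NAPI contract, only complete when fewer packets
  * than the budget were processed.
  */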
4930 if (credits < budget)
4931 napi_complete(napi);
4932
4933 rocker_dma_ring_credits_set(rocker, &rocker_port->rx_ring, credits);
4934
4935 return credits;
4936 }
4937
4938 /*****************
4939 * PCI driver ops
4940 *****************/
4941
4942 static void rocker_carrier_init(const struct rocker_port *rocker_port)
4943 {
4944 const struct rocker *rocker = rocker_port->rocker;
4945 u64 link_status = rocker_read64(rocker, PORT_PHYS_LINK_STATUS);
4946 bool link_up;
4947
4948 link_up = link_status & (1 << rocker_port->pport);
4949 if (link_up)
4950 netif_carrier_on(rocker_port->dev);
4951 else
4952 netif_carrier_off(rocker_port->dev);
4953 }
4954
4955 static void rocker_remove_ports(const struct rocker *rocker)
4956 {
4957 struct rocker_port *rocker_port;
4958 int i;
4959
4960 for (i = 0; i < rocker->port_count; i++) {
4961 rocker_port = rocker->ports[i];
4962 if (!rocker_port)
4963 continue;
4964 rocker_port_ig_tbl(rocker_port, NULL, ROCKER_OP_FLAG_REMOVE);
4965 unregister_netdev(rocker_port->dev);
4966 free_netdev(rocker_port->dev);
4967 }
4968 kfree(rocker->ports);
4969 }
4970
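/* Ask the device for the port's MAC address; fall back to a random
 * address if the command fails, so the port can still come up.
 */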
static void rocker_port_dev_addr_init(struct rocker_port *rocker_port)
{
	const struct rocker *rocker = rocker_port->rocker;
	const struct pci_dev *pdev = rocker->pdev;
	int err;

	err = rocker_cmd_get_port_settings_macaddr(rocker_port,
						   rocker_port->dev->dev_addr);
	if (err) {
		dev_warn(&pdev->dev, "failed to get mac address, using random\n");
		eth_hw_addr_random(rocker_port->dev);
	}
}

static int rocker_probe_port(struct rocker *rocker, unsigned int port_number)
{
	const struct pci_dev *pdev = rocker->pdev;
	struct rocker_port *rocker_port;
	struct net_device *dev;
	u16 untagged_vid = 0;
	int err;

	dev = alloc_etherdev(sizeof(struct rocker_port));
	if (!dev)
		return -ENOMEM;
	rocker_port = netdev_priv(dev);
	rocker_port->dev = dev;
	rocker_port->rocker = rocker;
	rocker_port->port_number = port_number;
	rocker_port->pport = port_number + 1;
	rocker_port->brport_flags = BR_LEARNING | BR_LEARNING_SYNC;
	rocker_port->ageing_time = BR_DEFAULT_AGEING_TIME;

	rocker_port_dev_addr_init(rocker_port);
	dev->netdev_ops = &rocker_port_netdev_ops;
	dev->ethtool_ops = &rocker_port_ethtool_ops;
	dev->switchdev_ops = &rocker_port_switchdev_ops;
	netif_napi_add(dev, &rocker_port->napi_tx, rocker_port_poll_tx,
		       NAPI_POLL_WEIGHT);
	netif_napi_add(dev, &rocker_port->napi_rx, rocker_port_poll_rx,
		       NAPI_POLL_WEIGHT);
	rocker_carrier_init(rocker_port);

	dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_SG;

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "register_netdev failed\n");
		goto err_register_netdev;
	}
	rocker->ports[port_number] = rocker_port;

	switchdev_port_fwd_mark_set(rocker_port->dev, NULL, false);

	rocker_port_set_learning(rocker_port, NULL);

	err = rocker_port_ig_tbl(rocker_port, NULL, 0);
	if (err) {
		netdev_err(rocker_port->dev, "install ig port table failed\n");
		goto err_port_ig_tbl;
	}

	rocker_port->internal_vlan_id =
		rocker_port_internal_vlan_id_get(rocker_port, dev->ifindex);

	err = rocker_port_vlan_add(rocker_port, NULL, untagged_vid, 0);
	if (err) {
		netdev_err(rocker_port->dev, "install untagged VLAN failed\n");
		goto err_untagged_vlan;
	}

	return 0;

err_untagged_vlan:
	rocker_port_ig_tbl(rocker_port, NULL, ROCKER_OP_FLAG_REMOVE);
err_port_ig_tbl:
	rocker->ports[port_number] = NULL;
	unregister_netdev(dev);
err_register_netdev:
	free_netdev(dev);
	return err;
}

static int rocker_probe_ports(struct rocker *rocker)
{
	int i;
	size_t alloc_size;
	int err;

	alloc_size = sizeof(struct rocker_port *) * rocker->port_count;
	rocker->ports = kzalloc(alloc_size, GFP_KERNEL);
	if (!rocker->ports)
		return -ENOMEM;
	for (i = 0; i < rocker->port_count; i++) {
		err = rocker_probe_port(rocker, i);
		if (err)
			goto remove_ports;
	}
	return 0;

remove_ports:
	rocker_remove_ports(rocker);
	return err;
}

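/* Enable MSI-X. The vector count must match ROCKER_MSIX_VEC_COUNT()
 * exactly (one cmd vector, one event vector and, per the layout in
 * rocker.h, a tx and an rx vector for each port); anything else is
 * rejected with -EINVAL.
 */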
static int rocker_msix_init(struct rocker *rocker)
{
	struct pci_dev *pdev = rocker->pdev;
	int msix_entries;
	int i;
	int err;

	msix_entries = pci_msix_vec_count(pdev);
	if (msix_entries < 0)
		return msix_entries;

	if (msix_entries != ROCKER_MSIX_VEC_COUNT(rocker->port_count))
		return -EINVAL;

	rocker->msix_entries = kmalloc_array(msix_entries,
					     sizeof(struct msix_entry),
					     GFP_KERNEL);
	if (!rocker->msix_entries)
		return -ENOMEM;

	for (i = 0; i < msix_entries; i++)
		rocker->msix_entries[i].entry = i;

	err = pci_enable_msix_exact(pdev, rocker->msix_entries, msix_entries);
	if (err < 0)
		goto err_enable_msix;

	return 0;

err_enable_msix:
	kfree(rocker->msix_entries);
	return err;
}

static void rocker_msix_fini(const struct rocker *rocker)
{
	pci_disable_msix(rocker->pdev);
	kfree(rocker->msix_entries);
}

static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct rocker *rocker;
	int err;

	rocker = kzalloc(sizeof(*rocker), GFP_KERNEL);
	if (!rocker)
		return -ENOMEM;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "pci_enable_device failed\n");
		goto err_pci_enable_device;
	}

	err = pci_request_regions(pdev, rocker_driver_name);
	if (err) {
		dev_err(&pdev->dev, "pci_request_regions failed\n");
		goto err_pci_request_regions;
	}

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (!err) {
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (err) {
			dev_err(&pdev->dev, "pci_set_consistent_dma_mask failed\n");
			goto err_pci_set_dma_mask;
		}
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "pci_set_dma_mask failed\n");
			goto err_pci_set_dma_mask;
		}
	}

	if (pci_resource_len(pdev, 0) < ROCKER_PCI_BAR0_SIZE) {
		dev_err(&pdev->dev, "invalid PCI region size\n");
		err = -EINVAL;
		goto err_pci_resource_len_check;
	}

	rocker->hw_addr = ioremap(pci_resource_start(pdev, 0),
				  pci_resource_len(pdev, 0));
	if (!rocker->hw_addr) {
		dev_err(&pdev->dev, "ioremap failed\n");
		err = -EIO;
		goto err_ioremap;
	}
	pci_set_master(pdev);

	rocker->pdev = pdev;
	pci_set_drvdata(pdev, rocker);

	rocker->port_count = rocker_read32(rocker, PORT_PHYS_COUNT);

	err = rocker_msix_init(rocker);
	if (err) {
		dev_err(&pdev->dev, "MSI-X init failed\n");
		goto err_msix_init;
	}

	err = rocker_basic_hw_test(rocker);
	if (err) {
		dev_err(&pdev->dev, "basic hw test failed\n");
		goto err_basic_hw_test;
	}

	rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET);

	err = rocker_dma_rings_init(rocker);
	if (err)
		goto err_dma_rings_init;

	err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD),
			  rocker_cmd_irq_handler, 0,
			  rocker_driver_name, rocker);
	if (err) {
		dev_err(&pdev->dev, "cannot assign cmd irq\n");
		goto err_request_cmd_irq;
	}

	err = request_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT),
			  rocker_event_irq_handler, 0,
			  rocker_driver_name, rocker);
	if (err) {
		dev_err(&pdev->dev, "cannot assign event irq\n");
		goto err_request_event_irq;
	}

	rocker->hw.id = rocker_read64(rocker, SWITCH_ID);

	err = rocker_init_tbls(rocker);
	if (err) {
		dev_err(&pdev->dev, "cannot init rocker tables\n");
		goto err_init_tbls;
	}

	rocker->ageing_time = BR_DEFAULT_AGEING_TIME;
	setup_timer(&rocker->fdb_cleanup_timer, rocker_fdb_cleanup,
		    (unsigned long) rocker);
	mod_timer(&rocker->fdb_cleanup_timer, jiffies);

	err = rocker_probe_ports(rocker);
	if (err) {
		dev_err(&pdev->dev, "failed to probe ports\n");
		goto err_probe_ports;
	}

	dev_info(&pdev->dev, "Rocker switch with id %*phN\n",
		 (int)sizeof(rocker->hw.id), &rocker->hw.id);

	return 0;

err_probe_ports:
	del_timer_sync(&rocker->fdb_cleanup_timer);
	rocker_free_tbls(rocker);
err_init_tbls:
	free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker);
err_request_event_irq:
	free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), rocker);
err_request_cmd_irq:
	rocker_dma_rings_fini(rocker);
err_dma_rings_init:
err_basic_hw_test:
	rocker_msix_fini(rocker);
err_msix_init:
	iounmap(rocker->hw_addr);
err_ioremap:
err_pci_resource_len_check:
err_pci_set_dma_mask:
	pci_release_regions(pdev);
err_pci_request_regions:
	pci_disable_device(pdev);
err_pci_enable_device:
	kfree(rocker);
	return err;
}

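/* PCI remove: stop the FDB cleanup timer and free the tables first,
 * reset the device to quiesce it, then unwind everything done in
 * rocker_probe() in reverse order.
 */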
static void rocker_remove(struct pci_dev *pdev)
{
	struct rocker *rocker = pci_get_drvdata(pdev);

	del_timer_sync(&rocker->fdb_cleanup_timer);
	rocker_free_tbls(rocker);
	rocker_write32(rocker, CONTROL, ROCKER_CONTROL_RESET);
	rocker_remove_ports(rocker);
	free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_EVENT), rocker);
	free_irq(rocker_msix_vector(rocker, ROCKER_MSIX_VEC_CMD), rocker);
	rocker_dma_rings_fini(rocker);
	rocker_msix_fini(rocker);
	iounmap(rocker->hw_addr);
	pci_release_regions(rocker->pdev);
	pci_disable_device(rocker->pdev);
	kfree(rocker);
}

static struct pci_driver rocker_pci_driver = {
	.name		= rocker_driver_name,
	.id_table	= rocker_pci_id_table,
	.probe		= rocker_probe,
	.remove		= rocker_remove,
};

/************************************
 * Net device notifier event handler
 ************************************/

static bool rocker_port_dev_check(const struct net_device *dev)
{
	return dev->netdev_ops == &rocker_port_netdev_ops;
}

static int rocker_port_bridge_join(struct rocker_port *rocker_port,
				   struct net_device *bridge)
{
	u16 untagged_vid = 0;
	int err;

	/* Port is joining bridge, so the internal VLAN for the
	 * port is going to change to the bridge internal VLAN.
	 * Let's remove untagged VLAN (vid=0) from port and
	 * re-add once internal VLAN has changed.
	 */

	err = rocker_port_vlan_del(rocker_port, untagged_vid, 0);
	if (err)
		return err;

	rocker_port_internal_vlan_id_put(rocker_port,
					 rocker_port->dev->ifindex);
	rocker_port->internal_vlan_id =
		rocker_port_internal_vlan_id_get(rocker_port, bridge->ifindex);

	rocker_port->bridge_dev = bridge;
	switchdev_port_fwd_mark_set(rocker_port->dev, bridge, true);

	return rocker_port_vlan_add(rocker_port, NULL, untagged_vid, 0);
}

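/* Reverse of rocker_port_bridge_join(): move the port back from the
 * bridge's internal VLAN to its own, clear the fwd mark and, if the
 * port is still up, re-enable forwarding on it.
 */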
static int rocker_port_bridge_leave(struct rocker_port *rocker_port)
{
	u16 untagged_vid = 0;
	int err;

	err = rocker_port_vlan_del(rocker_port, untagged_vid, 0);
	if (err)
		return err;

	rocker_port_internal_vlan_id_put(rocker_port,
					 rocker_port->bridge_dev->ifindex);
	rocker_port->internal_vlan_id =
		rocker_port_internal_vlan_id_get(rocker_port,
						 rocker_port->dev->ifindex);

	switchdev_port_fwd_mark_set(rocker_port->dev, rocker_port->bridge_dev,
				    false);
	rocker_port->bridge_dev = NULL;

	err = rocker_port_vlan_add(rocker_port, NULL, untagged_vid, 0);
	if (err)
		return err;

	if (rocker_port->dev->flags & IFF_UP)
		err = rocker_port_fwd_enable(rocker_port, NULL, 0);

	return err;
}

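/* An OVS master was attached or detached; record it and bounce
 * forwarding (disable, then re-enable) so the port's forwarding state
 * is rebuilt against the new master.
 */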
static int rocker_port_ovs_changed(struct rocker_port *rocker_port,
				   struct net_device *master)
{
	int err;

	rocker_port->bridge_dev = master;

	err = rocker_port_fwd_disable(rocker_port, NULL, 0);
	if (err)
		return err;
	err = rocker_port_fwd_enable(rocker_port, NULL, 0);

	return err;
}

static int rocker_port_master_linked(struct rocker_port *rocker_port,
				     struct net_device *master)
{
	int err = 0;

	if (netif_is_bridge_master(master))
		err = rocker_port_bridge_join(rocker_port, master);
	else if (netif_is_ovs_master(master))
		err = rocker_port_ovs_changed(rocker_port, master);
	return err;
}

static int rocker_port_master_unlinked(struct rocker_port *rocker_port)
{
	int err = 0;

	if (rocker_port_is_bridged(rocker_port))
		err = rocker_port_bridge_leave(rocker_port);
	else if (rocker_port_is_ovsed(rocker_port))
		err = rocker_port_ovs_changed(rocker_port, NULL);
	return err;
}

static int rocker_netdevice_event(struct notifier_block *unused,
				  unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct netdev_notifier_changeupper_info *info;
	struct rocker_port *rocker_port;
	int err;

	if (!rocker_port_dev_check(dev))
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_CHANGEUPPER:
		info = ptr;
		if (!info->master)
			goto out;
		rocker_port = netdev_priv(dev);
		if (info->linking) {
			err = rocker_port_master_linked(rocker_port,
							info->upper_dev);
			if (err)
				netdev_warn(dev, "failed to reflect master linked (err %d)\n",
					    err);
		} else {
			err = rocker_port_master_unlinked(rocker_port);
			if (err)
				netdev_warn(dev, "failed to reflect master unlinked (err %d)\n",
					    err);
		}
		break;
	}
out:
	return NOTIFY_DONE;
}

static struct notifier_block rocker_netdevice_nb __read_mostly = {
	.notifier_call = rocker_netdevice_event,
};

/************************************
 * Net event notifier event handler
 ************************************/

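/* Reflect an ARP neighbour update into the switch: a valid entry is
 * programmed, an invalid one removed. ROCKER_OP_FLAG_NOWAIT is set
 * because neighbour updates can arrive in atomic context, where the
 * driver must not sleep waiting on the cmd ring.
 */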
static int rocker_neigh_update(struct net_device *dev, struct neighbour *n)
{
	struct rocker_port *rocker_port = netdev_priv(dev);
	int flags = (n->nud_state & NUD_VALID ? 0 : ROCKER_OP_FLAG_REMOVE) |
		    ROCKER_OP_FLAG_NOWAIT;
	__be32 ip_addr = *(__be32 *)n->primary_key;

	return rocker_port_ipv4_neigh(rocker_port, NULL, flags, ip_addr, n->ha);
}

static int rocker_netevent_event(struct notifier_block *unused,
				 unsigned long event, void *ptr)
{
	struct net_device *dev;
	struct neighbour *n = ptr;
	int err;

	switch (event) {
	case NETEVENT_NEIGH_UPDATE:
		if (n->tbl != &arp_tbl)
			return NOTIFY_DONE;
		dev = n->dev;
		if (!rocker_port_dev_check(dev))
			return NOTIFY_DONE;
		err = rocker_neigh_update(dev, n);
		if (err)
			netdev_warn(dev,
				    "failed to handle neigh update (err %d)\n",
				    err);
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block rocker_netevent_nb __read_mostly = {
	.notifier_call = rocker_netevent_event,
};

/***********************
 * Module init and exit
 ***********************/

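/* Register the netdevice and netevent notifiers before the PCI driver
 * so no events are missed once ports start registering; exit tears
 * them down in the same order probe failure does.
 */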
static int __init rocker_module_init(void)
{
	int err;

	register_netdevice_notifier(&rocker_netdevice_nb);
	register_netevent_notifier(&rocker_netevent_nb);
	err = pci_register_driver(&rocker_pci_driver);
	if (err)
		goto err_pci_register_driver;
	return 0;

err_pci_register_driver:
	unregister_netevent_notifier(&rocker_netevent_nb);
	unregister_netdevice_notifier(&rocker_netdevice_nb);
	return err;
}

static void __exit rocker_module_exit(void)
{
	unregister_netevent_notifier(&rocker_netevent_nb);
	unregister_netdevice_notifier(&rocker_netdevice_nb);
	pci_unregister_driver(&rocker_pci_driver);
}

module_init(rocker_module_init);
module_exit(rocker_module_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Jiri Pirko <jiri@resnulli.us>");
MODULE_AUTHOR("Scott Feldman <sfeldma@gmail.com>");
MODULE_DESCRIPTION("Rocker switch device driver");
MODULE_DEVICE_TABLE(pci, rocker_pci_id_table);