/**********************************************************************
 * Author: Cavium Networks
 *
 * Contact: support@caviumnetworks.com
 * This file is part of the OCTEON SDK
 *
 * Copyright (c) 2003-2010 Cavium Networks
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT. See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this file; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 * or visit http://www.gnu.org/licenses/.
 *
 * This file may also be available under a different license from Cavium.
 * Contact Cavium Networks for more information
 **********************************************************************/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/cache.h>
#include <linux/cpumask.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ip.h>
#include <linux/string.h>
#include <linux/prefetch.h>
#include <linux/ratelimit.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <net/dst.h>
#ifdef CONFIG_XFRM
#include <linux/xfrm.h>
#include <net/xfrm.h>
#endif /* CONFIG_XFRM */

#include <linux/atomic.h>

#include <asm/octeon/octeon.h>

#include "ethernet-defines.h"
#include "ethernet-mem.h"
#include "ethernet-rx.h"
#include "octeon-ethernet.h"
#include "ethernet-util.h"

#include <asm/octeon/cvmx-helper.h>
#include <asm/octeon/cvmx-wqe.h>
#include <asm/octeon/cvmx-fau.h>
#include <asm/octeon/cvmx-pow.h>
#include <asm/octeon/cvmx-pip.h>
#include <asm/octeon/cvmx-scratch.h>

#include <asm/octeon/cvmx-gmxx-defs.h>

static struct napi_struct cvm_oct_napi;

/**
 * cvm_oct_do_interrupt - interrupt handler.
 * @cpl: Interrupt number. Unused.
 * @dev_id: Cookie used to identify the device. Unused.
 *
 * The interrupt occurs whenever the POW has packets in our group.
 */
static irqreturn_t cvm_oct_do_interrupt(int cpl, void *dev_id)
{
	/* Disable the IRQ and start napi_poll. */
	disable_irq_nosync(OCTEON_IRQ_WORKQ0 + pow_receive_group);
	napi_schedule(&cvm_oct_napi);

	return IRQ_HANDLED;
}

/**
 * cvm_oct_check_rcv_error - process receive errors
 * @work: Work queue entry pointing to the packet.
 *
 * Returns non-zero if the packet can be dropped, zero otherwise.
 */
static inline int cvm_oct_check_rcv_error(cvmx_wqe_t *work)
{
	if ((work->word2.snoip.err_code == 10) && (work->len <= 64)) {
		/*
		 * Ignore length errors on min size packets. Some
		 * equipment incorrectly pads packets to 64+4FCS
		 * instead of 60+4FCS. Note these packets still get
		 * counted as frame errors.
		 */
	} else if (USE_10MBPS_PREAMBLE_WORKAROUND &&
		   ((work->word2.snoip.err_code == 5) ||
		    (work->word2.snoip.err_code == 7))) {
		/*
		 * We received a packet with either an alignment error
		 * or a FCS error. This may be signalling that we are
		 * running 10Mbps with GMXX_RXX_FRM_CTL[PRE_CHK]
		 * off. If this is the case we need to parse the
		 * packet to determine if we can remove a non spec
		 * preamble and generate a correct packet.
		 */
		int interface = cvmx_helper_get_interface_num(work->ipprt);
		int index = cvmx_helper_get_interface_index_num(work->ipprt);
		union cvmx_gmxx_rxx_frm_ctl gmxx_rxx_frm_ctl;

		gmxx_rxx_frm_ctl.u64 =
		    cvmx_read_csr(CVMX_GMXX_RXX_FRM_CTL(index, interface));
		if (gmxx_rxx_frm_ctl.s.pre_chk == 0) {
			uint8_t *ptr =
			    cvmx_phys_to_ptr(work->packet_ptr.s.addr);
			int i = 0;

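			/*
			 * Step over the repeated 0x55 preamble bytes,
			 * leaving ptr on the first byte that differs,
			 * ideally the 0xd5 start of frame delimiter.
			 */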
			while (i < work->len - 1) {
				if (*ptr != 0x55)
					break;
				ptr++;
				i++;
			}

			if (*ptr == 0xd5) {
				/*
				 * printk_ratelimited("Port %d received 0xd5 preamble\n",
				 *		      work->ipprt);
				 */
				work->packet_ptr.s.addr += i + 1;
				work->len -= i + 5;
			} else if ((*ptr & 0xf) == 0xd) {
				/*
				 * printk_ratelimited("Port %d received 0x?d preamble\n",
				 *		      work->ipprt);
				 */
				work->packet_ptr.s.addr += i;
				work->len -= i + 4;
				/* Realign the packet by shifting it one nibble */
				for (i = 0; i < work->len; i++) {
					*ptr = ((*ptr & 0xf0) >> 4) |
					       ((*(ptr + 1) & 0xf) << 4);
					ptr++;
				}
			} else {
				printk_ratelimited("Port %d unknown preamble, packet dropped\n",
						   work->ipprt);
				/* cvmx_helper_dump_packet(work); */
				cvm_oct_free_work(work);
				return 1;
			}
		}
	} else {
		printk_ratelimited("Port %d receive error code %d, packet dropped\n",
				   work->ipprt, work->word2.snoip.err_code);
		cvm_oct_free_work(work);
		return 1;
	}

	return 0;
}

/**
 * cvm_oct_napi_poll - the NAPI poll function.
 * @napi: The NAPI instance, or NULL if called from cvm_oct_poll_controller
 * @budget: Maximum number of packets to receive.
 *
 * Returns the number of packets processed.
 */
static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
{
	const int coreid = cvmx_get_core_num();
	uint64_t old_group_mask;
	uint64_t old_scratch;
	int rx_count = 0;
	int did_work_request = 0;
	int packet_not_copied;

	/* Prefetch cvm_oct_device since we know we need it soon */
	prefetch(cvm_oct_device);

	if (USE_ASYNC_IOBDMA) {
		/* Save scratch in case userspace is using it */
		CVMX_SYNCIOBDMA;
		old_scratch = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
	}

	/* Only allow work for our group (and preserve priorities) */
	old_group_mask = cvmx_read_csr(CVMX_POW_PP_GRP_MSKX(coreid));
	cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid),
		       (old_group_mask & ~0xFFFFull) | 1 << pow_receive_group);

	if (USE_ASYNC_IOBDMA) {
		cvmx_pow_work_request_async(CVMX_SCR_SCRATCH, CVMX_POW_NO_WAIT);
		did_work_request = 1;
	}

	while (rx_count < budget) {
		struct sk_buff *skb = NULL;
		struct sk_buff **pskb = NULL;
		int skb_in_hw;
		cvmx_wqe_t *work;

		if (USE_ASYNC_IOBDMA && did_work_request)
			work = cvmx_pow_work_response_async(CVMX_SCR_SCRATCH);
		else
			work = cvmx_pow_work_request_sync(CVMX_POW_NO_WAIT);

		prefetch(work);
		did_work_request = 0;
		if (work == NULL) {
			union cvmx_pow_wq_int wq_int;

			wq_int.u64 = 0;
			wq_int.s.iq_dis = 1 << pow_receive_group;
			wq_int.s.wq_int = 1 << pow_receive_group;
			cvmx_write_csr(CVMX_POW_WQ_INT, wq_int.u64);
			break;
		}
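		/*
		 * When skbuffs back the FPA packet pool, the skb
		 * pointer is kept in the word immediately before the
		 * start of the hardware buffer so it can be recovered
		 * for the zero-copy receive path below.
		 */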
		pskb = (struct sk_buff **)
			(cvm_oct_get_buffer_ptr(work->packet_ptr) -
			 sizeof(void *));
		prefetch(pskb);

		if (USE_ASYNC_IOBDMA && rx_count < (budget - 1)) {
			cvmx_pow_work_request_async_nocheck(CVMX_SCR_SCRATCH,
							    CVMX_POW_NO_WAIT);
			did_work_request = 1;
		}
		rx_count++;

		skb_in_hw = USE_SKBUFFS_IN_HW && work->word2.s.bufs == 1;
		if (likely(skb_in_hw)) {
			skb = *pskb;
			prefetch(&skb->head);
			prefetch(&skb->len);
		}
		prefetch(cvm_oct_device[work->ipprt]);

		/* Immediately throw away all packets with receive errors */
		if (unlikely(work->word2.snoip.rcv_error)) {
			if (cvm_oct_check_rcv_error(work))
				continue;
		}

		/*
		 * We can only use the zero copy path if skbuffs are
		 * in the FPA pool and the packet fits in a single
		 * buffer.
		 */
		if (likely(skb_in_hw)) {
			skb->data = skb->head + work->packet_ptr.s.addr -
				cvmx_ptr_to_phys(skb->head);
			prefetch(skb->data);
			skb->len = work->len;
			skb_set_tail_pointer(skb, skb->len);
			packet_not_copied = 1;
		} else {
			/*
			 * We have to copy the packet. First allocate
			 * an skbuff for it.
			 */
			skb = dev_alloc_skb(work->len);
			if (!skb) {
				cvm_oct_free_work(work);
				continue;
			}

			/*
			 * Check if we've received a packet that was
			 * entirely stored in the work entry.
			 */
			if (unlikely(work->word2.s.bufs == 0)) {
				uint8_t *ptr = work->packet_data;

				if (likely(!work->word2.s.not_IP)) {
					/*
					 * The beginning of the packet
					 * moves for IP packets.
					 */
					if (work->word2.s.is_v6)
						ptr += 2;
					else
						ptr += 6;
				}
				memcpy(skb_put(skb, work->len), ptr, work->len);
				/* No packet buffers to free */
			} else {
				int segments = work->word2.s.bufs;
				union cvmx_buf_ptr segment_ptr =
					work->packet_ptr;
				int len = work->len;

				while (segments--) {
					union cvmx_buf_ptr next_ptr =
						*(union cvmx_buf_ptr *)
						cvmx_phys_to_ptr(segment_ptr.s.addr - 8);

					/*
					 * Octeon Errata PKI-100: The segment
					 * size is wrong. Until it is fixed,
					 * calculate the segment size based on
					 * the packet pool buffer size. When
					 * it is fixed, the following line
					 * should be replaced with this one:
					 * int segment_size =
					 *	segment_ptr.s.size;
					 */
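					/*
					 * segment_ptr.s.back is the distance,
					 * in 128-byte cache lines, from the
					 * start of the buffer to the data, so
					 * ((addr >> 7) - back) << 7 is the
					 * buffer's starting address.
					 */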
					int segment_size =
						CVMX_FPA_PACKET_POOL_SIZE -
						(segment_ptr.s.addr -
						 (((segment_ptr.s.addr >> 7) -
						   segment_ptr.s.back) << 7));
					/*
					 * Don't copy more than what
					 * is left in the packet.
					 */
					if (segment_size > len)
						segment_size = len;
					/* Copy the data into the packet */
					memcpy(skb_put(skb, segment_size),
					       cvmx_phys_to_ptr(segment_ptr.s.addr),
					       segment_size);
					len -= segment_size;
					segment_ptr = next_ptr;
				}
			}
			packet_not_copied = 0;
		}

		if (likely((work->ipprt < TOTAL_NUMBER_OF_PORTS) &&
			   cvm_oct_device[work->ipprt])) {
			struct net_device *dev = cvm_oct_device[work->ipprt];
			struct octeon_ethernet *priv = netdev_priv(dev);

			/*
			 * Only accept packets for devices that are
			 * currently up.
			 */
			if (likely(dev->flags & IFF_UP)) {
				skb->protocol = eth_type_trans(skb, dev);
				skb->dev = dev;

				if (unlikely(work->word2.s.not_IP ||
					     work->word2.s.IP_exc ||
					     work->word2.s.L4_error ||
					     !work->word2.s.tcp_or_udp))
					skb->ip_summed = CHECKSUM_NONE;
				else
					skb->ip_summed = CHECKSUM_UNNECESSARY;

				/* Increment RX stats for virtual ports */
				if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
#ifdef CONFIG_64BIT
					atomic64_add(1,
						     (atomic64_t *)&priv->stats.rx_packets);
					atomic64_add(skb->len,
						     (atomic64_t *)&priv->stats.rx_bytes);
#else
					atomic_add(1,
						   (atomic_t *)&priv->stats.rx_packets);
					atomic_add(skb->len,
						   (atomic_t *)&priv->stats.rx_bytes);
#endif
				}
				netif_receive_skb(skb);
			} else {
				/*
				 * Drop any packet received for a device
				 * that isn't up.
				 */
				/*
				 * printk_ratelimited("%s: Device not up, packet dropped\n",
				 *		      dev->name);
				 */
#ifdef CONFIG_64BIT
				atomic64_add(1,
					     (atomic64_t *)&priv->stats.rx_dropped);
#else
				atomic_add(1,
					   (atomic_t *)&priv->stats.rx_dropped);
#endif
				dev_kfree_skb_irq(skb);
			}
		} else {
			/*
			 * Drop any packet received for a device that
			 * doesn't exist.
			 */
			printk_ratelimited("Port %d not controlled by Linux, packet dropped\n",
					   work->ipprt);
			dev_kfree_skb_irq(skb);
		}
		/*
		 * Check to see if the skbuff and work share the same
		 * packet buffer.
		 */
		if (USE_SKBUFFS_IN_HW && likely(packet_not_copied)) {
			/*
			 * This buffer needs to be replaced, increment
			 * the number of buffers we need to free by
			 * one.
			 */
			cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE,
					      1);

			cvmx_fpa_free(work, CVMX_FPA_WQE_POOL,
				      DONT_WRITEBACK(1));
		} else {
			cvm_oct_free_work(work);
		}
	}
	/* Restore the original POW group mask */
	cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid), old_group_mask);
	if (USE_ASYNC_IOBDMA) {
		/* Restore the scratch area */
		cvmx_scratch_write64(CVMX_SCR_SCRATCH, old_scratch);
	}
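	/*
	 * Refill the packet buffer pool, replacing the buffers that
	 * were just handed off to the network stack.
	 */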
	cvm_oct_rx_refill_pool(0);

	if (rx_count < budget && napi != NULL) {
		/* No more work */
		napi_complete(napi);
		enable_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group);
	}
	return rx_count;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * cvm_oct_poll_controller - poll for receive packets.
 * @dev: Device to poll. Unused.
 */
void cvm_oct_poll_controller(struct net_device *dev)
{
	cvm_oct_napi_poll(NULL, 16);
}
#endif

void cvm_oct_rx_initialize(void)
{
	int i;
	struct net_device *dev_for_napi = NULL;
	union cvmx_pow_wq_int_thrx int_thr;
	union cvmx_pow_wq_int_pc int_pc;

	for (i = 0; i < TOTAL_NUMBER_OF_PORTS; i++) {
		if (cvm_oct_device[i]) {
			dev_for_napi = cvm_oct_device[i];
			break;
		}
	}

	if (!dev_for_napi)
		panic("No net_devices were allocated.");

	netif_napi_add(dev_for_napi, &cvm_oct_napi, cvm_oct_napi_poll,
		       rx_napi_weight);
	napi_enable(&cvm_oct_napi);

	/* Register an IRQ handler to receive POW interrupts */
	i = request_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group,
			cvm_oct_do_interrupt, 0, "Ethernet", cvm_oct_device);

	if (i)
		panic("Could not acquire Ethernet IRQ %d\n",
		      OCTEON_IRQ_WORKQ0 + pow_receive_group);

	disable_irq_nosync(OCTEON_IRQ_WORKQ0 + pow_receive_group);

	int_thr.u64 = 0;
	int_thr.s.tc_en = 1;
	int_thr.s.tc_thr = 1;
	/* Enable POW interrupt when our port has at least one packet */
	cvmx_write_csr(CVMX_POW_WQ_INT_THRX(pow_receive_group), int_thr.u64);

	int_pc.u64 = 0;
	int_pc.s.pc_thr = 5;
	cvmx_write_csr(CVMX_POW_WQ_INT_PC, int_pc.u64);

	/* Schedule NAPI now. This will indirectly enable the interrupt. */
	napi_schedule(&cvm_oct_napi);
}

void cvm_oct_rx_shutdown(void)
{
	netif_napi_del(&cvm_oct_napi);
}