/*
 * Copyright 2011 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/kernel.h>	/* printk() */
#include <linux/slab.h>		/* kmalloc() */
#include <linux/errno.h>	/* error codes */
#include <linux/types.h>	/* size_t */
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/netdevice.h>	/* struct device, and other headers */
#include <linux/etherdevice.h>	/* eth_type_trans */
#include <linux/skbuff.h>
#include <linux/ioctl.h>
#include <linux/cdev.h>
#include <linux/hugetlb.h>
#include <linux/in6.h>
#include <linux/timer.h>
#include <linux/io.h>
#include <linux/u64_stats_sync.h>
#include <asm/checksum.h>
#include <asm/homecache.h>

#include <hv/drv_xgbe_intf.h>
#include <hv/drv_xgbe_impl.h>
#include <hv/hypervisor.h>
#include <hv/netio_intf.h>

/* For TSO */
#include <linux/ip.h>
#include <linux/tcp.h>


/*
 * First, "tile_net_init_module()" initializes all four "devices" which
 * can be used by linux.
 *
 * Then, "ifconfig DEVICE up" calls "tile_net_open()", which analyzes
 * the network cpus, then uses "tile_net_open_aux()" to initialize
 * LIPP/LEPP, and then uses "tile_net_open_inner()" to register all
 * the tiles, provide buffers to LIPP, allow ingress to start, and
 * turn on hypervisor interrupt handling (and NAPI) on all tiles.
 *
 * If registration fails due to the link being down, then "retry_work"
 * is used to keep calling "tile_net_open_inner()" until it succeeds.
 *
 * If "ifconfig DEVICE down" is called, it uses "tile_net_stop()" to
 * stop egress, drain the LIPP buffers, unregister all the tiles, stop
 * LIPP/LEPP, and wipe the LEPP queue.
 *
 * We start out with the ingress interrupt enabled on each CPU. When
 * this interrupt fires, we disable it, and call "napi_schedule()".
 * This will cause "tile_net_poll()" to be called, which will pull
 * packets from the netio queue, filtering them out, or passing them
 * to "netif_receive_skb()". If our budget is exhausted, we will
 * return, knowing we will be called again later. Otherwise, we
 * reenable the ingress interrupt, and call "napi_complete()".
 *
 * HACK: Since disabling the ingress interrupt is not reliable, we
 * ignore the interrupt if the global "active" flag is false.
 *
 *
 * NOTE: The use of "native_driver" ensures that EPP exists, and that
 * we are using "LIPP" and "LEPP".
 *
 * NOTE: Failing to free completions for an arbitrarily long time
 * (which is defined to be illegal) does in fact cause bizarre
 * problems. The "egress_timer" helps prevent this from happening.
 */


/* HACK: Allow use of "jumbo" packets. */
/* This should be 1500 if "jumbo" is not set in LIPP. */
/* This should be at most 10226 (10240 - 14) if "jumbo" is set in LIPP. */
/* ISSUE: This has not been thoroughly tested (except at 1500). */
#define TILE_NET_MTU 1500

/* HACK: Define this to verify incoming packets. */
/* #define TILE_NET_VERIFY_INGRESS */

/* Use 3000 to enable the Linux Traffic Control (QoS) layer, else 0. */
#define TILE_NET_TX_QUEUE_LEN 0

/* Define to dump packets (prints out the whole packet on tx and rx). */
/* #define TILE_NET_DUMP_PACKETS */

/* Define to enable debug spew (all PDEBUG's are enabled). */
/* #define TILE_NET_DEBUG */


/* Define to activate paranoia checks. */
/* #define TILE_NET_PARANOIA */

/* Default transmit lockup timeout period, in jiffies. */
#define TILE_NET_TIMEOUT (5 * HZ)

/* Default retry interval for bringing up the NetIO interface, in jiffies. */
#define TILE_NET_RETRY_INTERVAL (5 * HZ)

/* Number of ports (xgbe0, xgbe1, gbe0, gbe1). */
#define TILE_NET_DEVS 4



/* Paranoia. */
#if NET_IP_ALIGN != LIPP_PACKET_PADDING
#error "NET_IP_ALIGN must match LIPP_PACKET_PADDING."
#endif


/* Debug print. */
#ifdef TILE_NET_DEBUG
#define PDEBUG(fmt, args...) net_printk(fmt, ## args)
#else
#define PDEBUG(fmt, args...)
#endif


MODULE_AUTHOR("Tilera");
MODULE_LICENSE("GPL");


/*
 * Queue of incoming packets for a specific cpu and device.
 *
 * Includes a pointer to the "system" data, and the actual "user" data.
 */
struct tile_netio_queue {
	netio_queue_impl_t *__system_part;
	netio_queue_user_impl_t __user_part;

};


/*
 * Statistics counters for a specific cpu and device.
 */
struct tile_net_stats_t {
	struct u64_stats_sync syncp;
	u64 rx_packets;		/* total packets received */
	u64 tx_packets;		/* total packets transmitted */
	u64 rx_bytes;		/* total bytes received */
	u64 tx_bytes;		/* total bytes transmitted */
	u64 rx_errors;		/* packets truncated or marked bad by hw */
	u64 rx_dropped;		/* packets not for us or intf not up */
};


/*
 * Info for a specific cpu and device.
 *
 * ISSUE: There is a "dev" pointer in "napi" as well.
 */
struct tile_net_cpu {
	/* The NAPI struct. */
	struct napi_struct napi;
	/* Packet queue. */
	struct tile_netio_queue queue;
	/* Statistics. */
	struct tile_net_stats_t stats;
	/* True iff NAPI is enabled. */
	bool napi_enabled;
	/* True if this tile has successfully registered with the IPP. */
	bool registered;
	/* True if the link was down last time we tried to register. */
	bool link_down;
	/* True if "egress_timer" is scheduled. */
	bool egress_timer_scheduled;
	/* Number of small sk_buffs which must still be provided. */
	unsigned int num_needed_small_buffers;
	/* Number of large sk_buffs which must still be provided. */
	unsigned int num_needed_large_buffers;
	/* A timer for handling egress completions. */
	struct timer_list egress_timer;
};


/*
 * Info for a specific device.
 */
struct tile_net_priv {
	/* Our network device. */
	struct net_device *dev;
	/* Pages making up the egress queue. */
	struct page *eq_pages;
	/* Address of the actual egress queue. */
	lepp_queue_t *eq;
	/* Protects "eq". */
	spinlock_t eq_lock;
	/* The hypervisor handle for this interface. */
	int hv_devhdl;
	/* The intr bit mask that IDs this device. */
	u32 intr_id;
	/* True iff "tile_net_open_aux()" has succeeded. */
	bool partly_opened;
	/* True iff the device is "active". */
	bool active;
	/* Effective network cpus. */
	struct cpumask network_cpus_map;
	/* Number of network cpus. */
	int network_cpus_count;
	/* Credits per network cpu. */
	int network_cpus_credits;
	/* For NetIO bringup retries. */
	struct delayed_work retry_work;
	/* Quick access to per cpu data. */
	struct tile_net_cpu *cpu[NR_CPUS];
};

/* Log2 of the number of small pages needed for the egress queue. */
#define EQ_ORDER get_order(sizeof(lepp_queue_t))
/* Size of the egress queue's pages. */
#define EQ_SIZE (1 << (PAGE_SHIFT + EQ_ORDER))

/*
 * The actual devices (xgbe0, xgbe1, gbe0, gbe1).
 */
static struct net_device *tile_net_devs[TILE_NET_DEVS];

/*
 * The "tile_net_cpu" structures for each device.
 */
static DEFINE_PER_CPU(struct tile_net_cpu, hv_xgbe0);
static DEFINE_PER_CPU(struct tile_net_cpu, hv_xgbe1);
static DEFINE_PER_CPU(struct tile_net_cpu, hv_gbe0);
static DEFINE_PER_CPU(struct tile_net_cpu, hv_gbe1);


/*
 * True if "network_cpus" was specified.
 */
static bool network_cpus_used;

/*
 * The actual cpus in "network_cpus".
 */
static struct cpumask network_cpus_map;



#ifdef TILE_NET_DEBUG
/*
 * printk with extra stuff.
 *
 * We print the CPU we're running in brackets.
 */
static void net_printk(char *fmt, ...)
{
	int i;
	int len;
	va_list args;
	static char buf[256];

	len = sprintf(buf, "tile_net[%2.2d]: ", smp_processor_id());
	va_start(args, fmt);
	i = vscnprintf(buf + len, sizeof(buf) - len - 1, fmt, args);
	va_end(args);
	buf[255] = '\0';
	pr_notice(buf);
}
#endif


#ifdef TILE_NET_DUMP_PACKETS
/*
 * Dump a packet.
 */
static void dump_packet(unsigned char *data, unsigned long length, char *s)
{
	int my_cpu = smp_processor_id();

	unsigned long i;
	char buf[128];

	static unsigned int count;

	pr_info("dump_packet(data %p, length 0x%lx s %s count 0x%x)\n",
		data, length, s, count++);

	pr_info("\n");

	for (i = 0; i < length; i++) {
		if ((i & 0xf) == 0)
			sprintf(buf, "[%02d] %8.8lx:", my_cpu, i);
		sprintf(buf + strlen(buf), " %2.2x", data[i]);
		if ((i & 0xf) == 0xf || i == length - 1) {
			strcat(buf, "\n");
			pr_info("%s", buf);
		}
	}
}
#endif


/*
 * Provide support for the __netio_fastio1() swint
 * (see <hv/drv_xgbe_intf.h> for how it is used).
 *
 * The fastio swint2 call may clobber all the caller-saved registers.
 * It rarely clobbers memory, but we allow for the possibility in
 * the signature just to be on the safe side.
 *
 * Also, gcc doesn't seem to allow an input operand to be
 * clobbered, so we fake it with dummy outputs.
 *
 * This function can't be static because of the way it is declared
 * in the netio header.
 */
inline int __netio_fastio1(u32 fastio_index, u32 arg0)
{
	long result, clobber_r1, clobber_r10;
	asm volatile("swint2"
		     : "=R00" (result),
		       "=R01" (clobber_r1), "=R10" (clobber_r10)
		     : "R10" (fastio_index), "R01" (arg0)
		     : "memory", "r2", "r3", "r4",
		       "r5", "r6", "r7", "r8", "r9",
		       "r11", "r12", "r13", "r14",
		       "r15", "r16", "r17", "r18", "r19",
		       "r20", "r21", "r22", "r23", "r24",
		       "r25", "r26", "r27", "r28", "r29");
	return result;
}


static void tile_net_return_credit(struct tile_net_cpu *info)
{
	struct tile_netio_queue *queue = &info->queue;
	netio_queue_user_impl_t *qup = &queue->__user_part;

	/* Return four credits after every fourth packet. */
	if (--qup->__receive_credit_remaining == 0) {
		u32 interval = qup->__receive_credit_interval;
		qup->__receive_credit_remaining = interval;
		__netio_fastio_return_credits(qup->__fastio_index, interval);
	}
}



/*
 * Provide a linux buffer to LIPP.
 */
static void tile_net_provide_linux_buffer(struct tile_net_cpu *info,
					  void *va, bool small)
{
	struct tile_netio_queue *queue = &info->queue;

	/* Convert "va" and "small" to "linux_buffer_t". */
	unsigned int buffer = ((unsigned int)(__pa(va) >> 7) << 1) + small;

	__netio_fastio_free_buffer(queue->__user_part.__fastio_index, buffer);
}


/*
 * Provide a linux buffer for LIPP.
 *
 * Note that the ACTUAL allocation for each buffer is a "struct sk_buff",
 * plus a chunk of memory that includes not only the requested bytes, but
 * also NET_SKB_PAD bytes of initial padding, and a "struct skb_shared_info".
 *
 * Note that "struct skb_shared_info" is 88 bytes with 64K pages and
 * 268 bytes with 4K pages (since the frags[] array needs 18 entries).
 *
 * Without jumbo packets, the maximum packet size will be 1536 bytes,
 * and we use 2 bytes (NET_IP_ALIGN) of padding. ISSUE: If we told
 * the hardware to clip at 1518 bytes instead of 1536 bytes, then we
 * could save an entire cache line, but in practice, we don't need it.
 *
 * Since CPAs are 38 bits, and we can only encode the high 31 bits in
 * a "linux_buffer_t", the low 7 bits must be zero, and thus, we must
 * align the actual "va" mod 128.
 *
 * We assume that the underlying "head" will be aligned mod 64. Note
 * that in practice, we have seen "head" NOT aligned mod 128 even when
 * using 2048 byte allocations, which is surprising.
 *
 * If "head" WAS always aligned mod 128, we could change LIPP to
 * assume that the low SIX bits are zero, and the 7th bit is one, that
 * is, align the actual "va" mod 128 plus 64, which would be "free".
 *
 * For now, the actual "head" pointer points at NET_SKB_PAD bytes of
 * padding, plus 28 or 92 bytes of extra padding, plus the sk_buff
 * pointer, plus the NET_IP_ALIGN padding, plus 126 or 1536 bytes for
 * the actual packet, plus 62 bytes of empty padding, plus some
 * padding and the "struct skb_shared_info".
 *
 * With 64K pages, a large buffer thus needs 32+92+4+2+1536+62+88
 * bytes, or 1816 bytes, which fits comfortably into 2048 bytes.
 *
 * With 64K pages, a small buffer thus needs 32+92+4+2+126+88
 * bytes, or 344 bytes, which means we are wasting 64+ bytes, and
 * could presumably increase the size of small buffers.
 *
 * With 4K pages, a large buffer thus needs 32+92+4+2+1536+62+268
 * bytes, or 1996 bytes, which fits comfortably into 2048 bytes.
 *
 * With 4K pages, a small buffer thus needs 32+92+4+2+126+268
 * bytes, or 524 bytes, which is annoyingly wasteful.
 *
 * Maybe we should increase LIPP_SMALL_PACKET_SIZE to 192?
 *
 * ISSUE: Maybe we should increase "NET_SKB_PAD" to 64?
 */
static bool tile_net_provide_needed_buffer(struct tile_net_cpu *info,
					   bool small)
{
#if TILE_NET_MTU <= 1536
	/* Without "jumbo", 2 + 1536 should be sufficient. */
	unsigned int large_size = NET_IP_ALIGN + 1536;
#else
	/* ISSUE: This has not been tested. */
	unsigned int large_size = NET_IP_ALIGN + TILE_NET_MTU + 100;
#endif

	/* Avoid "false sharing" with last cache line. */
	/* ISSUE: This is already done by "netdev_alloc_skb()". */
	unsigned int len =
		(((small ? LIPP_SMALL_PACKET_SIZE : large_size) +
		  CHIP_L2_LINE_SIZE() - 1) & -CHIP_L2_LINE_SIZE());

	unsigned int padding = 128 - NET_SKB_PAD;
	unsigned int align;

	struct sk_buff *skb;
	void *va;

	struct sk_buff **skb_ptr;

	/* Request 96 extra bytes for alignment purposes. */
	skb = netdev_alloc_skb(info->napi.dev, len + padding);
	if (skb == NULL)
		return false;

	/* Skip 32 or 96 bytes to align "data" mod 128. */
	align = -(long)skb->data & (128 - 1);
	BUG_ON(align > padding);
	skb_reserve(skb, align);

	/* This address is given to IPP. */
	va = skb->data;

	/* Buffers must not span a huge page. */
	BUG_ON(((((long)va & ~HPAGE_MASK) + len) & HPAGE_MASK) != 0);

#ifdef TILE_NET_PARANOIA
#if CHIP_HAS_CBOX_HOME_MAP()
	if (hash_default) {
		HV_PTE pte = *virt_to_pte(current->mm, (unsigned long)va);
		if (hv_pte_get_mode(pte) != HV_PTE_MODE_CACHE_HASH_L3)
			panic("Non-HFH ingress buffer! VA=%p Mode=%d PTE=%llx",
			      va, hv_pte_get_mode(pte), hv_pte_val(pte));
	}
#endif
#endif

	/* Invalidate the packet buffer. */
	if (!hash_default)
		__inv_buffer(va, len);

	/* Skip two bytes to satisfy LIPP assumptions. */
	/* Note that this aligns IP on a 16 byte boundary. */
	/* ISSUE: Do this when the packet arrives? */
	skb_reserve(skb, NET_IP_ALIGN);

	/* Save a back-pointer to 'skb'. */
	skb_ptr = va - sizeof(*skb_ptr);
	*skb_ptr = skb;

	/* Make sure "skb_ptr" has been flushed. */
	__insn_mf();

	/* Provide the new buffer. */
	tile_net_provide_linux_buffer(info, va, small);

	return true;
}


/*
 * Provide linux buffers for LIPP.
 */
static void tile_net_provide_needed_buffers(struct tile_net_cpu *info)
{
	while (info->num_needed_small_buffers != 0) {
		if (!tile_net_provide_needed_buffer(info, true))
			goto oops;
		info->num_needed_small_buffers--;
	}

	while (info->num_needed_large_buffers != 0) {
		if (!tile_net_provide_needed_buffer(info, false))
			goto oops;
		info->num_needed_large_buffers--;
	}

	return;

oops:

	/* Add a description to the page allocation failure dump. */
	pr_notice("Could not provide a linux buffer to LIPP.\n");
}


/*
 * Grab some LEPP completions, and store them in "comps", of size
 * "comps_size", and return the number of completions which were
 * stored, so the caller can free them.
 */
static unsigned int tile_net_lepp_grab_comps(lepp_queue_t *eq,
					     struct sk_buff *comps[],
					     unsigned int comps_size,
					     unsigned int min_size)
{
	unsigned int n = 0;

	unsigned int comp_head = eq->comp_head;
	unsigned int comp_busy = eq->comp_busy;

	while (comp_head != comp_busy && n < comps_size) {
		comps[n++] = eq->comps[comp_head];
		LEPP_QINC(comp_head);
	}

	if (n < min_size)
		return 0;

	eq->comp_head = comp_head;

	return n;
}


/*
 * Free some comps, and return true iff there are still some pending.
 */
static bool tile_net_lepp_free_comps(struct net_device *dev, bool all)
{
	struct tile_net_priv *priv = netdev_priv(dev);

	lepp_queue_t *eq = priv->eq;

	struct sk_buff *olds[64];
	unsigned int wanted = 64;
	unsigned int i, n;
	bool pending;

	spin_lock(&priv->eq_lock);

	if (all)
		eq->comp_busy = eq->comp_tail;

	n = tile_net_lepp_grab_comps(eq, olds, wanted, 0);

	pending = (eq->comp_head != eq->comp_tail);

	spin_unlock(&priv->eq_lock);

	for (i = 0; i < n; i++)
		kfree_skb(olds[i]);

	return pending;
}


/*
 * Make sure the egress timer is scheduled.
 *
 * Note that we use "schedule if not scheduled" logic instead of the more
 * obvious "reschedule" logic, because "reschedule" is fairly expensive.
 */
static void tile_net_schedule_egress_timer(struct tile_net_cpu *info)
{
	if (!info->egress_timer_scheduled) {
		mod_timer_pinned(&info->egress_timer, jiffies + 1);
		info->egress_timer_scheduled = true;
	}
}


/*
 * The "function" for "info->egress_timer".
 *
 * This timer will reschedule itself as long as there are any pending
 * completions expected (on behalf of any tile).
 *
 * ISSUE: Realistically, will the timer ever stop scheduling itself?
 *
 * ISSUE: This timer is almost never actually needed, so just use a global
 * timer that can run on any tile.
 *
 * ISSUE: Maybe instead track number of expected completions, and free
 * only that many, resetting to zero if "pending" is ever false.
 */
static void tile_net_handle_egress_timer(unsigned long arg)
{
	struct tile_net_cpu *info = (struct tile_net_cpu *)arg;
	struct net_device *dev = info->napi.dev;

	/* The timer is no longer scheduled. */
	info->egress_timer_scheduled = false;

	/* Free comps, and reschedule timer if more are pending. */
	if (tile_net_lepp_free_comps(dev, false))
		tile_net_schedule_egress_timer(info);
}


static void tile_net_discard_aux(struct tile_net_cpu *info, int index)
{
	struct tile_netio_queue *queue = &info->queue;
	netio_queue_impl_t *qsp = queue->__system_part;
	netio_queue_user_impl_t *qup = &queue->__user_part;

	int index2_aux = index + sizeof(netio_pkt_t);
	int index2 =
		((index2_aux ==
		  qsp->__packet_receive_queue.__last_packet_plus_one) ?
		 0 : index2_aux);

	netio_pkt_t *pkt = (netio_pkt_t *)((unsigned long) &qsp[1] + index);

	/* Extract the "linux_buffer_t". */
	unsigned int buffer = pkt->__packet.word;

	/* Convert "linux_buffer_t" to "va". */
	void *va = __va((phys_addr_t)(buffer >> 1) << 7);

	/* Acquire the associated "skb". */
	struct sk_buff **skb_ptr = va - sizeof(*skb_ptr);
	struct sk_buff *skb = *skb_ptr;

	kfree_skb(skb);

	/* Consume this packet. */
	qup->__packet_receive_read = index2;
}
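
/*
 * NOTE: Ingress queue indices advance by sizeof(netio_pkt_t) and wrap
 * back to zero at "__last_packet_plus_one"; "tile_net_discard_aux()"
 * above and "tile_net_poll_aux()" below use the same index arithmetic.
 */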


/*
 * Like "tile_net_poll()", but just discard packets.
 */
static void tile_net_discard_packets(struct net_device *dev)
{
	struct tile_net_priv *priv = netdev_priv(dev);
	int my_cpu = smp_processor_id();
	struct tile_net_cpu *info = priv->cpu[my_cpu];
	struct tile_netio_queue *queue = &info->queue;
	netio_queue_impl_t *qsp = queue->__system_part;
	netio_queue_user_impl_t *qup = &queue->__user_part;

	while (qup->__packet_receive_read !=
	       qsp->__packet_receive_queue.__packet_write) {
		int index = qup->__packet_receive_read;
		tile_net_discard_aux(info, index);
	}
}


/*
 * Handle the next packet. Return true if "processed", false if "filtered".
 */
static bool tile_net_poll_aux(struct tile_net_cpu *info, int index)
{
	struct net_device *dev = info->napi.dev;

	struct tile_netio_queue *queue = &info->queue;
	netio_queue_impl_t *qsp = queue->__system_part;
	netio_queue_user_impl_t *qup = &queue->__user_part;
	struct tile_net_stats_t *stats = &info->stats;

	int filter;

	int index2_aux = index + sizeof(netio_pkt_t);
	int index2 =
		((index2_aux ==
		  qsp->__packet_receive_queue.__last_packet_plus_one) ?
		 0 : index2_aux);

	netio_pkt_t *pkt = (netio_pkt_t *)((unsigned long) &qsp[1] + index);

	netio_pkt_metadata_t *metadata = NETIO_PKT_METADATA(pkt);
	netio_pkt_status_t pkt_status = NETIO_PKT_STATUS_M(metadata, pkt);

	/* Extract the packet size. FIXME: Shouldn't the second line */
	/* get subtracted? Mostly moot, since it should be "zero". */
	unsigned long len =
		(NETIO_PKT_CUSTOM_LENGTH(pkt) +
		 NET_IP_ALIGN - NETIO_PACKET_PADDING);

	/* Extract the "linux_buffer_t". */
	unsigned int buffer = pkt->__packet.word;

	/* Extract "small" (vs "large"). */
	bool small = ((buffer & 1) != 0);

	/* Convert "linux_buffer_t" to "va". */
	void *va = __va((phys_addr_t)(buffer >> 1) << 7);

	/* Extract the packet data pointer. */
	/* Compare to "NETIO_PKT_CUSTOM_DATA(pkt)". */
	unsigned char *buf = va + NET_IP_ALIGN;

	/* Invalidate the packet buffer. */
	if (!hash_default)
		__inv_buffer(buf, len);

	/* ISSUE: Is this needed? */
	dev->last_rx = jiffies;

#ifdef TILE_NET_DUMP_PACKETS
	dump_packet(buf, len, "rx");
#endif /* TILE_NET_DUMP_PACKETS */

#ifdef TILE_NET_VERIFY_INGRESS
	if (pkt_status == NETIO_PKT_STATUS_OVERSIZE && len >= 64) {
		dump_packet(buf, len, "rx");
		panic("Unexpected OVERSIZE.");
	}
#endif

	filter = 0;

	if (pkt_status == NETIO_PKT_STATUS_BAD) {
		/* Handle CRC error and hardware truncation. */
		filter = 2;
	} else if (!(dev->flags & IFF_UP)) {
		/* Filter packets received before we're up. */
		filter = 1;
	} else if (NETIO_PKT_ETHERTYPE_RECOGNIZED_M(metadata, pkt) &&
		   pkt_status == NETIO_PKT_STATUS_UNDERSIZE) {
		/* Filter "truncated" packets. */
		filter = 2;
	} else if (!(dev->flags & IFF_PROMISC)) {
		if (!is_multicast_ether_addr(buf)) {
			/* Filter packets not for our address. */
			const u8 *mine = dev->dev_addr;
			filter = !ether_addr_equal(mine, buf);
		}
	}

	u64_stats_update_begin(&stats->syncp);

	if (filter != 0) {

		if (filter == 1)
			stats->rx_dropped++;
		else
			stats->rx_errors++;

		tile_net_provide_linux_buffer(info, va, small);

	} else {

		/* Acquire the associated "skb". */
		struct sk_buff **skb_ptr = va - sizeof(*skb_ptr);
		struct sk_buff *skb = *skb_ptr;

		/* Paranoia. */
		if (skb->data != buf)
			panic("Corrupt linux buffer from LIPP! "
			      "VA=%p, skb=%p, skb->data=%p\n",
			      va, skb, skb->data);

		/* Encode the actual packet length. */
		skb_put(skb, len);

		/* NOTE: This call also sets "skb->dev = dev". */
		skb->protocol = eth_type_trans(skb, dev);

		/* Avoid recomputing "good" TCP/UDP checksums. */
		if (NETIO_PKT_L4_CSUM_CORRECT_M(metadata, pkt))
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		netif_receive_skb(skb);

		stats->rx_packets++;
		stats->rx_bytes += len;
	}

	u64_stats_update_end(&stats->syncp);

	/* ISSUE: It would be nice to defer this until the packet has */
	/* actually been processed. */
	tile_net_return_credit(info);

	/* Consume this packet. */
	qup->__packet_receive_read = index2;

	return !filter;
}


/*
 * Handle some packets for the given device on the current CPU.
 *
 * If "tile_net_stop()" is called on some other tile while this
 * function is running, we will return, hopefully before that
 * other tile asks us to call "napi_disable()".
 *
 * The "rotting packet" race condition occurs if a packet arrives
 * during the extremely narrow window between the queue appearing to
 * be empty, and the ingress interrupt being re-enabled. This happens
 * a LOT under heavy network load.
 */
static int tile_net_poll(struct napi_struct *napi, int budget)
{
	struct net_device *dev = napi->dev;
	struct tile_net_priv *priv = netdev_priv(dev);
	int my_cpu = smp_processor_id();
	struct tile_net_cpu *info = priv->cpu[my_cpu];
	struct tile_netio_queue *queue = &info->queue;
	netio_queue_impl_t *qsp = queue->__system_part;
	netio_queue_user_impl_t *qup = &queue->__user_part;

	unsigned int work = 0;

	if (budget <= 0)
		goto done;

	while (priv->active) {
		int index = qup->__packet_receive_read;
		if (index == qsp->__packet_receive_queue.__packet_write)
			break;

		if (tile_net_poll_aux(info, index)) {
			if (++work >= budget)
				goto done;
		}
	}

	napi_complete(&info->napi);

	if (!priv->active)
		goto done;

	/* Re-enable the ingress interrupt. */
	enable_percpu_irq(priv->intr_id, 0);

	/* HACK: Avoid the "rotting packet" problem (see above). */
	if (qup->__packet_receive_read !=
	    qsp->__packet_receive_queue.__packet_write) {
		/* ISSUE: Sometimes this returns zero, presumably */
		/* because an interrupt was handled for this tile. */
		(void)napi_reschedule(&info->napi);
	}

done:

	if (priv->active)
		tile_net_provide_needed_buffers(info);

	return work;
}


/*
 * Handle an ingress interrupt for the given device on the current cpu.
 *
 * ISSUE: Sometimes this gets called after "disable_percpu_irq()" has
 * been called! This is probably due to "pending hypervisor downcalls".
 *
 * ISSUE: Is there any race condition between the "napi_schedule()" here
 * and the "napi_complete()" call above?
 */
static irqreturn_t tile_net_handle_ingress_interrupt(int irq, void *dev_ptr)
{
	struct net_device *dev = (struct net_device *)dev_ptr;
	struct tile_net_priv *priv = netdev_priv(dev);
	int my_cpu = smp_processor_id();
	struct tile_net_cpu *info = priv->cpu[my_cpu];

	/* Disable the ingress interrupt. */
	disable_percpu_irq(priv->intr_id);

	/* Ignore unwanted interrupts. */
	if (!priv->active)
		return IRQ_HANDLED;

	/* ISSUE: Sometimes "info->napi_enabled" is false here. */

	napi_schedule(&info->napi);

	return IRQ_HANDLED;
}


/*
 * One time initialization per interface.
 */
static int tile_net_open_aux(struct net_device *dev)
{
	struct tile_net_priv *priv = netdev_priv(dev);

	int ret;
	int dummy;
	unsigned int epp_lotar;

	/*
	 * Find out where EPP memory should be homed.
	 */
	ret = hv_dev_pread(priv->hv_devhdl, 0,
			   (HV_VirtAddr)&epp_lotar, sizeof(epp_lotar),
			   NETIO_EPP_SHM_OFF);
	if (ret < 0) {
		pr_err("could not read epp_shm_queue lotar.\n");
		return -EIO;
	}

	/*
	 * Home the page on the EPP.
	 */
	{
		int epp_home = hv_lotar_to_cpu(epp_lotar);
		homecache_change_page_home(priv->eq_pages, EQ_ORDER, epp_home);
	}

	/*
	 * Register the EPP shared memory queue.
	 */
	{
		netio_ipp_address_t ea = {
			.va = 0,
			.pa = __pa(priv->eq),
			.pte = hv_pte(0),
			.size = EQ_SIZE,
		};
		ea.pte = hv_pte_set_lotar(ea.pte, epp_lotar);
		ea.pte = hv_pte_set_mode(ea.pte, HV_PTE_MODE_CACHE_TILE_L3);
		ret = hv_dev_pwrite(priv->hv_devhdl, 0,
				    (HV_VirtAddr)&ea,
				    sizeof(ea),
				    NETIO_EPP_SHM_OFF);
		if (ret < 0)
			return -EIO;
	}

	/*
	 * Start LIPP/LEPP.
	 */
	if (hv_dev_pwrite(priv->hv_devhdl, 0, (HV_VirtAddr)&dummy,
			  sizeof(dummy), NETIO_IPP_START_SHIM_OFF) < 0) {
		pr_warn("Failed to start LIPP/LEPP\n");
		return -EIO;
	}

	return 0;
}


/*
 * Register with hypervisor on the current CPU.
 *
 * Strangely, this function does important things even if it "fails",
 * which is especially common if the link is not up yet. Hopefully
 * these things are all "harmless" if done twice!
 */
static void tile_net_register(void *dev_ptr)
{
	struct net_device *dev = (struct net_device *)dev_ptr;
	struct tile_net_priv *priv = netdev_priv(dev);
	int my_cpu = smp_processor_id();
	struct tile_net_cpu *info;

	struct tile_netio_queue *queue;

	/* Only network cpus can receive packets. */
	int queue_id =
		cpumask_test_cpu(my_cpu, &priv->network_cpus_map) ? 0 : 255;

	netio_input_config_t config = {
		.flags = 0,
		.num_receive_packets = priv->network_cpus_credits,
		.queue_id = queue_id
	};

	int ret = 0;
	netio_queue_impl_t *queuep;

	PDEBUG("tile_net_register(queue_id %d)\n", queue_id);

	if (!strcmp(dev->name, "xgbe0"))
		info = this_cpu_ptr(&hv_xgbe0);
	else if (!strcmp(dev->name, "xgbe1"))
		info = this_cpu_ptr(&hv_xgbe1);
	else if (!strcmp(dev->name, "gbe0"))
		info = this_cpu_ptr(&hv_gbe0);
	else if (!strcmp(dev->name, "gbe1"))
		info = this_cpu_ptr(&hv_gbe1);
	else
		BUG();

	/* Initialize the egress timer. */
	init_timer(&info->egress_timer);
	info->egress_timer.data = (long)info;
	info->egress_timer.function = tile_net_handle_egress_timer;

	u64_stats_init(&info->stats.syncp);

	priv->cpu[my_cpu] = info;

	/*
	 * Register ourselves with LIPP. This does a lot of stuff,
	 * including invoking the LIPP registration code.
	 */
	ret = hv_dev_pwrite(priv->hv_devhdl, 0,
			    (HV_VirtAddr)&config,
			    sizeof(netio_input_config_t),
			    NETIO_IPP_INPUT_REGISTER_OFF);
	PDEBUG("hv_dev_pwrite(NETIO_IPP_INPUT_REGISTER_OFF) returned %d\n",
	       ret);
	if (ret < 0) {
		if (ret != NETIO_LINK_DOWN) {
			printk(KERN_DEBUG "hv_dev_pwrite "
			       "NETIO_IPP_INPUT_REGISTER_OFF failure %d\n",
			       ret);
		}
		info->link_down = (ret == NETIO_LINK_DOWN);
		return;
	}

	/*
	 * Get the pointer to our queue's system part.
	 */

	ret = hv_dev_pread(priv->hv_devhdl, 0,
			   (HV_VirtAddr)&queuep,
			   sizeof(netio_queue_impl_t *),
			   NETIO_IPP_INPUT_REGISTER_OFF);
	PDEBUG("hv_dev_pread(NETIO_IPP_INPUT_REGISTER_OFF) returned %d\n",
	       ret);
	PDEBUG("queuep %p\n", queuep);
	if (ret <= 0) {
		/* ISSUE: Shouldn't this be a fatal error? */
		pr_err("hv_dev_pread NETIO_IPP_INPUT_REGISTER_OFF failure\n");
		return;
	}

	queue = &info->queue;

	queue->__system_part = queuep;

	memset(&queue->__user_part, 0, sizeof(netio_queue_user_impl_t));

	/* This is traditionally "config.num_receive_packets / 2". */
	queue->__user_part.__receive_credit_interval = 4;
	queue->__user_part.__receive_credit_remaining =
		queue->__user_part.__receive_credit_interval;

	/*
	 * Get a fastio index from the hypervisor.
	 * ISSUE: Shouldn't this check the result?
	 */
	ret = hv_dev_pread(priv->hv_devhdl, 0,
			   (HV_VirtAddr)&queue->__user_part.__fastio_index,
			   sizeof(queue->__user_part.__fastio_index),
			   NETIO_IPP_GET_FASTIO_OFF);
	PDEBUG("hv_dev_pread(NETIO_IPP_GET_FASTIO_OFF) returned %d\n", ret);

	/* Now we are registered. */
	info->registered = true;
}


/*
 * Deregister with hypervisor on the current CPU.
 *
 * This simply discards all our credits, so no more packets will be
 * delivered to this tile. There may still be packets in our queue.
 *
 * Also, disable the ingress interrupt.
 */
static void tile_net_deregister(void *dev_ptr)
{
	struct net_device *dev = (struct net_device *)dev_ptr;
	struct tile_net_priv *priv = netdev_priv(dev);
	int my_cpu = smp_processor_id();
	struct tile_net_cpu *info = priv->cpu[my_cpu];

	/* Disable the ingress interrupt. */
	disable_percpu_irq(priv->intr_id);

	/* Do nothing else if not registered. */
	if (info == NULL || !info->registered)
		return;

	{
		struct tile_netio_queue *queue = &info->queue;
		netio_queue_user_impl_t *qup = &queue->__user_part;

		/* Discard all our credits. */
		__netio_fastio_return_credits(qup->__fastio_index, -1);
	}
}


/*
 * Unregister with hypervisor on the current CPU.
 *
 * Also, disable the ingress interrupt.
 */
static void tile_net_unregister(void *dev_ptr)
{
	struct net_device *dev = (struct net_device *)dev_ptr;
	struct tile_net_priv *priv = netdev_priv(dev);
	int my_cpu = smp_processor_id();
	struct tile_net_cpu *info = priv->cpu[my_cpu];

	int ret;
	int dummy = 0;

	/* Disable the ingress interrupt. */
	disable_percpu_irq(priv->intr_id);

	/* Do nothing else if not registered. */
	if (info == NULL || !info->registered)
		return;

	/* Unregister ourselves with LIPP/LEPP. */
	ret = hv_dev_pwrite(priv->hv_devhdl, 0, (HV_VirtAddr)&dummy,
			    sizeof(dummy), NETIO_IPP_INPUT_UNREGISTER_OFF);
	if (ret < 0)
		panic("Failed to unregister with LIPP/LEPP!\n");

	/* Discard all packets still in our NetIO queue. */
	tile_net_discard_packets(dev);

	/* Reset state. */
	info->num_needed_small_buffers = 0;
	info->num_needed_large_buffers = 0;

	/* Cancel egress timer. */
	del_timer(&info->egress_timer);
	info->egress_timer_scheduled = false;
}


/*
 * Helper function for "tile_net_stop()".
 *
 * Also used to handle registration failure in "tile_net_open_inner()",
 * when the various extra steps in "tile_net_stop()" are not necessary.
 */
static void tile_net_stop_aux(struct net_device *dev)
{
	struct tile_net_priv *priv = netdev_priv(dev);
	int i;

	int dummy = 0;

	/*
	 * Unregister all tiles, so LIPP will stop delivering packets.
	 * Also, delete all the "napi" objects (sequentially, to protect
	 * "dev->napi_list").
	 */
	on_each_cpu(tile_net_unregister, (void *)dev, 1);
	for_each_online_cpu(i) {
		struct tile_net_cpu *info = priv->cpu[i];
		if (info != NULL && info->registered) {
			netif_napi_del(&info->napi);
			info->registered = false;
		}
	}

	/* Stop LIPP/LEPP. */
	if (hv_dev_pwrite(priv->hv_devhdl, 0, (HV_VirtAddr)&dummy,
			  sizeof(dummy), NETIO_IPP_STOP_SHIM_OFF) < 0)
		panic("Failed to stop LIPP/LEPP!\n");

	priv->partly_opened = false;
}


/*
 * Disable NAPI for the given device on the current cpu.
 */
static void tile_net_stop_disable(void *dev_ptr)
{
	struct net_device *dev = (struct net_device *)dev_ptr;
	struct tile_net_priv *priv = netdev_priv(dev);
	int my_cpu = smp_processor_id();
	struct tile_net_cpu *info = priv->cpu[my_cpu];

	/* Disable NAPI if needed. */
	if (info != NULL && info->napi_enabled) {
		napi_disable(&info->napi);
		info->napi_enabled = false;
	}
}


/*
 * Enable NAPI and the ingress interrupt for the given device
 * on the current cpu.
 *
 * ISSUE: Only do this for "network cpus"?
 */
static void tile_net_open_enable(void *dev_ptr)
{
	struct net_device *dev = (struct net_device *)dev_ptr;
	struct tile_net_priv *priv = netdev_priv(dev);
	int my_cpu = smp_processor_id();
	struct tile_net_cpu *info = priv->cpu[my_cpu];

	/* Enable NAPI. */
	napi_enable(&info->napi);
	info->napi_enabled = true;

	/* Enable the ingress interrupt. */
	enable_percpu_irq(priv->intr_id, 0);
}


/*
 * tile_net_open_inner does most of the work of bringing up the interface.
 * It's called from tile_net_open(), and also from tile_net_open_retry().
 * The return value is 0 if the interface was brought up, < 0 if
 * tile_net_open() should return the return value as an error, and > 0 if
 * tile_net_open() should return success and schedule a work item to
 * periodically retry the bringup.
 */
static int tile_net_open_inner(struct net_device *dev)
{
	struct tile_net_priv *priv = netdev_priv(dev);
	int my_cpu = smp_processor_id();
	struct tile_net_cpu *info;
	struct tile_netio_queue *queue;
	int result = 0;
	int i;
	int dummy = 0;

	/*
	 * First try to register just on the local CPU, and handle any
	 * semi-expected "link down" failure specially. Note that we
	 * do NOT call "tile_net_stop_aux()", unlike below.
	 */
	tile_net_register(dev);
	info = priv->cpu[my_cpu];
	if (!info->registered) {
		if (info->link_down)
			return 1;
		return -EAGAIN;
	}

	/*
	 * Now register everywhere else. If any registration fails,
	 * even for "link down" (which might not be possible), we
	 * clean up using "tile_net_stop_aux()". Also, add all the
	 * "napi" objects (sequentially, to protect "dev->napi_list").
	 * ISSUE: Only use "netif_napi_add()" for "network cpus"?
	 */
	smp_call_function(tile_net_register, (void *)dev, 1);
	for_each_online_cpu(i) {
		struct tile_net_cpu *info = priv->cpu[i];
		if (info->registered)
			netif_napi_add(dev, &info->napi, tile_net_poll, 64);
		else
			result = -EAGAIN;
	}
	if (result != 0) {
		tile_net_stop_aux(dev);
		return result;
	}

	queue = &info->queue;

	if (priv->intr_id == 0) {
		unsigned int irq;

		/*
		 * Acquire the irq allocated by the hypervisor. Every
		 * queue gets the same irq. The "__intr_id" field is
		 * "1 << irq", so we use "__ffs()" to extract "irq".
		 */
		priv->intr_id = queue->__system_part->__intr_id;
		BUG_ON(priv->intr_id == 0);
		irq = __ffs(priv->intr_id);

		/*
		 * Register the ingress interrupt handler for this
		 * device, permanently.
		 *
		 * We used to call "free_irq()" in "tile_net_stop()",
		 * and then re-register the handler here every time,
		 * but that caused DNP errors in "handle_IRQ_event()"
		 * because "desc->action" was NULL. See bug 9143.
		 */
		tile_irq_activate(irq, TILE_IRQ_PERCPU);
		BUG_ON(request_irq(irq, tile_net_handle_ingress_interrupt,
				   0, dev->name, (void *)dev) != 0);
	}

	{
		/* Allocate initial buffers. */

		int max_buffers =
			priv->network_cpus_count * priv->network_cpus_credits;

		info->num_needed_small_buffers =
			min(LIPP_SMALL_BUFFERS, max_buffers);

		info->num_needed_large_buffers =
			min(LIPP_LARGE_BUFFERS, max_buffers);

		tile_net_provide_needed_buffers(info);

		if (info->num_needed_small_buffers != 0 ||
		    info->num_needed_large_buffers != 0)
			panic("Insufficient memory for buffer stack!");
	}

	/* We are about to be active. */
	priv->active = true;

	/* Make sure "active" is visible to all tiles. */
	mb();

	/* On each tile, enable NAPI and the ingress interrupt. */
	on_each_cpu(tile_net_open_enable, (void *)dev, 1);

	/* Start LIPP/LEPP and activate "ingress" at the shim. */
	if (hv_dev_pwrite(priv->hv_devhdl, 0, (HV_VirtAddr)&dummy,
			  sizeof(dummy), NETIO_IPP_INPUT_INIT_OFF) < 0)
		panic("Failed to activate the LIPP Shim!\n");

	/* Start our transmit queue. */
	netif_start_queue(dev);

	return 0;
}


/*
 * Called periodically to retry bringing up the NetIO interface,
 * if it doesn't come up cleanly during tile_net_open().
 */
static void tile_net_open_retry(struct work_struct *w)
{
	struct delayed_work *dw =
		container_of(w, struct delayed_work, work);

	struct tile_net_priv *priv =
		container_of(dw, struct tile_net_priv, retry_work);

	/*
	 * Try to bring the NetIO interface up. If it fails, reschedule
	 * ourselves to try again later; otherwise, tell Linux we now have
	 * a working link. ISSUE: What if the return value is negative?
	 */
	if (tile_net_open_inner(priv->dev) != 0)
		schedule_delayed_work(&priv->retry_work,
				      TILE_NET_RETRY_INTERVAL);
	else
		netif_carrier_on(priv->dev);
}


/*
 * Called when a network interface is made active.
 *
 * Returns 0 on success, negative value on failure.
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS (if needed), the watchdog timer
 * is started, and the stack is notified that the interface is ready.
 *
 * If the actual link is not available yet, then we tell Linux that
 * we have no carrier, and we keep checking until the link comes up.
 */
static int tile_net_open(struct net_device *dev)
{
	int ret = 0;
	struct tile_net_priv *priv = netdev_priv(dev);

	/*
	 * We rely on priv->partly_opened to tell us if this is the
	 * first time this interface is being brought up. If it is
	 * set, the IPP was already initialized and should not be
	 * initialized again.
	 */
	if (!priv->partly_opened) {

		int count;
		int credits;

		/* Initialize LIPP/LEPP, and start the Shim. */
		ret = tile_net_open_aux(dev);
		if (ret < 0) {
			pr_err("tile_net_open_aux failed: %d\n", ret);
			return ret;
		}

		/* Analyze the network cpus. */

		if (network_cpus_used)
			cpumask_copy(&priv->network_cpus_map,
				     &network_cpus_map);
		else
			cpumask_copy(&priv->network_cpus_map, cpu_online_mask);


		count = cpumask_weight(&priv->network_cpus_map);

		/* Limit credits to available buffers, and apply min. */
		credits = max(16, (LIPP_LARGE_BUFFERS / count) & ~1);

		/* Apply "GBE" max limit. */
		/* ISSUE: Use higher limit for XGBE? */
		credits = min(NETIO_MAX_RECEIVE_PKTS, credits);

		priv->network_cpus_count = count;
		priv->network_cpus_credits = credits;

#ifdef TILE_NET_DEBUG
		pr_info("Using %d network cpus, with %d credits each\n",
			priv->network_cpus_count, priv->network_cpus_credits);
#endif

		priv->partly_opened = true;

	} else {
		/* FIXME: Is this possible? */
		/* printk("Already partly opened.\n"); */
	}

	/*
	 * Attempt to bring up the link.
	 */
	ret = tile_net_open_inner(dev);
	if (ret <= 0) {
		if (ret == 0)
			netif_carrier_on(dev);
		return ret;
	}

	/*
	 * We were unable to bring up the NetIO interface, but we want to
	 * try again in a little bit. Tell Linux that we have no carrier
	 * so it doesn't try to use the interface before the link comes up
	 * and then remember to try again later.
	 */
	netif_carrier_off(dev);
	schedule_delayed_work(&priv->retry_work, TILE_NET_RETRY_INTERVAL);

	return 0;
}


static int tile_net_drain_lipp_buffers(struct tile_net_priv *priv)
{
	int n = 0;

	/* Drain all the LIPP buffers. */
	while (true) {
		unsigned int buffer;

		/* NOTE: This should never fail. */
		if (hv_dev_pread(priv->hv_devhdl, 0, (HV_VirtAddr)&buffer,
				 sizeof(buffer), NETIO_IPP_DRAIN_OFF) < 0)
			break;

		/* Stop when done. */
		if (buffer == 0)
			break;

		{
			/* Convert "linux_buffer_t" to "va". */
			void *va = __va((phys_addr_t)(buffer >> 1) << 7);

			/* Acquire the associated "skb". */
			struct sk_buff **skb_ptr = va - sizeof(*skb_ptr);
			struct sk_buff *skb = *skb_ptr;

			kfree_skb(skb);
		}

		n++;
	}

	return n;
}


/*
 * Disables a network interface.
 *
 * Returns 0, this is not allowed to fail.
 *
 * The close entry point is called when an interface is de-activated
 * by the OS. The hardware is still under the driver's control, but
 * needs to be disabled. A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 *
 * ISSUE: How closely does "netif_running(dev)" mirror "priv->active"?
 *
 * Before we are called by "__dev_close()", "netif_running()" will
 * have been cleared, so no NEW calls to "tile_net_poll()" will be
 * made by "netpoll_poll_dev()".
 *
 * Often, this can cause some tiles to still have packets in their
 * queues, so we must call "tile_net_discard_packets()" later.
 *
 * Note that some other tile may still be INSIDE "tile_net_poll()",
 * and in fact, many will be, if there is heavy network load.
 *
 * Calling "on_each_cpu(tile_net_stop_disable, (void *)dev, 1)" when
 * any tile is still "napi_schedule()"'d will induce a horrible crash
 * when "msleep()" is called. This includes tiles which are inside
 * "tile_net_poll()" which have not yet called "napi_complete()".
 *
 * So, we must first try to wait long enough for other tiles to finish
 * with any current "tile_net_poll()" call, and, hopefully, to clear
 * the "scheduled" flag. ISSUE: It is unclear what happens to tiles
 * which have called "napi_schedule()" but which had not yet tried to
 * call "tile_net_poll()", or which exhausted their budget inside
 * "tile_net_poll()" just before this function was called.
 */
static int tile_net_stop(struct net_device *dev)
{
	struct tile_net_priv *priv = netdev_priv(dev);

	PDEBUG("tile_net_stop()\n");

	/* Start discarding packets. */
	priv->active = false;

	/* Make sure "active" is visible to all tiles. */
	mb();

	/*
	 * On each tile, make sure no NEW packets get delivered, and
	 * disable the ingress interrupt.
	 *
	 * Note that the ingress interrupt can fire AFTER this,
	 * presumably due to packets which were recently delivered,
	 * but it will have no effect.
	 */
	on_each_cpu(tile_net_deregister, (void *)dev, 1);

	/* Optimistically drain LIPP buffers. */
	(void)tile_net_drain_lipp_buffers(priv);

	/* ISSUE: Only needed if not yet fully open. */
	cancel_delayed_work_sync(&priv->retry_work);

	/* Can't transmit any more. */
	netif_stop_queue(dev);

	/* Disable NAPI on each tile. */
	on_each_cpu(tile_net_stop_disable, (void *)dev, 1);

	/*
	 * Drain any remaining LIPP buffers. NOTE: This "printk()"
	 * has never been observed, but in theory it could happen.
	 */
	if (tile_net_drain_lipp_buffers(priv) != 0)
		printk("Had to drain some extra LIPP buffers!\n");

	/* Stop LIPP/LEPP. */
	tile_net_stop_aux(dev);

	/*
	 * ISSUE: It appears that, in practice anyway, by the time we
	 * get here, there are no pending completions, but just in case,
	 * we free (all of) them anyway.
	 */
	while (tile_net_lepp_free_comps(dev, true))
		/* loop */;

	/* Wipe the EPP queue, and wait till the stores hit the EPP. */
	memset(priv->eq, 0, sizeof(lepp_queue_t));
	mb();

	return 0;
}


/*
 * Prepare the "frags" info for the resulting LEPP command.
 *
 * If needed, flush the memory used by the frags.
 */
static unsigned int tile_net_tx_frags(lepp_frag_t *frags,
				      struct sk_buff *skb,
				      void *b_data, unsigned int b_len)
{
	unsigned int i, n = 0;

	struct skb_shared_info *sh = skb_shinfo(skb);

	phys_addr_t cpa;

	if (b_len != 0) {

		if (!hash_default)
			finv_buffer_remote(b_data, b_len, 0);

		cpa = __pa(b_data);
		frags[n].cpa_lo = cpa;
		frags[n].cpa_hi = cpa >> 32;
		frags[n].length = b_len;
		frags[n].hash_for_home = hash_default;
		n++;
	}

	for (i = 0; i < sh->nr_frags; i++) {

		skb_frag_t *f = &sh->frags[i];
		unsigned long pfn = page_to_pfn(skb_frag_page(f));

		/* FIXME: Compute "hash_for_home" properly. */
		/* ISSUE: The hypervisor checks CHIP_HAS_REV1_DMA_PACKETS(). */
		int hash_for_home = hash_default;

		/* FIXME: Hmmm. */
		if (!hash_default) {
			void *va = pfn_to_kaddr(pfn) + f->page_offset;
			BUG_ON(PageHighMem(skb_frag_page(f)));
			finv_buffer_remote(va, skb_frag_size(f), 0);
		}

		cpa = ((phys_addr_t)pfn << PAGE_SHIFT) + f->page_offset;
		frags[n].cpa_lo = cpa;
		frags[n].cpa_hi = cpa >> 32;
		frags[n].length = skb_frag_size(f);
		frags[n].hash_for_home = hash_for_home;
		n++;
	}

	return n;
}


/*
 * This function takes "skb", consisting of a header template and a
 * payload, and hands it to LEPP, to emit as one or more segments,
 * each consisting of a possibly modified header, plus a piece of the
 * payload, via a process known as "tcp segmentation offload".
 *
 * Usually, "data" will contain the header template, of size "sh_len",
 * and "sh->frags" will contain "skb->data_len" bytes of payload, and
 * there will be "sh->gso_segs" segments.
 *
 * Sometimes, if "sendfile()" requires copying, we will be called with
 * "data" containing the header and payload, with "frags" being empty.
 *
 * Sometimes, for example when using NFS over TCP, a single segment can
 * span 3 fragments, which must be handled carefully in LEPP.
 *
 * See "emulate_large_send_offload()" for some reference code, which
 * does not handle checksumming.
 *
 * ISSUE: How do we make sure that high memory DMA does not migrate?
 */
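/*
 * Layout note (see the code below): a TSO command consists of the fixed
 * "lepp_tso_cmd_t" fields, then "num_frags" "lepp_frag_t" entries, then
 * the "sh_len" bytes of the header template, which is why the header is
 * copied to "&cmd->frags[num_frags]" and the total command size comes
 * from "LEPP_TSO_CMD_SIZE(num_frags, sh_len)".
 */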
static int tile_net_tx_tso(struct sk_buff *skb, struct net_device *dev)
{
	struct tile_net_priv *priv = netdev_priv(dev);
	int my_cpu = smp_processor_id();
	struct tile_net_cpu *info = priv->cpu[my_cpu];
	struct tile_net_stats_t *stats = &info->stats;

	struct skb_shared_info *sh = skb_shinfo(skb);

	unsigned char *data = skb->data;

	/* The ip header follows the ethernet header. */
	struct iphdr *ih = ip_hdr(skb);
	unsigned int ih_len = ih->ihl * 4;

	/* Note that "nh == ih", by definition. */
	unsigned char *nh = skb_network_header(skb);
	unsigned int eh_len = nh - data;

	/* The tcp header follows the ip header. */
	struct tcphdr *th = (struct tcphdr *)(nh + ih_len);
	unsigned int th_len = th->doff * 4;

	/* The total number of header bytes. */
	/* NOTE: This may be less than skb_headlen(skb). */
	unsigned int sh_len = eh_len + ih_len + th_len;

	/* The number of payload bytes at "skb->data + sh_len". */
	/* This is non-zero for sendfile() without HIGHDMA. */
	unsigned int b_len = skb_headlen(skb) - sh_len;

	/* The total number of payload bytes. */
	unsigned int d_len = b_len + skb->data_len;

	/* The maximum payload size. */
	unsigned int p_len = sh->gso_size;

	/* The total number of segments. */
	unsigned int num_segs = sh->gso_segs;

	/* The temporary copy of the command. */
	u32 cmd_body[(LEPP_MAX_CMD_SIZE + 3) / 4];
	lepp_tso_cmd_t *cmd = (lepp_tso_cmd_t *)cmd_body;

	/* Analyze the "frags". */
	unsigned int num_frags =
		tile_net_tx_frags(cmd->frags, skb, data + sh_len, b_len);

	/* The size of the command, including frags and header. */
	size_t cmd_size = LEPP_TSO_CMD_SIZE(num_frags, sh_len);

	/* The command header. */
	lepp_tso_cmd_t cmd_init = {
		.tso = true,
		.header_size = sh_len,
		.ip_offset = eh_len,
		.tcp_offset = eh_len + ih_len,
		.payload_size = p_len,
		.num_frags = num_frags,
	};

	unsigned long irqflags;

	lepp_queue_t *eq = priv->eq;

	struct sk_buff *olds[8];
	unsigned int wanted = 8;
	unsigned int i, nolds = 0;

	unsigned int cmd_head, cmd_tail, cmd_next;
	unsigned int comp_tail;


	/* Paranoia. */
	BUG_ON(skb->protocol != htons(ETH_P_IP));
	BUG_ON(ih->protocol != IPPROTO_TCP);
	BUG_ON(skb->ip_summed != CHECKSUM_PARTIAL);
	BUG_ON(num_frags > LEPP_MAX_FRAGS);
	/*--BUG_ON(num_segs != (d_len + (p_len - 1)) / p_len); */
	BUG_ON(num_segs <= 1);


	/* Finish preparing the command. */

	/* Copy the command header. */
	*cmd = cmd_init;

	/* Copy the "header". */
	memcpy(&cmd->frags[num_frags], data, sh_len);


	/* Prefetch and wait, to minimize time spent holding the spinlock. */
	prefetch_L1(&eq->comp_tail);
	prefetch_L1(&eq->cmd_tail);
	mb();


	/* Enqueue the command. */

	spin_lock_irqsave(&priv->eq_lock, irqflags);

	/* Handle completions if needed to make room. */
	/* NOTE: Return NETDEV_TX_BUSY if there is still no room. */
	if (lepp_num_free_comp_slots(eq) == 0) {
		nolds = tile_net_lepp_grab_comps(eq, olds, wanted, 0);
		if (nolds == 0) {
busy:
			spin_unlock_irqrestore(&priv->eq_lock, irqflags);
			return NETDEV_TX_BUSY;
		}
	}

	cmd_head = eq->cmd_head;
	cmd_tail = eq->cmd_tail;

	/* Prepare to advance, detecting full queue. */
	/* NOTE: Return NETDEV_TX_BUSY if the queue is full. */
	cmd_next = cmd_tail + cmd_size;
	if (cmd_tail < cmd_head && cmd_next >= cmd_head)
		goto busy;
	if (cmd_next > LEPP_CMD_LIMIT) {
		cmd_next = 0;
		if (cmd_next == cmd_head)
			goto busy;
	}

	/* Copy the command. */
	memcpy(&eq->cmds[cmd_tail], cmd, cmd_size);

	/* Advance. */
	cmd_tail = cmd_next;

	/* Record "skb" for eventual freeing. */
	comp_tail = eq->comp_tail;
	eq->comps[comp_tail] = skb;
	LEPP_QINC(comp_tail);
	eq->comp_tail = comp_tail;

	/* Flush before allowing LEPP to handle the command. */
	/* ISSUE: Is this the optimal location for the flush? */
	__insn_mf();

	eq->cmd_tail = cmd_tail;

	/* NOTE: Using "4" here is more efficient than "0" or "2", */
	/* and, strangely, more efficient than pre-checking the number */
	/* of available completions, and comparing it to 4. */
	if (nolds == 0)
		nolds = tile_net_lepp_grab_comps(eq, olds, wanted, 4);

	spin_unlock_irqrestore(&priv->eq_lock, irqflags);

	/* Handle completions. */
	for (i = 0; i < nolds; i++)
		dev_consume_skb_any(olds[i]);

	/* Update stats. */
	u64_stats_update_begin(&stats->syncp);
	stats->tx_packets += num_segs;
	stats->tx_bytes += (num_segs * sh_len) + d_len;
	u64_stats_update_end(&stats->syncp);

	/* Make sure the egress timer is scheduled. */
	tile_net_schedule_egress_timer(info);

	return NETDEV_TX_OK;
}


/*
 * Transmit a packet (called by the kernel via "hard_start_xmit" hook).
 */
static int tile_net_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct tile_net_priv *priv = netdev_priv(dev);
	int my_cpu = smp_processor_id();
	struct tile_net_cpu *info = priv->cpu[my_cpu];
	struct tile_net_stats_t *stats = &info->stats;

	unsigned long irqflags;

	struct skb_shared_info *sh = skb_shinfo(skb);

	unsigned int len = skb->len;
	unsigned char *data = skb->data;

	unsigned int csum_start = skb_checksum_start_offset(skb);

	lepp_frag_t frags[1 + MAX_SKB_FRAGS];

	unsigned int num_frags;

	lepp_queue_t *eq = priv->eq;

	struct sk_buff *olds[8];
	unsigned int wanted = 8;
	unsigned int i, nolds = 0;

	unsigned int cmd_size = sizeof(lepp_cmd_t);

	unsigned int cmd_head, cmd_tail, cmd_next;
	unsigned int comp_tail;

	lepp_cmd_t cmds[1 + MAX_SKB_FRAGS];


	/*
	 * This is paranoia, since we think that if the link doesn't come
	 * up, telling Linux we have no carrier will keep it from trying
	 * to transmit. If it does, though, we can't execute this routine,
	 * since data structures we depend on aren't set up yet.
	 */
	if (!info->registered)
		return NETDEV_TX_BUSY;


	/* Save the timestamp. */

        /* Save the timestamp. */
        dev->trans_start = jiffies;


#ifdef TILE_NET_PARANOIA
#if CHIP_HAS_CBOX_HOME_MAP()
        if (hash_default) {
                HV_PTE pte = *virt_to_pte(current->mm, (unsigned long)data);
                if (hv_pte_get_mode(pte) != HV_PTE_MODE_CACHE_HASH_L3)
                        panic("Non-HFH egress buffer! VA=%p Mode=%d PTE=%llx",
                              data, hv_pte_get_mode(pte), hv_pte_val(pte));
        }
#endif
#endif


#ifdef TILE_NET_DUMP_PACKETS
        /* ISSUE: Does not dump the "frags". */
        dump_packet(data, skb_headlen(skb), "tx");
#endif /* TILE_NET_DUMP_PACKETS */


        if (sh->gso_size != 0)
                return tile_net_tx_tso(skb, dev);


        /* Prepare the commands. */

        num_frags = tile_net_tx_frags(frags, skb, data, skb_headlen(skb));

        for (i = 0; i < num_frags; i++) {

                bool final = (i == num_frags - 1);

                lepp_cmd_t cmd = {
                        .cpa_lo = frags[i].cpa_lo,
                        .cpa_hi = frags[i].cpa_hi,
                        .length = frags[i].length,
                        .hash_for_home = frags[i].hash_for_home,
                        .send_completion = final,
                        .end_of_packet = final
                };

                if (i == 0 && skb->ip_summed == CHECKSUM_PARTIAL) {
                        cmd.compute_checksum = 1;
                        cmd.checksum_data.bits.start_byte = csum_start;
                        cmd.checksum_data.bits.count = len - csum_start;
                        cmd.checksum_data.bits.destination_byte =
                                csum_start + skb->csum_offset;
                }

                cmds[i] = cmd;
        }


        /* Prefetch and wait, to minimize time spent holding the spinlock. */
        prefetch_L1(&eq->comp_tail);
        prefetch_L1(&eq->cmd_tail);
        mb();


        /* Enqueue the commands. */

        spin_lock_irqsave(&priv->eq_lock, irqflags);

        /* Handle completions if needed to make room. */
        /* NOTE: Return NETDEV_TX_BUSY if there is still no room. */
        if (lepp_num_free_comp_slots(eq) == 0) {
                nolds = tile_net_lepp_grab_comps(eq, olds, wanted, 0);
                if (nolds == 0) {
busy:
                        spin_unlock_irqrestore(&priv->eq_lock, irqflags);
                        return NETDEV_TX_BUSY;
                }
        }

        cmd_head = eq->cmd_head;
        cmd_tail = eq->cmd_tail;

        /* Copy the commands, or fail. */
        /* NOTE: Return NETDEV_TX_BUSY if the queue is full. */
        for (i = 0; i < num_frags; i++) {

                /* Prepare to advance, detecting full queue. */
                cmd_next = cmd_tail + cmd_size;
                if (cmd_tail < cmd_head && cmd_next >= cmd_head)
                        goto busy;
                if (cmd_next > LEPP_CMD_LIMIT) {
                        cmd_next = 0;
                        if (cmd_next == cmd_head)
                                goto busy;
                }

                /* Copy the command. */
                *(lepp_cmd_t *)&eq->cmds[cmd_tail] = cmds[i];

                /* Advance. */
                cmd_tail = cmd_next;
        }

        /* Record "skb" for eventual freeing. */
        comp_tail = eq->comp_tail;
        eq->comps[comp_tail] = skb;
        LEPP_QINC(comp_tail);
        eq->comp_tail = comp_tail;

        /* Flush before allowing LEPP to handle the command. */
        /* ISSUE: Is this the optimal location for the flush? */
        __insn_mf();

        eq->cmd_tail = cmd_tail;

        /* NOTE: Using "4" here is more efficient than "0" or "2", */
        /* and, strangely, more efficient than pre-checking the number */
        /* of available completions, and comparing it to 4. */
        if (nolds == 0)
                nolds = tile_net_lepp_grab_comps(eq, olds, wanted, 4);

        spin_unlock_irqrestore(&priv->eq_lock, irqflags);

        /* Handle completions. */
        for (i = 0; i < nolds; i++)
                dev_consume_skb_any(olds[i]);

        /* HACK: Track "expanded" size for short packets (e.g. 42 < 60). */
        u64_stats_update_begin(&stats->syncp);
        stats->tx_packets++;
        stats->tx_bytes += ((len >= ETH_ZLEN) ? len : ETH_ZLEN);
        u64_stats_update_end(&stats->syncp);

        /* Make sure the egress timer is scheduled. */
        tile_net_schedule_egress_timer(info);

        return NETDEV_TX_OK;
}


/*
 * Deal with a transmit timeout.
 */
static void tile_net_tx_timeout(struct net_device *dev)
{
        PDEBUG("tile_net_tx_timeout()\n");
        PDEBUG("Transmit timeout at %ld, latency %ld\n", jiffies,
               jiffies - dev->trans_start);

        /* XXX: ISSUE: This doesn't seem useful for us. */
        netif_wake_queue(dev);
}


/*
 * Ioctl commands.
 */
static int tile_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
        return -EOPNOTSUPP;
}


/*
 * Get System Network Statistics.
 *
 * Returns the address of the device statistics structure.
 */
static struct rtnl_link_stats64 *tile_net_get_stats64(struct net_device *dev,
                                            struct rtnl_link_stats64 *stats)
{
        struct tile_net_priv *priv = netdev_priv(dev);
        u64 rx_packets = 0, tx_packets = 0;
        u64 rx_bytes = 0, tx_bytes = 0;
        u64 rx_errors = 0, rx_dropped = 0;
        int i;

        for_each_online_cpu(i) {
                struct tile_net_stats_t *cpu_stats;
                u64 trx_packets, ttx_packets, trx_bytes, ttx_bytes;
                u64 trx_errors, trx_dropped;
                unsigned int start;

                if (priv->cpu[i] == NULL)
                        continue;
                cpu_stats = &priv->cpu[i]->stats;

                do {
                        start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
                        trx_packets = cpu_stats->rx_packets;
                        ttx_packets = cpu_stats->tx_packets;
                        trx_bytes = cpu_stats->rx_bytes;
                        ttx_bytes = cpu_stats->tx_bytes;
                        trx_errors = cpu_stats->rx_errors;
                        trx_dropped = cpu_stats->rx_dropped;
                } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));

                rx_packets += trx_packets;
                tx_packets += ttx_packets;
                rx_bytes += trx_bytes;
                tx_bytes += ttx_bytes;
                rx_errors += trx_errors;
                rx_dropped += trx_dropped;
        }

        stats->rx_packets = rx_packets;
        stats->tx_packets = tx_packets;
        stats->rx_bytes = rx_bytes;
        stats->tx_bytes = tx_bytes;
        stats->rx_errors = rx_errors;
        stats->rx_dropped = rx_dropped;

        return stats;
}


/*
 * Change the "mtu".
 *
 * The "change_mtu" method is usually not needed.
 * If you need it, it must be like this.
 */
static int tile_net_change_mtu(struct net_device *dev, int new_mtu)
{
        PDEBUG("tile_net_change_mtu()\n");

        /* Check ranges. */
        if ((new_mtu < 68) || (new_mtu > 1500))
                return -EINVAL;

        /* Accept the value. */
        dev->mtu = new_mtu;

        return 0;
}

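/*
 * NOTE: the 1500 upper bound above matches the default TILE_NET_MTU that
 * tile_net_setup() assigns to "dev->mtu"; if the interface were ever
 * configured for larger frames, this limit would presumably need to be
 * raised to match.
 */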

/*
 * Change the Ethernet Address of the NIC.
 *
 * The hypervisor driver does not support changing MAC address. However,
 * the IPP does not do anything with the MAC address, so the address which
 * gets used on outgoing packets, and which is accepted on incoming packets,
 * is completely up to the NetIO program or kernel driver which is actually
 * handling them.
 *
 * Returns 0 on success, negative on failure.
 */
static int tile_net_set_mac_address(struct net_device *dev, void *p)
{
        struct sockaddr *addr = p;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        /* ISSUE: Note that "dev_addr" is now a pointer. */
        memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

        return 0;
}


/*
 * Obtain the MAC address from the hypervisor.
 * This must be done before opening the device.
 */
static int tile_net_get_mac(struct net_device *dev)
{
        struct tile_net_priv *priv = netdev_priv(dev);

        char hv_dev_name[32];
        int len;

        __netio_getset_offset_t offset = { .word = NETIO_IPP_PARAM_OFF };

        int ret;

        /* For example, "xgbe0". */
        strcpy(hv_dev_name, dev->name);
        len = strlen(hv_dev_name);

        /* For example, "xgbe/0". */
        hv_dev_name[len] = hv_dev_name[len - 1];
        hv_dev_name[len - 1] = '/';
        len++;

        /* For example, "xgbe/0/native_hash". */
        strcpy(hv_dev_name + len, hash_default ? "/native_hash" : "/native");

        /* Get the hypervisor handle for this device. */
        priv->hv_devhdl = hv_dev_open((HV_VirtAddr)hv_dev_name, 0);
        PDEBUG("hv_dev_open(%s) returned %d %p\n",
               hv_dev_name, priv->hv_devhdl, &priv->hv_devhdl);
        if (priv->hv_devhdl < 0) {
                if (priv->hv_devhdl == HV_ENODEV)
                        printk(KERN_DEBUG "Ignoring unconfigured device %s\n",
                               hv_dev_name);
                else
                        printk(KERN_DEBUG "hv_dev_open(%s) returned %d\n",
                               hv_dev_name, priv->hv_devhdl);
                return -1;
        }

        /*
         * Read the hardware address from the hypervisor.
         * ISSUE: Note that "dev_addr" is now a pointer.
         */
        offset.bits.class = NETIO_PARAM;
        offset.bits.addr = NETIO_PARAM_MAC;
        ret = hv_dev_pread(priv->hv_devhdl, 0,
                           (HV_VirtAddr)dev->dev_addr, dev->addr_len,
                           offset.word);
        PDEBUG("hv_dev_pread(NETIO_PARAM_MAC) returned %d\n", ret);
        if (ret <= 0) {
                printk(KERN_DEBUG "hv_dev_pread(NETIO_PARAM_MAC) %s failed\n",
                       dev->name);
                /*
                 * Since the device is configured by the hypervisor but we
                 * can't get its MAC address, we are most likely running
                 * the simulator, so let's generate a random MAC address.
                 */
                eth_hw_addr_random(dev);
        }

        return 0;
}


#ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void tile_net_netpoll(struct net_device *dev)
{
        struct tile_net_priv *priv = netdev_priv(dev);
        disable_percpu_irq(priv->intr_id);
        tile_net_handle_ingress_interrupt(priv->intr_id, dev);
        enable_percpu_irq(priv->intr_id, 0);
}
#endif


static const struct net_device_ops tile_net_ops = {
        .ndo_open = tile_net_open,
        .ndo_stop = tile_net_stop,
        .ndo_start_xmit = tile_net_tx,
        .ndo_do_ioctl = tile_net_ioctl,
        .ndo_get_stats64 = tile_net_get_stats64,
        .ndo_change_mtu = tile_net_change_mtu,
        .ndo_tx_timeout = tile_net_tx_timeout,
        .ndo_set_mac_address = tile_net_set_mac_address,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller = tile_net_netpoll,
#endif
};


/*
 * The setup function.
 *
 * This uses ether_setup() to assign various fields in dev, including
 * setting IFF_BROADCAST and IFF_MULTICAST, then sets some extra fields.
 */
static void tile_net_setup(struct net_device *dev)
{
        netdev_features_t features = 0;

        ether_setup(dev);
        dev->netdev_ops = &tile_net_ops;
        dev->watchdog_timeo = TILE_NET_TIMEOUT;
        dev->tx_queue_len = TILE_NET_TX_QUEUE_LEN;
        dev->mtu = TILE_NET_MTU;

        features |= NETIF_F_HW_CSUM;
        features |= NETIF_F_SG;

        /* We support TSO iff the HV supports sufficient frags. */
        if (LEPP_MAX_FRAGS >= 1 + MAX_SKB_FRAGS)
                features |= NETIF_F_TSO;

        /* We can't support HIGHDMA without hash_default, since we need
         * to be able to finv() with a VA if we don't have hash_default.
         */
        if (hash_default)
                features |= NETIF_F_HIGHDMA;

        dev->hw_features |= features;
        dev->vlan_features |= features;
        dev->features |= features;
}


/*
 * Allocate the device structure, register the device, and obtain the
 * MAC address from the hypervisor.
 */
static struct net_device *tile_net_dev_init(const char *name)
{
        int ret;
        struct net_device *dev;
        struct tile_net_priv *priv;

        /*
         * Allocate the device structure. This allocates "priv", calls
         * tile_net_setup(), and saves "name". Normally, "name" is a
         * template, instantiated by register_netdev(), but not for us.
         */
        dev = alloc_netdev(sizeof(*priv), name, NET_NAME_UNKNOWN,
                           tile_net_setup);
        if (!dev) {
                pr_err("alloc_netdev(%s) failed\n", name);
                return NULL;
        }

        priv = netdev_priv(dev);

        /* Initialize "priv". */

        memset(priv, 0, sizeof(*priv));

        /* Save "dev" for "tile_net_open_retry()". */
        priv->dev = dev;

        INIT_DELAYED_WORK(&priv->retry_work, tile_net_open_retry);

        spin_lock_init(&priv->eq_lock);

        /* Allocate "eq". */
        priv->eq_pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, EQ_ORDER);
        if (!priv->eq_pages) {
                free_netdev(dev);
                return NULL;
        }
        priv->eq = page_address(priv->eq_pages);

        /* Register the network device. */
        ret = register_netdev(dev);
        if (ret) {
                pr_err("register_netdev %s failed %d\n", dev->name, ret);
                __free_pages(priv->eq_pages, EQ_ORDER);
                free_netdev(dev);
                return NULL;
        }

        /* Get the MAC address. */
        ret = tile_net_get_mac(dev);
        if (ret < 0) {
                unregister_netdev(dev);
                __free_pages(priv->eq_pages, EQ_ORDER);
                free_netdev(dev);
                return NULL;
        }

        return dev;
}


/*
 * Module cleanup.
 *
 * FIXME: If compiled as a module, this module cannot be "unloaded",
 * because the "ingress interrupt handler" is registered permanently.
 */
static void tile_net_cleanup(void)
{
        int i;

        for (i = 0; i < TILE_NET_DEVS; i++) {
                if (tile_net_devs[i]) {
                        struct net_device *dev = tile_net_devs[i];
                        struct tile_net_priv *priv = netdev_priv(dev);
                        unregister_netdev(dev);
                        finv_buffer_remote(priv->eq, EQ_SIZE, 0);
                        __free_pages(priv->eq_pages, EQ_ORDER);
                        free_netdev(dev);
                }
        }
}

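/*
 * NOTE: tile_net_init_module() below returns 0 even when some (or all) of
 * the four devices fail to initialize; failed slots are simply left NULL
 * in "tile_net_devs[]", which is why tile_net_cleanup() above checks each
 * entry before tearing it down.
 */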

/*
 * Module initialization.
 */
static int tile_net_init_module(void)
{
        pr_info("Tilera Network Driver\n");

        tile_net_devs[0] = tile_net_dev_init("xgbe0");
        tile_net_devs[1] = tile_net_dev_init("xgbe1");
        tile_net_devs[2] = tile_net_dev_init("gbe0");
        tile_net_devs[3] = tile_net_dev_init("gbe1");

        return 0;
}


module_init(tile_net_init_module);
module_exit(tile_net_cleanup);


#ifndef MODULE

/*
 * The "network_cpus" boot argument specifies the cpus that are dedicated
 * to handle ingress packets.
 *
 * The parameter should be in the form "network_cpus=m-n[,x-y]", where
 * m, n, x, y are integer numbers that represent the cpus that can be
 * neither a dedicated cpu nor a dataplane cpu.
 */
static int __init network_cpus_setup(char *str)
{
        int rc = cpulist_parse_crop(str, &network_cpus_map);
        if (rc != 0) {
                pr_warn("network_cpus=%s: malformed cpu list\n", str);
        } else {

                /* Remove dedicated cpus. */
                cpumask_and(&network_cpus_map, &network_cpus_map,
                            cpu_possible_mask);


                if (cpumask_empty(&network_cpus_map)) {
                        pr_warn("Ignoring network_cpus='%s'\n", str);
                } else {
                        pr_info("Linux network CPUs: %*pbl\n",
                                cpumask_pr_args(&network_cpus_map));
                        network_cpus_used = true;
                }
        }

        return 0;
}
__setup("network_cpus=", network_cpus_setup);

#endif
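/*
 * Illustrative example (hypothetical cpu numbers): booting with
 * "network_cpus=4-7,12" would, assuming those cpus are all possible and
 * the resulting mask is non-empty, set "network_cpus_used" and print
 * "Linux network CPUs: 4-7,12" via the "%*pbl" cpumask format above.
 */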