root/drivers/net/ethernet/cavium/liquidio/octeon_network.h


DEFINITIONS

This source file includes the following definitions:
  1. recv_buffer_alloc
  2. recv_buffer_fast_alloc
  3. recv_buffer_recycle
  4. recv_buffer_reuse
  5. recv_buffer_destroy
  6. recv_buffer_free
  7. recv_buffer_fast_free
  8. tx_buffer_free
  9. get_rbd
  10. lio_map_ring
  11. lio_unmap_ring
  12. octeon_fast_packet_alloc
  13. octeon_fast_packet_next
  14. ifstate_check
  15. ifstate_set
  16. ifstate_reset
  17. wait_for_pending_requests
  18. stop_txqs
  19. wake_txqs
  20. start_txqs
  21. skb_iq
  22. lio_list_delete_head

/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 *          Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2016 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more
 * details.
 **********************************************************************/

/*!  \file  octeon_network.h
 *   \brief Host NIC Driver: Structure and Macro definitions used by NIC Module.
 */

#ifndef __OCTEON_NETWORK_H__
#define __OCTEON_NETWORK_H__
#include <linux/ptp_clock_kernel.h>

#define LIO_MAX_MTU_SIZE (OCTNET_MAX_FRM_SIZE - OCTNET_FRM_HEADER_SIZE)
#define LIO_MIN_MTU_SIZE ETH_MIN_MTU

/* Bit mask values for lio->ifstate */
#define   LIO_IFSTATE_DROQ_OPS             0x01
#define   LIO_IFSTATE_REGISTERED           0x02
#define   LIO_IFSTATE_RUNNING              0x04
#define   LIO_IFSTATE_RX_TIMESTAMP_ENABLED 0x08
#define   LIO_IFSTATE_RESETTING            0x10

struct liquidio_if_cfg_resp {
        u64 rh;
        struct liquidio_if_cfg_info cfg_info;
        u64 status;
};

#define LIO_IFCFG_WAIT_TIME    3000 /* In milliseconds */
#define LIQUIDIO_NDEV_STATS_POLL_TIME_MS 200

/* Structure of a node in list of gather components maintained by
 * NIC driver for each network device.
 */
struct octnic_gather {
        /* List manipulation. Next and prev pointers. */
        struct list_head list;

        /* Size of the gather component at sg in bytes. */
        int sg_size;

        /* Number of bytes that sg was adjusted to make it 8B-aligned. */
        int adjust;

        /* Gather component that can accommodate max sized fragment list
         * received from the IP layer.
         */
        struct octeon_sg_entry *sg;

        dma_addr_t sg_dma_ptr;
};

struct oct_nic_stats_resp {
        u64     rh;
        struct oct_link_stats stats;
        u64     status;
};

struct oct_nic_vf_stats_resp {
        u64     rh;
        u64     spoofmac_cnt;
        u64     status;
};

struct oct_nic_stats_ctrl {
        struct completion complete;
        struct net_device *netdev;
};

struct oct_nic_seapi_resp {
        u64 rh;
        union {
                u32 fec_setting;
                u32 speed;
        };
        u64 status;
};

/** LiquidIO per-interface network private data */
struct lio {
        /** State of the interface. Rx/Tx happens only in the RUNNING state. */
        atomic_t ifstate;

        /** Octeon Interface index number. This device will be represented as
         *  oct<ifidx> in the system.
         */
        int ifidx;

        /** Octeon Input queue to use to transmit for this network interface. */
        int txq;

        /** Octeon Output queue from which pkts arrive
         * for this network interface.
         */
        int rxq;

        /** Guards each glist */
        spinlock_t *glist_lock;

        /** Array of gather component linked lists */
        struct list_head *glist;
        void **glists_virt_base;
        dma_addr_t *glists_dma_base;
        u32 glist_entry_size;

        /** Pointer to the NIC properties for the Octeon device this network
         *  interface is associated with.
         */
        struct octdev_props *octprops;

        /** Pointer to the octeon device structure. */
        struct octeon_device *oct_dev;

        struct net_device *netdev;

        /** Link information sent by the core application for this interface. */
        struct oct_link_info linfo;

        /** counter of link changes */
        u64 link_changes;

        /** Size of Tx queue for this octeon device. */
        u32 tx_qsize;

        /** Size of Rx queue for this octeon device. */
        u32 rx_qsize;

        /** MTU size of this octeon device. */
        u32 mtu;

        /** msg level flag per interface. */
        u32 msg_enable;

        /** Copy of Interface capabilities: TSO, TSO6, LRO, Checksums. */
        u64 dev_capability;

        /* Copy of transmit encapsulation capabilities:
         * TSO, TSO6, Checksums for this device for Kernel
         * 3.10.0 onwards
         */
        u64 enc_dev_capability;

        /** Copy of beacon reg in phy */
        u32 phy_beacon_val;

        /** Copy of ctrl reg in phy */
        u32 led_ctrl_val;

        /* PTP clock information */
        struct ptp_clock_info ptp_info;
        struct ptp_clock *ptp_clock;
        s64 ptp_adjust;

        /* for atomic access to Octeon PTP reg and data struct */
        spinlock_t ptp_lock;

        /* Interface info */
        u32     intf_open;

        /* work queue for txq status */
        struct cavium_wq        txq_status_wq;

        /* work queue for rxq oom status */
        struct cavium_wq rxq_status_wq[MAX_POSSIBLE_OCTEON_OUTPUT_QUEUES];

        /* work queue for link status */
        struct cavium_wq        link_status_wq;

        /* work queue to regularly send local time to octeon firmware */
        struct cavium_wq        sync_octeon_time_wq;

        int netdev_uc_count;
        struct cavium_wk stats_wk;
};

#define LIO_SIZE         (sizeof(struct lio))
#define GET_LIO(netdev)  ((struct lio *)netdev_priv(netdev))

#define LIO_MAX_CORES                16

/**
 * \brief Enable or disable feature
 * @param netdev    pointer to network device
 * @param cmd       Command that just requires acknowledgment
 * @param param1    Parameter to command
 */
int liquidio_set_feature(struct net_device *netdev, int cmd, u16 param1);

int setup_rx_oom_poll_fn(struct net_device *netdev);

void cleanup_rx_oom_poll_fn(struct net_device *netdev);

/**
 * \brief Link control command completion callback
 * @param nctrl_ptr pointer to control packet structure
 *
 * This routine is called by the callback function when a ctrl pkt sent to
 * core app completes. The nctrl_ptr contains a copy of the command type
 * and data sent to the core app. This routine is only called if the ctrl
 * pkt was sent successfully to the core app.
 */
void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr);

int liquidio_setup_io_queues(struct octeon_device *octeon_dev, int ifidx,
                             u32 num_iqs, u32 num_oqs);

irqreturn_t liquidio_msix_intr_handler(int irq __attribute__((unused)),
                                       void *dev);

int octeon_setup_interrupt(struct octeon_device *oct, u32 num_ioqs);

void lio_fetch_stats(struct work_struct *work);

int lio_wait_for_clean_oq(struct octeon_device *oct);

/**
 * \brief Register ethtool operations
 * @param netdev    pointer to network device
 */
void liquidio_set_ethtool_ops(struct net_device *netdev);

void lio_delete_glists(struct lio *lio);

int lio_setup_glists(struct octeon_device *oct, struct lio *lio, int num_qs);

int liquidio_get_speed(struct lio *lio);
int liquidio_set_speed(struct lio *lio, int speed);
int liquidio_get_fec(struct lio *lio);
int liquidio_set_fec(struct lio *lio, int on_off);

/**
 * \brief Net device change_mtu
 * @param netdev network device
 */
int liquidio_change_mtu(struct net_device *netdev, int new_mtu);
#define LIO_CHANGE_MTU_SUCCESS 1
#define LIO_CHANGE_MTU_FAIL    2

#define SKB_ADJ_MASK  0x3F
#define SKB_ADJ       (SKB_ADJ_MASK + 1)
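
/* Worked example (editorial note, not part of the original header):
 * SKB_ADJ_MASK is 0x3F, so SKB_ADJ is 64 and the receive-buffer helpers
 * below pad skb->data up to the next 64-byte boundary.  If dev_alloc_skb()
 * hands back data at ...0x1010, then 0x1010 & 0x3F == 0x10,
 * r = 64 - 0x10 == 48, and skb_reserve(skb, 48) moves data to ...0x1040,
 * which is 64-byte aligned.
 */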

#define MIN_SKB_SIZE       256 /* 8 bytes and more - 8 bytes for PTP */
#define LIO_RXBUFFER_SZ    2048

static inline void *recv_buffer_alloc(struct octeon_device *oct,
                                      struct octeon_skb_page_info *pg_info)
{
        struct page *page;
        struct sk_buff *skb;
        struct octeon_skb_page_info *skb_pg_info;

        page = alloc_page(GFP_ATOMIC);
        if (unlikely(!page))
                return NULL;

        skb = dev_alloc_skb(MIN_SKB_SIZE + SKB_ADJ);
        if (unlikely(!skb)) {
                __free_page(page);
                pg_info->page = NULL;
                return NULL;
        }

        if ((unsigned long)skb->data & SKB_ADJ_MASK) {
                u32 r = SKB_ADJ - ((unsigned long)skb->data & SKB_ADJ_MASK);

                skb_reserve(skb, r);
        }

        skb_pg_info = (struct octeon_skb_page_info *)(skb->cb);
        /* Get DMA info */
        pg_info->dma = dma_map_page(&oct->pci_dev->dev, page, 0,
                                    PAGE_SIZE, DMA_FROM_DEVICE);

        /* Mapping failed!! */
        if (dma_mapping_error(&oct->pci_dev->dev, pg_info->dma)) {
                __free_page(page);
                dev_kfree_skb_any(skb);
                pg_info->page = NULL;
                return NULL;
        }

        pg_info->page = page;
        pg_info->page_offset = 0;
        skb_pg_info->page = page;
        skb_pg_info->page_offset = 0;
        skb_pg_info->dma = pg_info->dma;

        return (void *)skb;
}
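
/* Illustrative sketch (not part of the original header): a receive-ring
 * refill path would typically pair recv_buffer_alloc() with lio_map_ring()
 * (defined below) to get the bus address posted to the hardware; the index
 * "i" and the descriptor write are hypothetical here:
 *
 *     struct octeon_skb_page_info pg_info;
 *     void *buf = recv_buffer_alloc(oct, &pg_info);
 *
 *     if (buf) {
 *             droq->recv_buf_list[i].buffer = buf;
 *             droq->desc_ring[i].buffer_ptr = lio_map_ring(buf);
 *     }
 */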

static inline void *recv_buffer_fast_alloc(u32 size)
{
        struct sk_buff *skb;
        struct octeon_skb_page_info *skb_pg_info;

        skb = dev_alloc_skb(size + SKB_ADJ);
        if (unlikely(!skb))
                return NULL;

        if ((unsigned long)skb->data & SKB_ADJ_MASK) {
                u32 r = SKB_ADJ - ((unsigned long)skb->data & SKB_ADJ_MASK);

                skb_reserve(skb, r);
        }

        skb_pg_info = (struct octeon_skb_page_info *)(skb->cb);
        skb_pg_info->page = NULL;
        skb_pg_info->page_offset = 0;
        skb_pg_info->dma = 0;

        return skb;
}

static inline int
recv_buffer_recycle(struct octeon_device *oct, void *buf)
{
        struct octeon_skb_page_info *pg_info = buf;

        if (!pg_info->page) {
                dev_err(&oct->pci_dev->dev, "%s: pg_info->page NULL\n",
                        __func__);
                return -ENOMEM;
        }

        if (unlikely(page_count(pg_info->page) != 1) ||
            unlikely(page_to_nid(pg_info->page) != numa_node_id())) {
                dma_unmap_page(&oct->pci_dev->dev,
                               pg_info->dma, PAGE_SIZE,
                               DMA_FROM_DEVICE);
                pg_info->dma = 0;
                pg_info->page = NULL;
                pg_info->page_offset = 0;
                return -ENOMEM;
        }

        /* Flip to other half of the buffer */
        if (pg_info->page_offset == 0)
                pg_info->page_offset = LIO_RXBUFFER_SZ;
        else
                pg_info->page_offset = 0;
        page_ref_inc(pg_info->page);

        return 0;
}
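
/* Editorial note: with LIO_RXBUFFER_SZ == 2048 and 4 KiB pages, each mapped
 * page holds two receive buffers.  recv_buffer_recycle() flips page_offset
 * between 0 and 2048 and takes an extra page reference, so the page is only
 * returned to the allocator after both halves (and any skb still holding a
 * reference) have been released via put_page().
 */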

static inline void *recv_buffer_reuse(struct octeon_device *oct, void *buf)
{
        struct octeon_skb_page_info *pg_info = buf, *skb_pg_info;
        struct sk_buff *skb;

        skb = dev_alloc_skb(MIN_SKB_SIZE + SKB_ADJ);
        if (unlikely(!skb)) {
                dma_unmap_page(&oct->pci_dev->dev,
                               pg_info->dma, PAGE_SIZE,
                               DMA_FROM_DEVICE);
                return NULL;
        }

        if ((unsigned long)skb->data & SKB_ADJ_MASK) {
                u32 r = SKB_ADJ - ((unsigned long)skb->data & SKB_ADJ_MASK);

                skb_reserve(skb, r);
        }

        skb_pg_info = (struct octeon_skb_page_info *)(skb->cb);
        skb_pg_info->page = pg_info->page;
        skb_pg_info->page_offset = pg_info->page_offset;
        skb_pg_info->dma = pg_info->dma;

        return skb;
}

static inline void
recv_buffer_destroy(void *buffer, struct octeon_skb_page_info *pg_info)
{
        struct sk_buff *skb = (struct sk_buff *)buffer;

        put_page(pg_info->page);
        pg_info->dma = 0;
        pg_info->page = NULL;
        pg_info->page_offset = 0;

        if (skb)
                dev_kfree_skb_any(skb);
}

static inline void recv_buffer_free(void *buffer)
{
        struct sk_buff *skb = (struct sk_buff *)buffer;
        struct octeon_skb_page_info *pg_info;

        pg_info = (struct octeon_skb_page_info *)(skb->cb);

        if (pg_info->page) {
                put_page(pg_info->page);
                pg_info->dma = 0;
                pg_info->page = NULL;
                pg_info->page_offset = 0;
        }

        dev_kfree_skb_any(skb);
}

static inline void
recv_buffer_fast_free(void *buffer)
{
        dev_kfree_skb_any((struct sk_buff *)buffer);
}

static inline void tx_buffer_free(void *buffer)
{
        dev_kfree_skb_any((struct sk_buff *)buffer);
}

#define lio_dma_alloc(oct, size, dma_addr) \
        dma_alloc_coherent(&(oct)->pci_dev->dev, size, dma_addr, GFP_KERNEL)
#define lio_dma_free(oct, size, virt_addr, dma_addr) \
        dma_free_coherent(&(oct)->pci_dev->dev, size, virt_addr, dma_addr)

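/* Usage sketch (illustrative; "size" and the error path are hypothetical):
 *
 *     dma_addr_t dma;
 *     void *virt = lio_dma_alloc(oct, size, &dma);
 *
 *     if (!virt)
 *             return -ENOMEM;
 *     ...
 *     lio_dma_free(oct, size, virt, dma);
 */
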
static inline
void *get_rbd(struct sk_buff *skb)
{
        struct octeon_skb_page_info *pg_info;
        unsigned char *va;

        pg_info = (struct octeon_skb_page_info *)(skb->cb);
        va = page_address(pg_info->page) + pg_info->page_offset;

        return va;
}

static inline u64
lio_map_ring(void *buf)
{
        dma_addr_t dma_addr;

        struct sk_buff *skb = (struct sk_buff *)buf;
        struct octeon_skb_page_info *pg_info;

        pg_info = (struct octeon_skb_page_info *)(skb->cb);
        if (!pg_info->page) {
                pr_err("%s: pg_info->page NULL\n", __func__);
                WARN_ON(1);
        }

        /* Get DMA info */
        dma_addr = pg_info->dma;
        if (!pg_info->dma) {
                pr_err("%s: ERROR it should be already available\n",
                       __func__);
                WARN_ON(1);
        }
        dma_addr += pg_info->page_offset;

        return (u64)dma_addr;
}

static inline void
lio_unmap_ring(struct pci_dev *pci_dev,
               u64 buf_ptr)
{
        dma_unmap_page(&pci_dev->dev,
                       buf_ptr, PAGE_SIZE,
                       DMA_FROM_DEVICE);
}
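
/* Editorial note: dma_unmap_page() must be given the same address that
 * dma_map_page() returned in recv_buffer_alloc().  lio_map_ring() returns
 * pg_info->dma plus page_offset, so callers of lio_unmap_ring() are
 * expected to pass the page-aligned pg_info->dma, not the offset address
 * that was posted to the ring.
 */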

static inline void *octeon_fast_packet_alloc(u32 size)
{
        return recv_buffer_fast_alloc(size);
}

static inline void octeon_fast_packet_next(struct octeon_droq *droq,
                                           struct sk_buff *nicbuf,
                                           int copy_len,
                                           int idx)
{
        skb_put_data(nicbuf, get_rbd(droq->recv_buf_list[idx].buffer),
                     copy_len);
}

/**
 * \brief check interface state
 * @param lio per-network private data
 * @param state_flag flag state to check
 */
static inline int ifstate_check(struct lio *lio, int state_flag)
{
        return atomic_read(&lio->ifstate) & state_flag;
}

/**
 * \brief set interface state
 * @param lio per-network private data
 * @param state_flag flag state to set
 */
static inline void ifstate_set(struct lio *lio, int state_flag)
{
        atomic_set(&lio->ifstate, (atomic_read(&lio->ifstate) | state_flag));
}

/**
 * \brief clear interface state
 * @param lio per-network private data
 * @param state_flag flag state to clear
 */
static inline void ifstate_reset(struct lio *lio, int state_flag)
{
        atomic_set(&lio->ifstate, (atomic_read(&lio->ifstate) & ~(state_flag)));
}
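
/* Usage sketch (illustrative): an interface-open path might do
 *
 *     ifstate_set(lio, LIO_IFSTATE_RUNNING);
 *     ...
 *     if (ifstate_check(lio, LIO_IFSTATE_RUNNING))
 *             ...
 *     ifstate_reset(lio, LIO_IFSTATE_RUNNING);
 *
 * Note that ifstate_set() and ifstate_reset() are an atomic_read() followed
 * by an atomic_set(), so the read-modify-write is not atomic as a whole;
 * concurrent updaters need external serialization.
 */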

/**
 * \brief wait for all pending requests to complete
 * @param oct Pointer to Octeon device
 *
 * Called during shutdown sequence
 */
static inline int wait_for_pending_requests(struct octeon_device *oct)
{
        int i, pcount = 0;

        for (i = 0; i < MAX_IO_PENDING_PKT_COUNT; i++) {
                pcount = atomic_read(
                    &oct->response_list[OCTEON_ORDERED_SC_LIST]
                         .pending_req_count);
                if (pcount)
                        schedule_timeout_uninterruptible(HZ / 10);
                else
                        break;
        }

        if (pcount)
                return 1;

        return 0;
}
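
/* Editorial note: the loop above polls the ordered soft-command response
 * list up to MAX_IO_PENDING_PKT_COUNT times, sleeping HZ/10 (about 100 ms)
 * between checks, and returns 1 if requests are still pending when that
 * budget is exhausted.
 */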

/**
 * \brief Stop Tx queues
 * @param netdev network device
 */
static inline void stop_txqs(struct net_device *netdev)
{
        int i;

        for (i = 0; i < netdev->real_num_tx_queues; i++)
                netif_stop_subqueue(netdev, i);
}

/**
 * \brief Wake Tx queues
 * @param netdev network device
 */
static inline void wake_txqs(struct net_device *netdev)
{
        struct lio *lio = GET_LIO(netdev);
        int i, qno;

        for (i = 0; i < netdev->real_num_tx_queues; i++) {
                qno = lio->linfo.txpciq[i % lio->oct_dev->num_iqs].s.q_no;

                if (__netif_subqueue_stopped(netdev, i)) {
                        INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, qno,
                                                  tx_restart, 1);
                        netif_wake_subqueue(netdev, i);
                }
        }
}

/**
 * \brief Start Tx queues
 * @param netdev network device
 */
static inline void start_txqs(struct net_device *netdev)
{
        struct lio *lio = GET_LIO(netdev);
        int i;

        if (lio->linfo.link.s.link_up) {
                for (i = 0; i < netdev->real_num_tx_queues; i++)
                        netif_start_subqueue(netdev, i);
        }
}

static inline int skb_iq(struct octeon_device *oct, struct sk_buff *skb)
{
        return skb->queue_mapping % oct->num_iqs;
}
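
/* Example (editorial): with oct->num_iqs == 4, an skb whose queue_mapping
 * is 6 is sent on input queue 6 % 4 == 2, so queue indices beyond the
 * device's IQ count wrap around instead of indexing out of range.
 */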

/**
 * Remove and return the node at the head of the list, or NULL if the list
 * is empty.
 */
static inline struct list_head *lio_list_delete_head(struct list_head *root)
{
        struct list_head *node;

        if (root->prev == root && root->next == root)
                node = NULL;
        else
                node = root->next;

        if (node)
                list_del(node);

        return node;
}
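
/* Usage sketch (illustrative): the per-queue gather lists declared in
 * struct lio are consumed this way, under the matching glist_lock:
 *
 *     struct octnic_gather *g = NULL;
 *     struct list_head *node;
 *
 *     spin_lock(&lio->glist_lock[q]);
 *     node = lio_list_delete_head(&lio->glist[q]);
 *     spin_unlock(&lio->glist_lock[q]);
 *     if (node)
 *             g = list_entry(node, struct octnic_gather, list);
 */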

#endif /* __OCTEON_NETWORK_H__ */
