root/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c


DEFINITIONS

This source file includes the following definitions:
  1. ixgbe_fcoe_clear_ddp
  2. ixgbe_fcoe_ddp_put
  3. ixgbe_fcoe_ddp_setup
  4. ixgbe_fcoe_ddp_get
  5. ixgbe_fcoe_ddp_target
  6. ixgbe_fcoe_ddp
  7. ixgbe_fso
  8. ixgbe_fcoe_dma_pool_free
  9. ixgbe_fcoe_dma_pool_alloc
  10. ixgbe_configure_fcoe
  11. ixgbe_free_fcoe_ddp_resources
  12. ixgbe_setup_fcoe_ddp_resources
  13. ixgbe_fcoe_ddp_enable
  14. ixgbe_fcoe_ddp_disable
  15. ixgbe_fcoe_enable
  16. ixgbe_fcoe_disable
  17. ixgbe_fcoe_get_wwn
  18. ixgbe_fcoe_get_hbainfo
  19. ixgbe_fcoe_get_tc

// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 1999 - 2018 Intel Corporation. */

#include "ixgbe.h"
#include <linux/if_ether.h>
#include <linux/gfp.h>
#include <linux/if_vlan.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/fc/fc_fs.h>
#include <scsi/fc/fc_fcoe.h>
#include <scsi/libfc.h>
#include <scsi/libfcoe.h>

/**
 * ixgbe_fcoe_clear_ddp - clear the given ddp context
 * @ddp: ptr to the ixgbe_fcoe_ddp
 *
 * Returns : none
 */
static inline void ixgbe_fcoe_clear_ddp(struct ixgbe_fcoe_ddp *ddp)
{
        ddp->len = 0;
        ddp->err = 1;
        ddp->udl = NULL;
        ddp->udp = 0UL;
        ddp->sgl = NULL;
        ddp->sgc = 0;
}

/**
 * ixgbe_fcoe_ddp_put - free the ddp context for a given xid
 * @netdev: the corresponding net_device
 * @xid: the xid whose corresponding ddp context will be freed
 *
 * This is the implementation of net_device_ops.ndo_fcoe_ddp_done
 * and it is expected to be called by the ULD, i.e., the FCP layer of
 * libfc, to release the corresponding ddp context when the I/O is done.
 *
 * Returns : data length already ddp-ed in bytes
 */
int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid)
{
        int len;
        struct ixgbe_fcoe *fcoe;
        struct ixgbe_adapter *adapter;
        struct ixgbe_fcoe_ddp *ddp;
        struct ixgbe_hw *hw;
        u32 fcbuff;

        if (!netdev)
                return 0;

        if (xid >= netdev->fcoe_ddp_xid)
                return 0;

        adapter = netdev_priv(netdev);
        fcoe = &adapter->fcoe;
        ddp = &fcoe->ddp[xid];
        if (!ddp->udl)
                return 0;

        hw = &adapter->hw;
        len = ddp->len;
        /* if no error then skip ddp context invalidation */
        if (!ddp->err)
                goto skip_ddpinv;

        if (hw->mac.type == ixgbe_mac_X550) {
                /* X550 does not require DDP FCoE lock */

                IXGBE_WRITE_REG(hw, IXGBE_FCDFC(0, xid), 0);
                IXGBE_WRITE_REG(hw, IXGBE_FCDFC(3, xid),
                                (xid | IXGBE_FCFLTRW_WE));

                /* program FCBUFF */
                IXGBE_WRITE_REG(hw, IXGBE_FCDDC(2, xid), 0);

                /* program FCDMARW */
                IXGBE_WRITE_REG(hw, IXGBE_FCDDC(3, xid),
                                (xid | IXGBE_FCDMARW_WE));

                /* read FCBUFF to check context invalidated */
                IXGBE_WRITE_REG(hw, IXGBE_FCDDC(3, xid),
                                (xid | IXGBE_FCDMARW_RE));
                fcbuff = IXGBE_READ_REG(hw, IXGBE_FCDDC(2, xid));
        } else {
                /* other hardware requires DDP FCoE lock */
                spin_lock_bh(&fcoe->lock);
                IXGBE_WRITE_REG(hw, IXGBE_FCFLT, 0);
                IXGBE_WRITE_REG(hw, IXGBE_FCFLTRW,
                                (xid | IXGBE_FCFLTRW_WE));
                IXGBE_WRITE_REG(hw, IXGBE_FCBUFF, 0);
                IXGBE_WRITE_REG(hw, IXGBE_FCDMARW,
                                (xid | IXGBE_FCDMARW_WE));

                /* guaranteed to be invalidated after 100us */
                IXGBE_WRITE_REG(hw, IXGBE_FCDMARW,
                                (xid | IXGBE_FCDMARW_RE));
                fcbuff = IXGBE_READ_REG(hw, IXGBE_FCBUFF);
                spin_unlock_bh(&fcoe->lock);
        }

        if (fcbuff & IXGBE_FCBUFF_VALID)
                usleep_range(100, 150);

skip_ddpinv:
        if (ddp->sgl)
                dma_unmap_sg(&adapter->pdev->dev, ddp->sgl, ddp->sgc,
                             DMA_FROM_DEVICE);
        if (ddp->pool) {
                dma_pool_free(ddp->pool, ddp->udl, ddp->udp);
                ddp->pool = NULL;
        }

        ixgbe_fcoe_clear_ddp(ddp);

        return len;
}

/**
 * ixgbe_fcoe_ddp_setup - called to set up ddp context
 * @netdev: the corresponding net_device
 * @xid: the exchange id requesting ddp
 * @sgl: the scatter-gather list for this request
 * @sgc: the number of scatter-gather items
 * @target_mode: 1 to setup target mode, 0 to setup initiator mode
 *
 * Returns : 1 for success and 0 for no ddp
 */
static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
                                struct scatterlist *sgl, unsigned int sgc,
                                int target_mode)
{
        struct ixgbe_adapter *adapter;
        struct ixgbe_hw *hw;
        struct ixgbe_fcoe *fcoe;
        struct ixgbe_fcoe_ddp *ddp;
        struct ixgbe_fcoe_ddp_pool *ddp_pool;
        struct scatterlist *sg;
        unsigned int i, j, dmacount;
        unsigned int len;
        static const unsigned int bufflen = IXGBE_FCBUFF_MIN;
        unsigned int firstoff = 0;
        unsigned int lastsize;
        unsigned int thisoff = 0;
        unsigned int thislen = 0;
        u32 fcbuff, fcdmarw, fcfltrw, fcrxctl;
        dma_addr_t addr = 0;

        if (!netdev || !sgl)
                return 0;

        adapter = netdev_priv(netdev);
        if (xid >= netdev->fcoe_ddp_xid) {
                e_warn(drv, "xid=0x%x out-of-range\n", xid);
                return 0;
        }

        /* no DDP if we are already down or resetting */
        if (test_bit(__IXGBE_DOWN, &adapter->state) ||
            test_bit(__IXGBE_RESETTING, &adapter->state))
                return 0;

        fcoe = &adapter->fcoe;
        ddp = &fcoe->ddp[xid];
        if (ddp->sgl) {
                e_err(drv, "xid 0x%x w/ non-null sgl=%p nents=%d\n",
                      xid, ddp->sgl, ddp->sgc);
                return 0;
        }
        ixgbe_fcoe_clear_ddp(ddp);

        if (!fcoe->ddp_pool) {
                e_warn(drv, "No ddp_pool resources allocated\n");
                return 0;
        }

        ddp_pool = per_cpu_ptr(fcoe->ddp_pool, get_cpu());
        if (!ddp_pool->pool) {
                e_warn(drv, "xid=0x%x no ddp pool for fcoe\n", xid);
                goto out_noddp;
        }

        /* set up dma from the scsi command's sgl */
        dmacount = dma_map_sg(&adapter->pdev->dev, sgl, sgc, DMA_FROM_DEVICE);
        if (dmacount == 0) {
                e_err(drv, "xid 0x%x DMA map error\n", xid);
                goto out_noddp;
        }

        /* alloc the udl from the per cpu ddp pool */
        ddp->udl = dma_pool_alloc(ddp_pool->pool, GFP_KERNEL, &ddp->udp);
        if (!ddp->udl) {
                e_err(drv, "failed to allocate ddp context\n");
                goto out_noddp_unmap;
        }
        ddp->pool = ddp_pool->pool;
        ddp->sgl = sgl;
        ddp->sgc = sgc;

        j = 0;
        for_each_sg(sgl, sg, dmacount, i) {
                addr = sg_dma_address(sg);
                len = sg_dma_len(sg);
                while (len) {
                        /* max number of buffers allowed in one DDP context */
                        if (j >= IXGBE_BUFFCNT_MAX) {
                                ddp_pool->noddp++;
                                goto out_noddp_free;
                        }

                        /* get the offset and length of the current buffer */
                        thisoff = addr & ((dma_addr_t)bufflen - 1);
                        thislen = min((bufflen - thisoff), len);
                        /*
                         * all but the 1st buffer (j == 0)
                         * must be aligned on bufflen
                         */
                        if ((j != 0) && (thisoff))
                                goto out_noddp_free;
                        /*
                         * all but the last buffer
                         * ((i == (dmacount - 1)) && (thislen == len))
                         * must end at bufflen
                         */
                        if (((i != (dmacount - 1)) || (thislen != len))
                            && ((thislen + thisoff) != bufflen))
                                goto out_noddp_free;

                        ddp->udl[j] = (u64)(addr - thisoff);
                        /* only the first buffer may have a non-zero offset */
                        if (j == 0)
                                firstoff = thisoff;
                        len -= thislen;
                        addr += thislen;
                        j++;
                }
        }
        /* only the last buffer may have a non-full bufflen */
        lastsize = thisoff + thislen;

        /*
         * lastsize can not be bufflen.
         * If it is, add one more buffer with lastsize = 1.
         */
        if (lastsize == bufflen) {
                if (j >= IXGBE_BUFFCNT_MAX) {
                        ddp_pool->noddp_ext_buff++;
                        goto out_noddp_free;
                }

                ddp->udl[j] = (u64)(fcoe->extra_ddp_buffer_dma);
                j++;
                lastsize = 1;
        }
        put_cpu();

        fcbuff = (IXGBE_FCBUFF_4KB << IXGBE_FCBUFF_BUFFSIZE_SHIFT);
        fcbuff |= ((j & 0xff) << IXGBE_FCBUFF_BUFFCNT_SHIFT);
        fcbuff |= (firstoff << IXGBE_FCBUFF_OFFSET_SHIFT);
        /* Set WRCONTX bit to allow DDP for target */
        if (target_mode)
                fcbuff |= (IXGBE_FCBUFF_WRCONTX);
        fcbuff |= (IXGBE_FCBUFF_VALID);

        fcdmarw = xid;
        fcdmarw |= IXGBE_FCDMARW_WE;
        fcdmarw |= (lastsize << IXGBE_FCDMARW_LASTSIZE_SHIFT);

        fcfltrw = xid;
        fcfltrw |= IXGBE_FCFLTRW_WE;

        /* program DMA context */
        hw = &adapter->hw;

        /* turn on last frame indication for target mode as the target is
         * supposed to send FCP_RSP when it is done.
         */
        if (target_mode && !test_bit(__IXGBE_FCOE_TARGET, &fcoe->mode)) {
                set_bit(__IXGBE_FCOE_TARGET, &fcoe->mode);
                fcrxctl = IXGBE_READ_REG(hw, IXGBE_FCRXCTRL);
                fcrxctl |= IXGBE_FCRXCTRL_LASTSEQH;
                IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL, fcrxctl);
        }

        if (hw->mac.type == ixgbe_mac_X550) {
                /* X550 does not require DDP lock */

                IXGBE_WRITE_REG(hw, IXGBE_FCDDC(0, xid),
                                ddp->udp & DMA_BIT_MASK(32));
                IXGBE_WRITE_REG(hw, IXGBE_FCDDC(1, xid), (u64)ddp->udp >> 32);
                IXGBE_WRITE_REG(hw, IXGBE_FCDDC(2, xid), fcbuff);
                IXGBE_WRITE_REG(hw, IXGBE_FCDDC(3, xid), fcdmarw);
                /* program filter context */
                IXGBE_WRITE_REG(hw, IXGBE_FCDFC(0, xid), IXGBE_FCFLT_VALID);
                IXGBE_WRITE_REG(hw, IXGBE_FCDFC(1, xid), 0);
                IXGBE_WRITE_REG(hw, IXGBE_FCDFC(3, xid), fcfltrw);
        } else {
                /* DDP lock for indirect DDP context access */
                spin_lock_bh(&fcoe->lock);

                IXGBE_WRITE_REG(hw, IXGBE_FCPTRL, ddp->udp & DMA_BIT_MASK(32));
                IXGBE_WRITE_REG(hw, IXGBE_FCPTRH, (u64)ddp->udp >> 32);
                IXGBE_WRITE_REG(hw, IXGBE_FCBUFF, fcbuff);
                IXGBE_WRITE_REG(hw, IXGBE_FCDMARW, fcdmarw);
                /* program filter context */
                IXGBE_WRITE_REG(hw, IXGBE_FCPARAM, 0);
                IXGBE_WRITE_REG(hw, IXGBE_FCFLT, IXGBE_FCFLT_VALID);
                IXGBE_WRITE_REG(hw, IXGBE_FCFLTRW, fcfltrw);

                spin_unlock_bh(&fcoe->lock);
        }

        return 1;

out_noddp_free:
        dma_pool_free(ddp->pool, ddp->udl, ddp->udp);
        ixgbe_fcoe_clear_ddp(ddp);

out_noddp_unmap:
        dma_unmap_sg(&adapter->pdev->dev, sgl, sgc, DMA_FROM_DEVICE);
out_noddp:
        put_cpu();
        return 0;
}

/**
 * ixgbe_fcoe_ddp_get - called to set up ddp context in initiator mode
 * @netdev: the corresponding net_device
 * @xid: the exchange id requesting ddp
 * @sgl: the scatter-gather list for this request
 * @sgc: the number of scatter-gather items
 *
 * This is the implementation of net_device_ops.ndo_fcoe_ddp_setup
 * and is expected to be called from the ULD, e.g., the FCP layer of
 * libfc, to set up ddp for the corresponding xid of the given sglist
 * for the corresponding I/O.
 *
 * Returns : 1 for success and 0 for no ddp
 */
int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
                       struct scatterlist *sgl, unsigned int sgc)
{
        return ixgbe_fcoe_ddp_setup(netdev, xid, sgl, sgc, 0);
}

/**
 * ixgbe_fcoe_ddp_target - called to set up ddp context in target mode
 * @netdev: the corresponding net_device
 * @xid: the exchange id requesting ddp
 * @sgl: the scatter-gather list for this request
 * @sgc: the number of scatter-gather items
 *
 * This is the implementation of net_device_ops.ndo_fcoe_ddp_target
 * and is expected to be called from the ULD, e.g., the FCP layer of
 * libfc, to set up ddp for the corresponding xid of the given sglist
 * for the corresponding I/O. The DDP in target mode is a write I/O
 * request from the initiator.
 *
 * Returns : 1 for success and 0 for no ddp
 */
int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid,
                          struct scatterlist *sgl, unsigned int sgc)
{
        return ixgbe_fcoe_ddp_setup(netdev, xid, sgl, sgc, 1);
}
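
/*
 * For reference, a sketch of how the three DDP entry points above are
 * exposed to the FCoE stack. The real wiring lives in ixgbe_main.c, so
 * the snippet below is illustrative rather than a verbatim copy:
 *
 *        static const struct net_device_ops ixgbe_netdev_ops = {
 *                ...
 *                .ndo_fcoe_ddp_setup  = ixgbe_fcoe_ddp_get,
 *                .ndo_fcoe_ddp_done   = ixgbe_fcoe_ddp_put,
 *                .ndo_fcoe_ddp_target = ixgbe_fcoe_ddp_target,
 *        };
 *
 * libfc's FCP layer then reaches these functions through the ndo_fcoe_*
 * hooks named in the kernel-doc comments.
 */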

/**
 * ixgbe_fcoe_ddp - check ddp status and mark it done
 * @adapter: ixgbe adapter
 * @rx_desc: advanced rx descriptor
 * @skb: the skb holding the received data
 *
 * This checks ddp status.
 *
 * Returns : < 0 indicates an error or not an FCoE ddp, 0 indicates
 * the skb is not passed to the ULD, > 0 indicates the length of data
 * being ddped.
 */
int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
                   union ixgbe_adv_rx_desc *rx_desc,
                   struct sk_buff *skb)
{
        int rc = -EINVAL;
        struct ixgbe_fcoe *fcoe;
        struct ixgbe_fcoe_ddp *ddp;
        struct fc_frame_header *fh;
        struct fcoe_crc_eof *crc;
        __le32 fcerr = ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_FCERR);
        __le32 ddp_err;
        int ddp_max;
        u32 fctl;
        u16 xid;

        if (fcerr == cpu_to_le32(IXGBE_FCERR_BADCRC))
                skb->ip_summed = CHECKSUM_NONE;
        else
                skb->ip_summed = CHECKSUM_UNNECESSARY;

        if (eth_hdr(skb)->h_proto == htons(ETH_P_8021Q))
                fh = (struct fc_frame_header *)(skb->data +
                        sizeof(struct vlan_hdr) + sizeof(struct fcoe_hdr));
        else
                fh = (struct fc_frame_header *)(skb->data +
                        sizeof(struct fcoe_hdr));

        fctl = ntoh24(fh->fh_f_ctl);
        if (fctl & FC_FC_EX_CTX)
                xid = be16_to_cpu(fh->fh_ox_id);
        else
                xid = be16_to_cpu(fh->fh_rx_id);

        ddp_max = IXGBE_FCOE_DDP_MAX;
        /* X550 has a different DDP Max limit */
        if (adapter->hw.mac.type == ixgbe_mac_X550)
                ddp_max = IXGBE_FCOE_DDP_MAX_X550;
        if (xid >= ddp_max)
                return -EINVAL;

        fcoe = &adapter->fcoe;
        ddp = &fcoe->ddp[xid];
        if (!ddp->udl)
                return -EINVAL;

        ddp_err = ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_FCEOFE |
                                              IXGBE_RXDADV_ERR_FCERR);
        if (ddp_err)
                return -EINVAL;

        switch (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_FCSTAT)) {
        /* return 0 to bypass going to ULD for DDPed data */
        case cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_DDP):
                /* update length of DDPed data */
                ddp->len = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
                rc = 0;
                break;
        /* unmap the sg list when FCPRSP is received */
        case cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_FCPRSP):
                dma_unmap_sg(&adapter->pdev->dev, ddp->sgl,
                             ddp->sgc, DMA_FROM_DEVICE);
                ddp->err = (__force u32)ddp_err;
                ddp->sgl = NULL;
                ddp->sgc = 0;
                /* fall through */
        /* if DDP length is present pass it through to ULD */
        case cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_NODDP):
                /* update length of DDPed data */
                ddp->len = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
                if (ddp->len)
                        rc = ddp->len;
                break;
        /* no match will return as an error */
        case cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_NOMTCH):
        default:
                break;
        }

        /* In target mode, check the last data frame of the sequence.
         * For DDP in target mode, data is already DDPed but the header
         * indication of the last data frame would allow us to tell if we
         * got all the data and the ULP can send FCP_RSP back. As this is
         * not a full fcoe frame, we fill in the trailer here so it won't
         * be dropped by the ULP stack.
         */
        if ((fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA) &&
            (fctl & FC_FC_END_SEQ)) {
                skb_linearize(skb);
                crc = skb_put(skb, sizeof(*crc));
                crc->fcoe_eof = FC_EOF_T;
        }

        return rc;
}
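
/*
 * Sketch of the expected caller: the Rx cleanup path in ixgbe_main.c
 * invokes this function for frames matching the FCoE EtherType filter,
 * roughly as follows (illustrative, not verbatim):
 *
 *        if (ixgbe_rx_is_fcoe(rx_ring, rx_desc)) {
 *                ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb);
 *                if (!ddp_bytes) {
 *                        dev_kfree_skb_any(skb);
 *                        continue;
 *                }
 *        }
 *
 * A return of 0 means the payload was placed directly and the skb is not
 * passed up to the ULD; > 0 is the length of the DDPed data.
 */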

/**
 * ixgbe_fso - ixgbe FCoE Sequence Offload (FSO)
 * @tx_ring: tx desc ring
 * @first: first tx_buffer structure containing skb, tx_flags, and protocol
 * @hdr_len: hdr_len to be returned
 *
 * This sets up large send offload for FCoE
 *
 * Returns : 0 indicates success, < 0 for error
 */
int ixgbe_fso(struct ixgbe_ring *tx_ring,
              struct ixgbe_tx_buffer *first,
              u8 *hdr_len)
{
        struct sk_buff *skb = first->skb;
        struct fc_frame_header *fh;
        u32 vlan_macip_lens;
        u32 fcoe_sof_eof = 0;
        u32 mss_l4len_idx;
        u32 type_tucmd = IXGBE_ADVTXT_TUCMD_FCOE;
        u8 sof, eof;

        if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_type != SKB_GSO_FCOE)) {
                dev_err(tx_ring->dev, "Wrong gso type %d: expecting SKB_GSO_FCOE\n",
                        skb_shinfo(skb)->gso_type);
                return -EINVAL;
        }

        /* reset the headers to point at the fcoe/fc headers */
        skb_set_network_header(skb, skb->mac_len);
        skb_set_transport_header(skb, skb->mac_len +
                                 sizeof(struct fcoe_hdr));

        /* set up SOF and ORIS */
        sof = ((struct fcoe_hdr *)skb_network_header(skb))->fcoe_sof;
        switch (sof) {
        case FC_SOF_I2:
                fcoe_sof_eof = IXGBE_ADVTXD_FCOEF_ORIS;
                break;
        case FC_SOF_I3:
                fcoe_sof_eof = IXGBE_ADVTXD_FCOEF_SOF |
                               IXGBE_ADVTXD_FCOEF_ORIS;
                break;
        case FC_SOF_N2:
                break;
        case FC_SOF_N3:
                fcoe_sof_eof = IXGBE_ADVTXD_FCOEF_SOF;
                break;
        default:
                dev_warn(tx_ring->dev, "unknown sof = 0x%x\n", sof);
                return -EINVAL;
        }

        /* the first byte of the last dword is EOF */
        skb_copy_bits(skb, skb->len - 4, &eof, 1);
        /* set up EOF and ORIE */
        switch (eof) {
        case FC_EOF_N:
                fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_N;
                break;
        case FC_EOF_T:
                /* lso needs ORIE */
                if (skb_is_gso(skb))
                        fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_N |
                                        IXGBE_ADVTXD_FCOEF_ORIE;
                else
                        fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_T;
                break;
        case FC_EOF_NI:
                fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_NI;
                break;
        case FC_EOF_A:
                fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_A;
                break;
        default:
                dev_warn(tx_ring->dev, "unknown eof = 0x%x\n", eof);
                return -EINVAL;
        }

        /* set up PARINC indicating data offset */
        fh = (struct fc_frame_header *)skb_transport_header(skb);
        if (fh->fh_f_ctl[2] & FC_FC_REL_OFF)
                fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_PARINC;

        /* include the trailer in headlen as it is replicated per frame */
        *hdr_len = sizeof(struct fcoe_crc_eof);

        /* hdr_len includes fc_hdr if FCoE LSO is enabled */
        if (skb_is_gso(skb)) {
                *hdr_len += skb_transport_offset(skb) +
                            sizeof(struct fc_frame_header);
                /* update gso_segs and bytecount */
                first->gso_segs = DIV_ROUND_UP(skb->len - *hdr_len,
                                               skb_shinfo(skb)->gso_size);
                first->bytecount += (first->gso_segs - 1) * *hdr_len;
                first->tx_flags |= IXGBE_TX_FLAGS_TSO;
                /* Hardware expects L4T to be RSV for FCoE TSO */
                type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_RSV;
        }

        /* set flag indicating FCOE to ixgbe_tx_map call */
        first->tx_flags |= IXGBE_TX_FLAGS_FCOE | IXGBE_TX_FLAGS_CC;

        /* mss_l4len_id: use 0 for FSO as TSO, no need for L4LEN */
        mss_l4len_idx = skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;

        /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
        vlan_macip_lens = skb_transport_offset(skb) +
                          sizeof(struct fc_frame_header);
        vlan_macip_lens |= (skb_transport_offset(skb) - 4)
                           << IXGBE_ADVTXD_MACLEN_SHIFT;
        vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;

        /* write context desc */
        ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, fcoe_sof_eof,
                          type_tucmd, mss_l4len_idx);

        return 0;
}
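
/*
 * Sketch of the expected caller: the transmit path in ixgbe_main.c calls
 * ixgbe_fso() for FCoE frames before mapping the buffers, roughly as
 * follows (illustrative, not verbatim):
 *
 *        if (protocol == htons(ETH_P_FCOE)) {
 *                if (ixgbe_fso(tx_ring, first, &hdr_len) < 0)
 *                        goto out_drop;
 *        }
 */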

static void ixgbe_fcoe_dma_pool_free(struct ixgbe_fcoe *fcoe, unsigned int cpu)
{
        struct ixgbe_fcoe_ddp_pool *ddp_pool;

        ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu);
        dma_pool_destroy(ddp_pool->pool);
        ddp_pool->pool = NULL;
}

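/*
 * A brief note (descriptive, inferred from the code below): each per-CPU
 * pool hands out fixed-size blocks of IXGBE_FCPTR_MAX bytes, aligned to
 * IXGBE_FCPTR_ALIGN, which back the user descriptor lists (UDLs) handed
 * out in ixgbe_fcoe_ddp_setup(). Keeping one pool per CPU lets the setup
 * path allocate under get_cpu() without taking a lock.
 */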
static int ixgbe_fcoe_dma_pool_alloc(struct ixgbe_fcoe *fcoe,
                                     struct device *dev,
                                     unsigned int cpu)
{
        struct ixgbe_fcoe_ddp_pool *ddp_pool;
        struct dma_pool *pool;
        char pool_name[32];

        snprintf(pool_name, sizeof(pool_name), "ixgbe_fcoe_ddp_%u", cpu);

        pool = dma_pool_create(pool_name, dev, IXGBE_FCPTR_MAX,
                               IXGBE_FCPTR_ALIGN, PAGE_SIZE);
        if (!pool)
                return -ENOMEM;

        ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu);
        ddp_pool->pool = pool;
        ddp_pool->noddp = 0;
        ddp_pool->noddp_ext_buff = 0;

        return 0;
}

/**
 * ixgbe_configure_fcoe - configures registers for fcoe at start
 * @adapter: ptr to ixgbe adapter
 *
 * This sets up FCoE related registers
 *
 * Returns : none
 */
void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
{
        struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE];
        struct ixgbe_hw *hw = &adapter->hw;
        int i, fcoe_q, fcoe_i, fcoe_q_h = 0;
        int fcreta_size;
        u32 etqf;

        /* Minimal functionality for FCoE requires at least CRC offloads */
        if (!(adapter->netdev->features & NETIF_F_FCOE_CRC))
                return;

        /* Enable L2 EtherType filter for FCoE, needed for FCoE CRC and DDP */
        etqf = ETH_P_FCOE | IXGBE_ETQF_FCOE | IXGBE_ETQF_FILTER_EN;
        if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
                etqf |= IXGBE_ETQF_POOL_ENABLE;
                etqf |= VMDQ_P(0) << IXGBE_ETQF_POOL_SHIFT;
        }
        IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FCOE), etqf);
        IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE), 0);

        /* leave the registers unconfigured if FCoE is disabled */
        if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
                return;

        /* Use one or more Rx queues for FCoE via the redirection table */
        fcreta_size = IXGBE_FCRETA_SIZE;
        if (adapter->hw.mac.type == ixgbe_mac_X550)
                fcreta_size = IXGBE_FCRETA_SIZE_X550;

        for (i = 0; i < fcreta_size; i++) {
                if (adapter->hw.mac.type == ixgbe_mac_X550) {
                        int fcoe_i_h = fcoe->offset + ((i + fcreta_size) %
                                                        fcoe->indices);
                        fcoe_q_h = adapter->rx_ring[fcoe_i_h]->reg_idx;
                        fcoe_q_h = (fcoe_q_h << IXGBE_FCRETA_ENTRY_HIGH_SHIFT) &
                                   IXGBE_FCRETA_ENTRY_HIGH_MASK;
                }

                fcoe_i = fcoe->offset + (i % fcoe->indices);
                fcoe_i &= IXGBE_FCRETA_ENTRY_MASK;
                fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
                fcoe_q |= fcoe_q_h;
                IXGBE_WRITE_REG(hw, IXGBE_FCRETA(i), fcoe_q);
        }
        IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, IXGBE_FCRECTL_ENA);

        /* Enable L2 EtherType filter for FIP */
        etqf = ETH_P_FIP | IXGBE_ETQF_FILTER_EN;
        if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
                etqf |= IXGBE_ETQF_POOL_ENABLE;
                etqf |= VMDQ_P(0) << IXGBE_ETQF_POOL_SHIFT;
        }
        IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FIP), etqf);

        /* Send FIP frames to the first FCoE queue */
        fcoe_q = adapter->rx_ring[fcoe->offset]->reg_idx;
        IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FIP),
                        IXGBE_ETQS_QUEUE_EN |
                        (fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT));

        /* Configure FCoE Rx control */
        IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL,
                        IXGBE_FCRXCTRL_FCCRCBO |
                        (FC_FCOE_VER << IXGBE_FCRXCTRL_FCOEVER_SHIFT));
}
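
/*
 * Worked example of the redirection math above, with hypothetical values:
 * if fcoe->offset = 8 and fcoe->indices = 4, then fcoe_i cycles through
 * 8, 9, 10, 11 as i runs over the FCRETA entries, so FCoE Rx traffic is
 * spread round-robin across the reg_idx values of those four rings.
 */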

/**
 * ixgbe_free_fcoe_ddp_resources - release all fcoe ddp context resources
 * @adapter: ixgbe adapter
 *
 * Cleans up outstanding ddp context resources
 *
 * Returns : none
 */
void ixgbe_free_fcoe_ddp_resources(struct ixgbe_adapter *adapter)
{
        struct ixgbe_fcoe *fcoe = &adapter->fcoe;
        int cpu, i, ddp_max;

        /* do nothing if no DDP pools were allocated */
        if (!fcoe->ddp_pool)
                return;

        ddp_max = IXGBE_FCOE_DDP_MAX;
        /* X550 has a different DDP Max limit */
        if (adapter->hw.mac.type == ixgbe_mac_X550)
                ddp_max = IXGBE_FCOE_DDP_MAX_X550;

        for (i = 0; i < ddp_max; i++)
                ixgbe_fcoe_ddp_put(adapter->netdev, i);

        for_each_possible_cpu(cpu)
                ixgbe_fcoe_dma_pool_free(fcoe, cpu);

        dma_unmap_single(&adapter->pdev->dev,
                         fcoe->extra_ddp_buffer_dma,
                         IXGBE_FCBUFF_MIN,
                         DMA_FROM_DEVICE);
        kfree(fcoe->extra_ddp_buffer);

        fcoe->extra_ddp_buffer = NULL;
        fcoe->extra_ddp_buffer_dma = 0;
}

/**
 * ixgbe_setup_fcoe_ddp_resources - setup all fcoe ddp context resources
 * @adapter: ixgbe adapter
 *
 * Sets up ddp context resources
 *
 * Returns : 0 indicates success or -ENOMEM on failure
 */
int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter *adapter)
{
        struct ixgbe_fcoe *fcoe = &adapter->fcoe;
        struct device *dev = &adapter->pdev->dev;
        void *buffer;
        dma_addr_t dma;
        unsigned int cpu;

        /* do nothing if no DDP pools were allocated */
        if (!fcoe->ddp_pool)
                return 0;

        /* Extra buffer to be shared by all DDPs for a HW workaround */
        buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_KERNEL);
        if (!buffer)
                return -ENOMEM;

        dma = dma_map_single(dev, buffer, IXGBE_FCBUFF_MIN, DMA_FROM_DEVICE);
        if (dma_mapping_error(dev, dma)) {
                e_err(drv, "failed to map extra DDP buffer\n");
                kfree(buffer);
                return -ENOMEM;
        }

        fcoe->extra_ddp_buffer = buffer;
        fcoe->extra_ddp_buffer_dma = dma;

        /* allocate a pci pool for each cpu */
        for_each_possible_cpu(cpu) {
                int err = ixgbe_fcoe_dma_pool_alloc(fcoe, dev, cpu);

                if (!err)
                        continue;

                e_err(drv, "failed to alloc DDP pool on cpu:%d\n", cpu);
                ixgbe_free_fcoe_ddp_resources(adapter);
                return -ENOMEM;
        }

        return 0;
}

static int ixgbe_fcoe_ddp_enable(struct ixgbe_adapter *adapter)
{
        struct ixgbe_fcoe *fcoe = &adapter->fcoe;

        if (!(adapter->flags & IXGBE_FLAG_FCOE_CAPABLE))
                return -EINVAL;

        fcoe->ddp_pool = alloc_percpu(struct ixgbe_fcoe_ddp_pool);
        if (!fcoe->ddp_pool) {
                e_err(drv, "failed to allocate percpu DDP resources\n");
                return -ENOMEM;
        }

        adapter->netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX - 1;
        /* X550 has a different DDP Max limit */
        if (adapter->hw.mac.type == ixgbe_mac_X550)
                adapter->netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX_X550 - 1;

        return 0;
}

static void ixgbe_fcoe_ddp_disable(struct ixgbe_adapter *adapter)
{
        struct ixgbe_fcoe *fcoe = &adapter->fcoe;

        adapter->netdev->fcoe_ddp_xid = 0;

        if (!fcoe->ddp_pool)
                return;

        free_percpu(fcoe->ddp_pool);
        fcoe->ddp_pool = NULL;
}

/**
 * ixgbe_fcoe_enable - turn on FCoE offload feature
 * @netdev: the corresponding netdev
 *
 * Turns on FCoE offload feature in 82599.
 *
 * Returns : 0 indicates success or -EINVAL on failure
 */
int ixgbe_fcoe_enable(struct net_device *netdev)
{
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_fcoe *fcoe = &adapter->fcoe;

        atomic_inc(&fcoe->refcnt);

        if (!(adapter->flags & IXGBE_FLAG_FCOE_CAPABLE))
                return -EINVAL;

        if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
                return -EINVAL;

        e_info(drv, "Enabling FCoE offload features.\n");

        if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
                e_warn(probe, "Enabling FCoE on PF will disable legacy VFs\n");

        if (netif_running(netdev))
                netdev->netdev_ops->ndo_stop(netdev);

        /* Allocate per CPU memory to track DDP pools */
        ixgbe_fcoe_ddp_enable(adapter);

        /* enable FCoE and notify stack */
        adapter->flags |= IXGBE_FLAG_FCOE_ENABLED;
        netdev->features |= NETIF_F_FCOE_MTU;
        netdev_features_change(netdev);

        /* release existing queues and reallocate them */
        ixgbe_clear_interrupt_scheme(adapter);
        ixgbe_init_interrupt_scheme(adapter);

        if (netif_running(netdev))
                netdev->netdev_ops->ndo_open(netdev);

        return 0;
}

/**
 * ixgbe_fcoe_disable - turn off FCoE offload feature
 * @netdev: the corresponding netdev
 *
 * Turns off FCoE offload feature in 82599.
 *
 * Returns : 0 indicates success or -EINVAL on failure
 */
int ixgbe_fcoe_disable(struct net_device *netdev)
{
        struct ixgbe_adapter *adapter = netdev_priv(netdev);

        if (!atomic_dec_and_test(&adapter->fcoe.refcnt))
                return -EINVAL;

        if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
                return -EINVAL;

        e_info(drv, "Disabling FCoE offload features.\n");
        if (netif_running(netdev))
                netdev->netdev_ops->ndo_stop(netdev);

        /* Free per CPU memory to track DDP pools */
        ixgbe_fcoe_ddp_disable(adapter);

        /* disable FCoE and notify stack */
        adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
        netdev->features &= ~NETIF_F_FCOE_MTU;

        netdev_features_change(netdev);

        /* release existing queues and reallocate them */
        ixgbe_clear_interrupt_scheme(adapter);
        ixgbe_init_interrupt_scheme(adapter);

        if (netif_running(netdev))
                netdev->netdev_ops->ndo_open(netdev);

        return 0;
}

/**
 * ixgbe_fcoe_get_wwn - get world wide name for the node or the port
 * @netdev: the corresponding net_device
 * @wwn: the world wide name
 * @type: the type of world wide name
 *
 * Returns the node or port world wide name if both the prefix and the san
 * mac address are valid. The wwn is then formed based on the NAA-2 format
 * for IEEE Extended name identifiers (ref. to T10 FC-LS Spec., Sec. 15.3).
 *
 * Returns : 0 on success
 */
int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type)
{
        u16 prefix = 0xffff;
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_mac_info *mac = &adapter->hw.mac;

        switch (type) {
        case NETDEV_FCOE_WWNN:
                prefix = mac->wwnn_prefix;
                break;
        case NETDEV_FCOE_WWPN:
                prefix = mac->wwpn_prefix;
                break;
        default:
                break;
        }

        if ((prefix != 0xffff) &&
            is_valid_ether_addr(mac->san_addr)) {
                *wwn = ((u64) prefix << 48) |
                       ((u64) mac->san_addr[0] << 40) |
                       ((u64) mac->san_addr[1] << 32) |
                       ((u64) mac->san_addr[2] << 24) |
                       ((u64) mac->san_addr[3] << 16) |
                       ((u64) mac->san_addr[4] << 8)  |
                       ((u64) mac->san_addr[5]);
                return 0;
        }
        return -EINVAL;
}
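
/*
 * Worked example of the composition above (values hypothetical): a
 * wwnn_prefix of 0x2000 and a SAN MAC of 00:1b:21:aa:bb:cc yield a WWNN
 * of 0x2000001b21aabbcc, i.e. the 16-bit prefix followed by the 48-bit
 * MAC, matching the NAA-2 layout referenced in the kernel-doc above.
 */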

/**
 * ixgbe_fcoe_get_hbainfo - get FCoE HBA information
 * @netdev: the corresponding net_device
 * @info: HBA information
 *
 * Returns ixgbe HBA information
 *
 * Returns : 0 on success
 */
int ixgbe_fcoe_get_hbainfo(struct net_device *netdev,
                           struct netdev_fcoe_hbainfo *info)
{
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
        int i, pos;
        u8 buf[8];

        if (!info)
                return -EINVAL;

        /* Don't return information on unsupported devices */
        if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
                return -EINVAL;

        /* Manufacturer */
        snprintf(info->manufacturer, sizeof(info->manufacturer),
                 "Intel Corporation");

        /* Serial Number */

        /* Get the PCI-e Device Serial Number Capability */
        pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_DSN);
        if (pos) {
                pos += 4;
                for (i = 0; i < 8; i++)
                        pci_read_config_byte(adapter->pdev, pos + i, &buf[i]);

                snprintf(info->serial_number, sizeof(info->serial_number),
                         "%02X%02X%02X%02X%02X%02X%02X%02X",
                         buf[7], buf[6], buf[5], buf[4],
                         buf[3], buf[2], buf[1], buf[0]);
        } else {
                snprintf(info->serial_number, sizeof(info->serial_number),
                         "Unknown");
        }

        /* Hardware Version */
        snprintf(info->hardware_version,
                 sizeof(info->hardware_version),
                 "Rev %d", hw->revision_id);
        /* Driver Name/Version */
        snprintf(info->driver_version,
                 sizeof(info->driver_version),
                 "%s v%s",
                 ixgbe_driver_name,
                 ixgbe_driver_version);
        /* Firmware Version */
        strlcpy(info->firmware_version, adapter->eeprom_id,
                sizeof(info->firmware_version));

        /* Model */
        if (hw->mac.type == ixgbe_mac_82599EB) {
                snprintf(info->model,
                         sizeof(info->model),
                         "Intel 82599");
        } else if (hw->mac.type == ixgbe_mac_X550) {
                snprintf(info->model,
                         sizeof(info->model),
                         "Intel X550");
        } else {
                snprintf(info->model,
                         sizeof(info->model),
                         "Intel X540");
        }

        /* Model Description */
        snprintf(info->model_description,
                 sizeof(info->model_description),
                 "%s",
                 ixgbe_default_device_descr);

        return 0;
}

/**
 * ixgbe_fcoe_get_tc - get the current TC that fcoe is mapped to
 * @adapter: pointer to the device adapter structure
 *
 * Returns : the TC that FCoE is mapped to
 */
u8 ixgbe_fcoe_get_tc(struct ixgbe_adapter *adapter)
{
#ifdef CONFIG_IXGBE_DCB
        return netdev_get_prio_tc_map(adapter->netdev, adapter->fcoe.up);
#else
        return 0;
#endif
}
