root/drivers/net/ethernet/cisco/enic/vnic_rq.h

DEFINITIONS

This source file includes the following definitions:
  1. vnic_rq_desc_avail
  2. vnic_rq_desc_used
  3. vnic_rq_next_desc
  4. vnic_rq_next_index
  5. vnic_rq_post
  6. vnic_rq_return_descs
  7. vnic_rq_service
  8. vnic_rq_fill

/*
 * Copyright 2008-2010 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#ifndef _VNIC_RQ_H_
#define _VNIC_RQ_H_

#include <linux/pci.h>
#include <linux/netdevice.h>

#include "vnic_dev.h"
#include "vnic_cq.h"

/* Receive queue control */
struct vnic_rq_ctrl {
        u64 ring_base;                  /* 0x00 */
        u32 ring_size;                  /* 0x08 */
        u32 pad0;
        u32 posted_index;               /* 0x10 */
        u32 pad1;
        u32 cq_index;                   /* 0x18 */
        u32 pad2;
        u32 enable;                     /* 0x20 */
        u32 pad3;
        u32 running;                    /* 0x28 */
        u32 pad4;
        u32 fetch_index;                /* 0x30 */
        u32 pad5;
        u32 error_interrupt_enable;     /* 0x38 */
        u32 pad6;
        u32 error_interrupt_offset;     /* 0x40 */
        u32 pad7;
        u32 error_status;               /* 0x48 */
        u32 pad8;
        u32 dropped_packet_count;       /* 0x50 */
        u32 pad9;
        u32 dropped_packet_count_rc;    /* 0x58 */
        u32 pad10;
};

/* Break the vnic_rq_buf allocations into blocks of 32/64 entries */
#define VNIC_RQ_BUF_MIN_BLK_ENTRIES 32
#define VNIC_RQ_BUF_DFLT_BLK_ENTRIES 64
#define VNIC_RQ_BUF_BLK_ENTRIES(entries) \
        ((unsigned int)((entries < VNIC_RQ_BUF_DFLT_BLK_ENTRIES) ? \
        VNIC_RQ_BUF_MIN_BLK_ENTRIES : VNIC_RQ_BUF_DFLT_BLK_ENTRIES))
#define VNIC_RQ_BUF_BLK_SZ(entries) \
        (VNIC_RQ_BUF_BLK_ENTRIES(entries) * sizeof(struct vnic_rq_buf))
#define VNIC_RQ_BUF_BLKS_NEEDED(entries) \
        DIV_ROUND_UP(entries, VNIC_RQ_BUF_BLK_ENTRIES(entries))
#define VNIC_RQ_BUF_BLKS_MAX VNIC_RQ_BUF_BLKS_NEEDED(4096)

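/* Per-descriptor bookkeeping: the OS buffer posted to a descriptor, its DMA
 * mapping, length, ring index, and a pointer back to the descriptor itself.
 * Entries are linked into a circular list via the next pointer.
 */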
struct vnic_rq_buf {
        struct vnic_rq_buf *next;
        dma_addr_t dma_addr;
        void *os_buf;
        unsigned int os_buf_index;
        unsigned int len;
        unsigned int index;
        void *desc;
        uint64_t wr_id;
};

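/* Ownership states used to arbitrate between NAPI and busy-poll processing
 * of a receive queue.
 */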
enum enic_poll_state {
        ENIC_POLL_STATE_IDLE,
        ENIC_POLL_STATE_NAPI,
        ENIC_POLL_STATE_POLL
};

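/* Receive queue state: the descriptor ring, the blocks of vnic_rq_buf
 * bookkeeping entries, and the to_use/to_clean cursors tracking which
 * descriptor software will post next and which awaits completion.
 */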
struct vnic_rq {
        unsigned int index;
        struct vnic_dev *vdev;
        struct vnic_rq_ctrl __iomem *ctrl;              /* memory-mapped */
        struct vnic_dev_ring ring;
        struct vnic_rq_buf *bufs[VNIC_RQ_BUF_BLKS_MAX];
        struct vnic_rq_buf *to_use;
        struct vnic_rq_buf *to_clean;
        void *os_buf_head;
        unsigned int pkts_outstanding;
};

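/* Number of descriptors currently owned by software, i.e. free to post */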
static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq)
{
        /* how many does SW own? */
        return rq->ring.desc_avail;
}

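/* Number of descriptors currently posted to (owned by) hardware */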
static inline unsigned int vnic_rq_desc_used(struct vnic_rq *rq)
{
        /* how many does HW own? */
        return rq->ring.desc_count - rq->ring.desc_avail - 1;
}

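/* Descriptor that the next vnic_rq_post() will fill */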
static inline void *vnic_rq_next_desc(struct vnic_rq *rq)
{
        return rq->to_use->desc;
}

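/* Ring index of the descriptor that the next vnic_rq_post() will fill */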
static inline unsigned int vnic_rq_next_index(struct vnic_rq *rq)
{
        return rq->to_use->index;
}

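/* Record a buffer in the entry at the to_use cursor, advance the cursor,
 * and update the posted_index register once every VNIC_RQ_RETURN_RATE + 1
 * descriptors so posts reach hardware in batches.
 */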
static inline void vnic_rq_post(struct vnic_rq *rq,
        void *os_buf, unsigned int os_buf_index,
        dma_addr_t dma_addr, unsigned int len,
        uint64_t wrid)
{
        struct vnic_rq_buf *buf = rq->to_use;

        buf->os_buf = os_buf;
        buf->os_buf_index = os_buf_index;
        buf->dma_addr = dma_addr;
        buf->len = len;
        buf->wr_id = wrid;

        buf = buf->next;
        rq->to_use = buf;
        rq->ring.desc_avail--;

        /* Move the posted_index every nth descriptor */

#ifndef VNIC_RQ_RETURN_RATE
#define VNIC_RQ_RETURN_RATE             0xf     /* keep 2^n - 1 */
#endif

        if ((buf->index & VNIC_RQ_RETURN_RATE) == 0) {
                /* Adding write memory barrier prevents compiler and/or CPU
                 * reordering, thus avoiding descriptor posting before
                 * descriptor is initialized. Otherwise, hardware can read
                 * stale descriptor fields.
                 */
                wmb();
                iowrite32(buf->index, &rq->ctrl->posted_index);
        }
}

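/* Return count descriptors to the software-owned pool; used when
 * vnic_rq_service() was called with VNIC_RQ_DEFER_RETURN_DESC.
 */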
static inline void vnic_rq_return_descs(struct vnic_rq *rq, unsigned int count)
{
        rq->ring.desc_avail += count;
}

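/* Whether vnic_rq_service() returns descriptors to software immediately or
 * leaves that to a later vnic_rq_return_descs() call.
 */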
enum desc_return_options {
        VNIC_RQ_RETURN_DESC,
        VNIC_RQ_DEFER_RETURN_DESC,
};

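/* Walk buffers from to_clean through the one matching completed_index,
 * calling buf_service() on each; all but the final buffer are flagged as
 * skipped.  Descriptors are returned now or deferred per desc_return.
 */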
static inline void vnic_rq_service(struct vnic_rq *rq,
        struct cq_desc *cq_desc, u16 completed_index,
        int desc_return, void (*buf_service)(struct vnic_rq *rq,
        struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
        int skipped, void *opaque), void *opaque)
{
        struct vnic_rq_buf *buf;
        int skipped;

        buf = rq->to_clean;
        while (1) {

                skipped = (buf->index != completed_index);

                (*buf_service)(rq, cq_desc, buf, skipped, opaque);

                if (desc_return == VNIC_RQ_RETURN_DESC)
                        rq->ring.desc_avail++;

                rq->to_clean = buf->next;

                if (!skipped)
                        break;

                buf = rq->to_clean;
        }
}

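/* Repeatedly call buf_fill() (which is expected to allocate and post one
 * buffer) until no software-owned descriptors remain or an error occurs.
 */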
static inline int vnic_rq_fill(struct vnic_rq *rq,
        int (*buf_fill)(struct vnic_rq *rq))
{
        int err;

        while (vnic_rq_desc_avail(rq) > 0) {

                err = (*buf_fill)(rq);
                if (err)
                        return err;
        }

        return 0;
}

void vnic_rq_free(struct vnic_rq *rq);
int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index,
        unsigned int desc_count, unsigned int desc_size);
void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
        unsigned int error_interrupt_enable,
        unsigned int error_interrupt_offset);
unsigned int vnic_rq_error_status(struct vnic_rq *rq);
void vnic_rq_enable(struct vnic_rq *rq);
int vnic_rq_disable(struct vnic_rq *rq);
void vnic_rq_clean(struct vnic_rq *rq,
        void (*buf_clean)(struct vnic_rq *rq, struct vnic_rq_buf *buf));

#endif /* _VNIC_RQ_H_ */
