Lines Matching refs:rx_ring
1031 static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring) in ql_get_curr_lbuf() argument
1033 struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx]; in ql_get_curr_lbuf()
1034 rx_ring->lbq_curr_idx++; in ql_get_curr_lbuf()
1035 if (rx_ring->lbq_curr_idx == rx_ring->lbq_len) in ql_get_curr_lbuf()
1036 rx_ring->lbq_curr_idx = 0; in ql_get_curr_lbuf()
1037 rx_ring->lbq_free_cnt++; in ql_get_curr_lbuf()
1042 struct rx_ring *rx_ring) in ql_get_curr_lchunk() argument
1044 struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring); in ql_get_curr_lchunk()
1048 rx_ring->lbq_buf_size, in ql_get_curr_lchunk()
1054 if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size) in ql_get_curr_lchunk()
1064 static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring) in ql_get_curr_sbuf() argument
1066 struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx]; in ql_get_curr_sbuf()
1067 rx_ring->sbq_curr_idx++; in ql_get_curr_sbuf()
1068 if (rx_ring->sbq_curr_idx == rx_ring->sbq_len) in ql_get_curr_sbuf()
1069 rx_ring->sbq_curr_idx = 0; in ql_get_curr_sbuf()
1070 rx_ring->sbq_free_cnt++; in ql_get_curr_sbuf()
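
The two descriptor-fetch helpers above (ql_get_curr_lbuf() and ql_get_curr_sbuf(), lines 1031-1070) share one pattern: return the descriptor at the current index, advance the index with wrap-around at the ring length, and count the slot as free for the refill path. Below is a minimal, self-contained user-space model of that pattern; the struct and function names are hypothetical and only illustrate the index arithmetic, not the driver's actual types.

    struct bq_desc_model { int id; };           /* stand-in for struct bq_desc      */

    struct bq_ring_model {                      /* stand-in for the lbq/sbq fields  */
        struct bq_desc_model *desc;             /* descriptor array                 */
        unsigned int len;                       /* ring length                      */
        unsigned int curr_idx;                  /* next descriptor to hand out      */
        unsigned int free_cnt;                  /* slots the refill path may reuse  */
    };

    /* Return the current descriptor, advance with wrap, mark one slot free. */
    static struct bq_desc_model *get_curr_buf(struct bq_ring_model *ring)
    {
        struct bq_desc_model *d = &ring->desc[ring->curr_idx];

        if (++ring->curr_idx == ring->len)
            ring->curr_idx = 0;
        ring->free_cnt++;
        return d;
    }
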
1075 static void ql_update_cq(struct rx_ring *rx_ring) in ql_update_cq() argument
1077 rx_ring->cnsmr_idx++; in ql_update_cq()
1078 rx_ring->curr_entry++; in ql_update_cq()
1079 if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) { in ql_update_cq()
1080 rx_ring->cnsmr_idx = 0; in ql_update_cq()
1081 rx_ring->curr_entry = rx_ring->cq_base; in ql_update_cq()
1085 static void ql_write_cq_idx(struct rx_ring *rx_ring) in ql_write_cq_idx() argument
1087 ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg); in ql_write_cq_idx()
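
ql_update_cq() and ql_write_cq_idx() (lines 1075-1087) advance the completion-queue consumer index and current-entry pointer, wrapping at cq_len, and then publish the consumer index to the hardware doorbell. The sketch below models just that bookkeeping; cq_write_doorbell() is a hypothetical stand-in for ql_write_db_reg(), which performs an MMIO write in the driver.

    #include <stddef.h>

    struct cq_model {
        unsigned int cnsmr_idx;     /* software consumer index            */
        unsigned int cq_len;        /* number of completion entries       */
        const void  *cq_base;       /* first completion entry             */
        const void  *curr_entry;    /* entry the consumer will read next  */
        size_t       entry_size;    /* size of one completion entry       */
    };

    static void cq_write_doorbell(unsigned int idx) { (void)idx; /* MMIO write in the driver */ }

    /* Consume one entry: bump index and entry pointer, wrap at cq_len. */
    static void cq_advance(struct cq_model *cq)
    {
        cq->cnsmr_idx++;
        cq->curr_entry = (const char *)cq->curr_entry + cq->entry_size;
        if (cq->cnsmr_idx == cq->cq_len) {
            cq->cnsmr_idx = 0;
            cq->curr_entry = cq->cq_base;
        }
    }

    /* Tell the chip how far software has consumed. */
    static void cq_publish(struct cq_model *cq)
    {
        cq_write_doorbell(cq->cnsmr_idx);
    }
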
1090 static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring, in ql_get_next_chunk() argument
1093 if (!rx_ring->pg_chunk.page) { in ql_get_next_chunk()
1095 rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP | in ql_get_next_chunk()
1098 if (unlikely(!rx_ring->pg_chunk.page)) { in ql_get_next_chunk()
1103 rx_ring->pg_chunk.offset = 0; in ql_get_next_chunk()
1104 map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page, in ql_get_next_chunk()
1108 __free_pages(rx_ring->pg_chunk.page, in ql_get_next_chunk()
1110 rx_ring->pg_chunk.page = NULL; in ql_get_next_chunk()
1115 rx_ring->pg_chunk.map = map; in ql_get_next_chunk()
1116 rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page); in ql_get_next_chunk()
1122 lbq_desc->p.pg_chunk = rx_ring->pg_chunk; in ql_get_next_chunk()
1127 rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size; in ql_get_next_chunk()
1128 if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) { in ql_get_next_chunk()
1129 rx_ring->pg_chunk.page = NULL; in ql_get_next_chunk()
1132 rx_ring->pg_chunk.va += rx_ring->lbq_buf_size; in ql_get_next_chunk()
1133 get_page(rx_ring->pg_chunk.page); in ql_get_next_chunk()
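
ql_get_next_chunk() (lines 1090-1133) allocates one large compound page block per ring, maps it for DMA, and hands it out to large-buffer descriptors one lbq_buf_size slice at a time; when the running offset reaches the block size the page is dropped from the ring, otherwise an extra page reference is taken for the next slice. A self-contained model of the slicing arithmetic, without the DMA mapping and page refcounting, might look like the following (chunk_source and next_chunk are hypothetical names).

    #include <stdlib.h>

    struct chunk_model {
        void        *va;           /* virtual address of this slice          */
        unsigned int offset;       /* byte offset of the slice in the block  */
    };

    struct chunk_source {
        void        *block;        /* backing allocation (one page block)    */
        unsigned int block_size;   /* total block size                       */
        unsigned int chunk_size;   /* lbq_buf_size slices carved out of it   */
        unsigned int offset;       /* next unused offset inside the block    */
    };

    /* Carve the next chunk out of the current block, allocating a new block
     * when the previous one has been fully handed out. Returns -1 on OOM.
     * Ownership of the memory passes to the chunks; the driver tracks this
     * with get_page()/put_page() rather than explicit frees. */
    static int next_chunk(struct chunk_source *src, struct chunk_model *out)
    {
        if (!src->block) {
            src->block = malloc(src->block_size);
            if (!src->block)
                return -1;
            src->offset = 0;
        }
        out->va     = (char *)src->block + src->offset;
        out->offset = src->offset;

        src->offset += src->chunk_size;
        if (src->offset == src->block_size)
            src->block = NULL;     /* block consumed; next call allocates    */
        return 0;
    }
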
1139 static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring) in ql_update_lbq() argument
1141 u32 clean_idx = rx_ring->lbq_clean_idx; in ql_update_lbq()
1147 while (rx_ring->lbq_free_cnt > 32) { in ql_update_lbq()
1148 for (i = (rx_ring->lbq_clean_idx % 16); i < 16; i++) { in ql_update_lbq()
1152 lbq_desc = &rx_ring->lbq[clean_idx]; in ql_update_lbq()
1153 if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) { in ql_update_lbq()
1154 rx_ring->lbq_clean_idx = clean_idx; in ql_update_lbq()
1165 rx_ring->lbq_buf_size); in ql_update_lbq()
1169 rx_ring->lbq_buf_size, in ql_update_lbq()
1172 if (clean_idx == rx_ring->lbq_len) in ql_update_lbq()
1176 rx_ring->lbq_clean_idx = clean_idx; in ql_update_lbq()
1177 rx_ring->lbq_prod_idx += 16; in ql_update_lbq()
1178 if (rx_ring->lbq_prod_idx == rx_ring->lbq_len) in ql_update_lbq()
1179 rx_ring->lbq_prod_idx = 0; in ql_update_lbq()
1180 rx_ring->lbq_free_cnt -= 16; in ql_update_lbq()
1186 rx_ring->lbq_prod_idx); in ql_update_lbq()
1187 ql_write_db_reg(rx_ring->lbq_prod_idx, in ql_update_lbq()
1188 rx_ring->lbq_prod_idx_db_reg); in ql_update_lbq()
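
ql_update_lbq() (lines 1139-1188) refills the large buffer queue in batches: while more than 32 slots are free it fills groups of 16 descriptors, advancing the clean index with wrap at lbq_len, then bumps the producer index by 16 (also with wrap) and writes it to the doorbell register. A compact model of that batching, assuming a hypothetical refill_one() that can fail, is sketched here; the real driver only rings the doorbell when at least one buffer was actually refilled, which is not visible in the listing.

    struct refill_ring {
        unsigned int len;         /* ring length, a multiple of 16 here     */
        unsigned int clean_idx;   /* next slot to (re)fill                  */
        unsigned int prod_idx;    /* producer index published to hardware   */
        unsigned int free_cnt;    /* slots consumed and not yet refilled    */
    };

    /* Hypothetical stand-ins: a real driver allocates/maps a buffer and
     * performs an MMIO doorbell write here. */
    static int  refill_one(struct refill_ring *r, unsigned int idx) { (void)r; (void)idx; return 0; }
    static void write_prod_doorbell(unsigned int idx)               { (void)idx; }

    static void refill_in_batches(struct refill_ring *r)
    {
        unsigned int clean_idx = r->clean_idx;
        unsigned int i;

        while (r->free_cnt > 32) {
            for (i = r->clean_idx % 16; i < 16; i++) {
                if (refill_one(r, clean_idx)) {
                    r->clean_idx = clean_idx;   /* resume here next time    */
                    return;
                }
                if (++clean_idx == r->len)
                    clean_idx = 0;
            }
            r->clean_idx = clean_idx;
            r->prod_idx += 16;
            if (r->prod_idx == r->len)
                r->prod_idx = 0;
            r->free_cnt -= 16;
        }
        write_prod_doorbell(r->prod_idx);
    }

ql_update_sbq() (lines 1193-1254, below) follows the same shape for the small buffer queue, with a refill threshold of 16 instead of 32.
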
1193 static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring) in ql_update_sbq() argument
1195 u32 clean_idx = rx_ring->sbq_clean_idx; in ql_update_sbq()
1201 while (rx_ring->sbq_free_cnt > 16) { in ql_update_sbq()
1202 for (i = (rx_ring->sbq_clean_idx % 16); i < 16; i++) { in ql_update_sbq()
1203 sbq_desc = &rx_ring->sbq[clean_idx]; in ql_update_sbq()
1216 rx_ring->sbq_clean_idx = clean_idx; in ql_update_sbq()
1222 rx_ring->sbq_buf_size, in ql_update_sbq()
1227 rx_ring->sbq_clean_idx = clean_idx; in ql_update_sbq()
1234 rx_ring->sbq_buf_size); in ql_update_sbq()
1239 if (clean_idx == rx_ring->sbq_len) in ql_update_sbq()
1242 rx_ring->sbq_clean_idx = clean_idx; in ql_update_sbq()
1243 rx_ring->sbq_prod_idx += 16; in ql_update_sbq()
1244 if (rx_ring->sbq_prod_idx == rx_ring->sbq_len) in ql_update_sbq()
1245 rx_ring->sbq_prod_idx = 0; in ql_update_sbq()
1246 rx_ring->sbq_free_cnt -= 16; in ql_update_sbq()
1252 rx_ring->sbq_prod_idx); in ql_update_sbq()
1253 ql_write_db_reg(rx_ring->sbq_prod_idx, in ql_update_sbq()
1254 rx_ring->sbq_prod_idx_db_reg); in ql_update_sbq()
1259 struct rx_ring *rx_ring) in ql_update_buffer_queues() argument
1261 ql_update_sbq(qdev, rx_ring); in ql_update_buffer_queues()
1262 ql_update_lbq(qdev, rx_ring); in ql_update_buffer_queues()
1438 struct rx_ring *rx_ring) in ql_categorize_rx_err() argument
1443 rx_ring->rx_errors++; in ql_categorize_rx_err()
1493 struct rx_ring *rx_ring, in ql_process_mac_rx_gro_page() argument
1499 struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring); in ql_process_mac_rx_gro_page()
1500 struct napi_struct *napi = &rx_ring->napi; in ql_process_mac_rx_gro_page()
1504 ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring); in ql_process_mac_rx_gro_page()
1514 rx_ring->rx_dropped++; in ql_process_mac_rx_gro_page()
1529 rx_ring->rx_packets++; in ql_process_mac_rx_gro_page()
1530 rx_ring->rx_bytes += length; in ql_process_mac_rx_gro_page()
1532 skb_record_rx_queue(skb, rx_ring->cq_id); in ql_process_mac_rx_gro_page()
1540 struct rx_ring *rx_ring, in ql_process_mac_rx_page() argument
1548 struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring); in ql_process_mac_rx_page()
1549 struct napi_struct *napi = &rx_ring->napi; in ql_process_mac_rx_page()
1554 rx_ring->rx_dropped++; in ql_process_mac_rx_page()
1564 ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring); in ql_process_mac_rx_page()
1577 rx_ring->rx_dropped++; in ql_process_mac_rx_page()
1591 rx_ring->rx_packets++; in ql_process_mac_rx_page()
1592 rx_ring->rx_bytes += skb->len; in ql_process_mac_rx_page()
1618 skb_record_rx_queue(skb, rx_ring->cq_id); in ql_process_mac_rx_page()
1633 struct rx_ring *rx_ring, in ql_process_mac_rx_skb() argument
1641 struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring); in ql_process_mac_rx_skb()
1647 rx_ring->rx_dropped++; in ql_process_mac_rx_skb()
1667 ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring); in ql_process_mac_rx_skb()
1684 rx_ring->rx_dropped++; in ql_process_mac_rx_skb()
1703 rx_ring->rx_packets++; in ql_process_mac_rx_skb()
1704 rx_ring->rx_bytes += skb->len; in ql_process_mac_rx_skb()
1732 skb_record_rx_queue(skb, rx_ring->cq_id); in ql_process_mac_rx_skb()
1736 napi_gro_receive(&rx_ring->napi, skb); in ql_process_mac_rx_skb()
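
The three receive handlers above (ql_process_mac_rx_gro_page(), ql_process_mac_rx_page() and ql_process_mac_rx_skb(), lines 1493-1736) follow one accounting skeleton: fetch the buffer for the completion, recycle it and count an error if the completion's error flags are set, count a drop when no skb can be built, otherwise bump the per-ring packet and byte counters, record the receive queue, and hand the skb to NAPI/GRO. A stripped-down model of that skeleton, with hypothetical names standing in for the flags2 error bits and the skb allocation outcome:

    struct rx_stats {
        unsigned long packets, bytes, dropped, errors;
    };

    static void account_rx(struct rx_stats *st, unsigned int len,
                           int err_flags, int have_skb)
    {
        if (err_flags) {            /* ql_categorize_rx_err() path          */
            st->errors++;
            return;                 /* buffer is recycled, frame discarded  */
        }
        if (!have_skb) {            /* no skb could be built: count a drop  */
            st->dropped++;
            return;
        }
        st->packets++;              /* delivered to the stack               */
        st->bytes += len;
        /* the driver then calls skb_record_rx_queue() and napi_gro_receive() */
    }
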
1761 struct rx_ring *rx_ring, in ql_build_rx_skb() argument
1781 sbq_desc = ql_get_curr_sbuf(rx_ring); in ql_build_rx_skb()
1813 sbq_desc = ql_get_curr_sbuf(rx_ring); in ql_build_rx_skb()
1834 sbq_desc = ql_get_curr_sbuf(rx_ring); in ql_build_rx_skb()
1856 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring); in ql_build_rx_skb()
1872 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring); in ql_build_rx_skb()
1914 sbq_desc = ql_get_curr_sbuf(rx_ring); in ql_build_rx_skb()
1937 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring); in ql_build_rx_skb()
1938 size = (length < rx_ring->lbq_buf_size) ? length : in ql_build_rx_skb()
1939 rx_ring->lbq_buf_size; in ql_build_rx_skb()
1963 struct rx_ring *rx_ring, in ql_process_mac_split_rx_intr() argument
1972 skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp); in ql_process_mac_split_rx_intr()
1976 rx_ring->rx_dropped++; in ql_process_mac_split_rx_intr()
1982 ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring); in ql_process_mac_split_rx_intr()
1992 rx_ring->rx_dropped++; in ql_process_mac_split_rx_intr()
2012 rx_ring->rx_multicast++; in ql_process_mac_split_rx_intr()
2045 rx_ring->rx_packets++; in ql_process_mac_split_rx_intr()
2046 rx_ring->rx_bytes += skb->len; in ql_process_mac_split_rx_intr()
2047 skb_record_rx_queue(skb, rx_ring->cq_id); in ql_process_mac_split_rx_intr()
2051 napi_gro_receive(&rx_ring->napi, skb); in ql_process_mac_split_rx_intr()
2058 struct rx_ring *rx_ring, in ql_process_mac_rx_intr() argument
2073 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp, in ql_process_mac_rx_intr()
2080 ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp, in ql_process_mac_rx_intr()
2088 ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp, in ql_process_mac_rx_intr()
2094 ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp, in ql_process_mac_rx_intr()
2100 ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp, in ql_process_mac_rx_intr()
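
ql_process_mac_rx_intr() (lines 2058-2100) is a dispatcher: depending on header-split, small-buffer and large-page indications in the inbound MAC completion it routes the frame to the split, small-buffer skb, GRO-page or page handler, falling back to the split path otherwise. The flag names below are hypothetical placeholders for the driver's ib_mac_iocb_rsp flag bits; the sketch only shows the dispatch shape seen in the listing.

    enum rx_comp_flags {                 /* hypothetical flag bits              */
        HDR_SPLIT    = 1 << 0,           /* header and data delivered split     */
        DATA_IN_SBUF = 1 << 1,           /* whole frame in a small buffer       */
        GRO_CAPABLE  = 1 << 2,           /* large frame suitable for GRO        */
        DATA_IN_PAGE = 1 << 3,           /* frame in one large page chunk       */
    };

    static void rx_split(unsigned int f)    { (void)f; }   /* stand-ins for the */
    static void rx_skb(unsigned int f)      { (void)f; }   /* four handlers     */
    static void rx_gro_page(unsigned int f) { (void)f; }
    static void rx_page(unsigned int f)     { (void)f; }

    static void dispatch_rx(unsigned int flags)
    {
        if (flags & HDR_SPLIT)
            rx_split(flags);             /* line 2073                           */
        else if (flags & DATA_IN_SBUF)
            rx_skb(flags);               /* line 2080                           */
        else if (flags & GRO_CAPABLE)
            rx_gro_page(flags);          /* line 2088                           */
        else if (flags & DATA_IN_PAGE)
            rx_page(flags);              /* line 2094                           */
        else
            rx_split(flags);             /* fallback, line 2100                 */
    }
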
2206 static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring) in ql_clean_outbound_rx_ring() argument
2208 struct ql_adapter *qdev = rx_ring->qdev; in ql_clean_outbound_rx_ring()
2209 u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg); in ql_clean_outbound_rx_ring()
2215 while (prod != rx_ring->cnsmr_idx) { in ql_clean_outbound_rx_ring()
2219 rx_ring->cq_id, prod, rx_ring->cnsmr_idx); in ql_clean_outbound_rx_ring()
2221 net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry; in ql_clean_outbound_rx_ring()
2235 ql_update_cq(rx_ring); in ql_clean_outbound_rx_ring()
2236 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg); in ql_clean_outbound_rx_ring()
2240 ql_write_cq_idx(rx_ring); in ql_clean_outbound_rx_ring()
2254 static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget) in ql_clean_inbound_rx_ring() argument
2256 struct ql_adapter *qdev = rx_ring->qdev; in ql_clean_inbound_rx_ring()
2257 u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg); in ql_clean_inbound_rx_ring()
2262 while (prod != rx_ring->cnsmr_idx) { in ql_clean_inbound_rx_ring()
2266 rx_ring->cq_id, prod, rx_ring->cnsmr_idx); in ql_clean_inbound_rx_ring()
2268 net_rsp = rx_ring->curr_entry; in ql_clean_inbound_rx_ring()
2272 ql_process_mac_rx_intr(qdev, rx_ring, in ql_clean_inbound_rx_ring()
2288 ql_update_cq(rx_ring); in ql_clean_inbound_rx_ring()
2289 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg); in ql_clean_inbound_rx_ring()
2293 ql_update_buffer_queues(qdev, rx_ring); in ql_clean_inbound_rx_ring()
2294 ql_write_cq_idx(rx_ring); in ql_clean_inbound_rx_ring()
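
ql_clean_outbound_rx_ring() and ql_clean_inbound_rx_ring() (lines 2206-2294) implement the common completion-service loop: read the hardware producer index from the shared shadow register, process entries until the software consumer catches up (bounded by the NAPI budget on the inbound side), then write back the consumer index; the inbound path also refills the buffer queues first. A budget-limited model of the loop, with hypothetical stand-ins for the shadow read, per-entry processing and doorbell write:

    struct comp_ring {
        unsigned int cnsmr_idx;
        unsigned int cq_len;
    };

    static unsigned int read_shadow_prod(const struct comp_ring *r) { (void)r; return 0; }
    static void process_entry(struct comp_ring *r)                  { (void)r; }
    static void write_cnsmr_doorbell(const struct comp_ring *r)     { (void)r; }

    static int service_completions(struct comp_ring *r, int budget)
    {
        unsigned int prod = read_shadow_prod(r);
        int count = 0;

        while (prod != r->cnsmr_idx) {
            process_entry(r);
            if (++r->cnsmr_idx == r->cq_len)    /* ql_update_cq()                */
                r->cnsmr_idx = 0;
            if (++count == budget)
                break;
            prod = read_shadow_prod(r);         /* chip may have produced more   */
        }
        /* inbound path refills lbq/sbq here (ql_update_buffer_queues, line 2293) */
        write_cnsmr_doorbell(r);                /* ql_write_cq_idx()             */
        return count;
    }
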
2300 struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi); in ql_napi_poll_msix() local
2301 struct ql_adapter *qdev = rx_ring->qdev; in ql_napi_poll_msix()
2302 struct rx_ring *trx_ring; in ql_napi_poll_msix()
2304 struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id]; in ql_napi_poll_msix()
2307 "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id); in ql_napi_poll_msix()
2312 trx_ring = &qdev->rx_ring[i]; in ql_napi_poll_msix()
2329 if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) != in ql_napi_poll_msix()
2330 rx_ring->cnsmr_idx) { in ql_napi_poll_msix()
2333 __func__, rx_ring->cq_id); in ql_napi_poll_msix()
2334 work_done = ql_clean_inbound_rx_ring(rx_ring, budget); in ql_napi_poll_msix()
2339 ql_enable_completion_interrupt(qdev, rx_ring->irq); in ql_napi_poll_msix()
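
ql_napi_poll_msix() (lines 2300-2339) is the NAPI poll callback: it services other completion rings sharing this vector's interrupt mask, cleans its own inbound ring only when the shadow producer index shows outstanding work, and re-enables the completion interrupt when done. The usual NAPI contract (stop at budget, complete and re-arm when under budget) can be modeled without kernel APIs as below; in a real driver the under-budget branch is typically where napi_complete_done() would be called before re-enabling interrupts, which is not visible in this listing.

    /* Hypothetical stand-ins for ring cleaning and interrupt re-enable. */
    static int  clean_ring(int budget) { (void)budget; return 0; }
    static void reenable_irq(void)     { }

    /* Returns the amount of work done, as a NAPI poll callback would. */
    static int poll_once(int budget)
    {
        int work_done = clean_ring(budget);

        if (work_done < budget) {
            /* Under budget: complete the poll and re-enable the completion
             * interrupt, matching ql_enable_completion_interrupt() on 2339. */
            reenable_irq();
        }
        return work_done;
    }
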
2495 struct rx_ring *rx_ring = dev_id; in qlge_msix_rx_isr() local
2496 napi_schedule(&rx_ring->napi); in qlge_msix_rx_isr()
2507 struct rx_ring *rx_ring = dev_id; in qlge_isr() local
2508 struct ql_adapter *qdev = rx_ring->qdev; in qlge_isr()
2564 napi_schedule(&rx_ring->napi); in qlge_isr()
2830 static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring) in ql_free_lbq_buffers() argument
2836 curr_idx = rx_ring->lbq_curr_idx; in ql_free_lbq_buffers()
2837 clean_idx = rx_ring->lbq_clean_idx; in ql_free_lbq_buffers()
2839 lbq_desc = &rx_ring->lbq[curr_idx]; in ql_free_lbq_buffers()
2852 if (++curr_idx == rx_ring->lbq_len) in ql_free_lbq_buffers()
2856 if (rx_ring->pg_chunk.page) { in ql_free_lbq_buffers()
2857 pci_unmap_page(qdev->pdev, rx_ring->pg_chunk.map, in ql_free_lbq_buffers()
2859 put_page(rx_ring->pg_chunk.page); in ql_free_lbq_buffers()
2860 rx_ring->pg_chunk.page = NULL; in ql_free_lbq_buffers()
2864 static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring) in ql_free_sbq_buffers() argument
2869 for (i = 0; i < rx_ring->sbq_len; i++) { in ql_free_sbq_buffers()
2870 sbq_desc = &rx_ring->sbq[i]; in ql_free_sbq_buffers()
2893 struct rx_ring *rx_ring; in ql_free_rx_buffers() local
2896 rx_ring = &qdev->rx_ring[i]; in ql_free_rx_buffers()
2897 if (rx_ring->lbq) in ql_free_rx_buffers()
2898 ql_free_lbq_buffers(qdev, rx_ring); in ql_free_rx_buffers()
2899 if (rx_ring->sbq) in ql_free_rx_buffers()
2900 ql_free_sbq_buffers(qdev, rx_ring); in ql_free_rx_buffers()
2906 struct rx_ring *rx_ring; in ql_alloc_rx_buffers() local
2910 rx_ring = &qdev->rx_ring[i]; in ql_alloc_rx_buffers()
2911 if (rx_ring->type != TX_Q) in ql_alloc_rx_buffers()
2912 ql_update_buffer_queues(qdev, rx_ring); in ql_alloc_rx_buffers()
2917 struct rx_ring *rx_ring) in ql_init_lbq_ring() argument
2921 __le64 *bq = rx_ring->lbq_base; in ql_init_lbq_ring()
2923 memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc)); in ql_init_lbq_ring()
2924 for (i = 0; i < rx_ring->lbq_len; i++) { in ql_init_lbq_ring()
2925 lbq_desc = &rx_ring->lbq[i]; in ql_init_lbq_ring()
2934 struct rx_ring *rx_ring) in ql_init_sbq_ring() argument
2938 __le64 *bq = rx_ring->sbq_base; in ql_init_sbq_ring()
2940 memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc)); in ql_init_sbq_ring()
2941 for (i = 0; i < rx_ring->sbq_len; i++) { in ql_init_sbq_ring()
2942 sbq_desc = &rx_ring->sbq[i]; in ql_init_sbq_ring()
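
ql_init_lbq_ring() and ql_init_sbq_ring() (lines 2917-2942) zero the descriptor arrays and walk them once, pairing each bq_desc with its index and with one __le64 slot in the DMA-visible address array (lbq_base/sbq_base). Only part of the loop body appears in the listing, so the following is a plausible self-contained model of that pairing with hypothetical field names, not the driver's exact layout.

    #include <stdint.h>
    #include <string.h>

    struct desc_model {
        unsigned int index;    /* position in the ring                        */
        uint64_t    *addr;     /* slot in the hardware-visible address array  */
    };

    static void init_buf_ring(struct desc_model *desc, uint64_t *bq_base,
                              unsigned int len)
    {
        unsigned int i;

        memset(desc, 0, len * sizeof(*desc));
        for (i = 0; i < len; i++) {
            desc[i].index = i;
            desc[i].addr  = &bq_base[i];   /* where the buffer's DMA address
                                            * goes once the buffer is mapped  */
        }
    }
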
2951 struct rx_ring *rx_ring) in ql_free_rx_resources() argument
2954 if (rx_ring->sbq_base) { in ql_free_rx_resources()
2956 rx_ring->sbq_size, in ql_free_rx_resources()
2957 rx_ring->sbq_base, rx_ring->sbq_base_dma); in ql_free_rx_resources()
2958 rx_ring->sbq_base = NULL; in ql_free_rx_resources()
2962 kfree(rx_ring->sbq); in ql_free_rx_resources()
2963 rx_ring->sbq = NULL; in ql_free_rx_resources()
2966 if (rx_ring->lbq_base) { in ql_free_rx_resources()
2968 rx_ring->lbq_size, in ql_free_rx_resources()
2969 rx_ring->lbq_base, rx_ring->lbq_base_dma); in ql_free_rx_resources()
2970 rx_ring->lbq_base = NULL; in ql_free_rx_resources()
2974 kfree(rx_ring->lbq); in ql_free_rx_resources()
2975 rx_ring->lbq = NULL; in ql_free_rx_resources()
2978 if (rx_ring->cq_base) { in ql_free_rx_resources()
2980 rx_ring->cq_size, in ql_free_rx_resources()
2981 rx_ring->cq_base, rx_ring->cq_base_dma); in ql_free_rx_resources()
2982 rx_ring->cq_base = NULL; in ql_free_rx_resources()
2989 struct rx_ring *rx_ring) in ql_alloc_rx_resources() argument
2995 rx_ring->cq_base = in ql_alloc_rx_resources()
2996 pci_alloc_consistent(qdev->pdev, rx_ring->cq_size, in ql_alloc_rx_resources()
2997 &rx_ring->cq_base_dma); in ql_alloc_rx_resources()
2999 if (rx_ring->cq_base == NULL) { in ql_alloc_rx_resources()
3004 if (rx_ring->sbq_len) { in ql_alloc_rx_resources()
3008 rx_ring->sbq_base = in ql_alloc_rx_resources()
3009 pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size, in ql_alloc_rx_resources()
3010 &rx_ring->sbq_base_dma); in ql_alloc_rx_resources()
3012 if (rx_ring->sbq_base == NULL) { in ql_alloc_rx_resources()
3021 rx_ring->sbq = kmalloc_array(rx_ring->sbq_len, in ql_alloc_rx_resources()
3024 if (rx_ring->sbq == NULL) in ql_alloc_rx_resources()
3027 ql_init_sbq_ring(qdev, rx_ring); in ql_alloc_rx_resources()
3030 if (rx_ring->lbq_len) { in ql_alloc_rx_resources()
3034 rx_ring->lbq_base = in ql_alloc_rx_resources()
3035 pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size, in ql_alloc_rx_resources()
3036 &rx_ring->lbq_base_dma); in ql_alloc_rx_resources()
3038 if (rx_ring->lbq_base == NULL) { in ql_alloc_rx_resources()
3046 rx_ring->lbq = kmalloc_array(rx_ring->lbq_len, in ql_alloc_rx_resources()
3049 if (rx_ring->lbq == NULL) in ql_alloc_rx_resources()
3052 ql_init_lbq_ring(qdev, rx_ring); in ql_alloc_rx_resources()
3058 ql_free_rx_resources(qdev, rx_ring); in ql_alloc_rx_resources()
3097 ql_free_rx_resources(qdev, &qdev->rx_ring[i]); in ql_free_mem_resources()
3110 if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) { in ql_alloc_mem_resources()
3135 static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring) in ql_start_rx_ring() argument
3137 struct cqicb *cqicb = &rx_ring->cqicb; in ql_start_rx_ring()
3139 (rx_ring->cq_id * RX_RING_SHADOW_SPACE); in ql_start_rx_ring()
3141 (rx_ring->cq_id * RX_RING_SHADOW_SPACE); in ql_start_rx_ring()
3143 qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id)); in ql_start_rx_ring()
3151 rx_ring->prod_idx_sh_reg = shadow_reg; in ql_start_rx_ring()
3152 rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma; in ql_start_rx_ring()
3153 *rx_ring->prod_idx_sh_reg = 0; in ql_start_rx_ring()
3156 rx_ring->lbq_base_indirect = shadow_reg; in ql_start_rx_ring()
3157 rx_ring->lbq_base_indirect_dma = shadow_reg_dma; in ql_start_rx_ring()
3158 shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len)); in ql_start_rx_ring()
3159 shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len)); in ql_start_rx_ring()
3160 rx_ring->sbq_base_indirect = shadow_reg; in ql_start_rx_ring()
3161 rx_ring->sbq_base_indirect_dma = shadow_reg_dma; in ql_start_rx_ring()
3164 rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area; in ql_start_rx_ring()
3165 rx_ring->cnsmr_idx = 0; in ql_start_rx_ring()
3166 rx_ring->curr_entry = rx_ring->cq_base; in ql_start_rx_ring()
3169 rx_ring->valid_db_reg = doorbell_area + 0x04; in ql_start_rx_ring()
3172 rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18); in ql_start_rx_ring()
3175 rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c); in ql_start_rx_ring()
3178 cqicb->msix_vect = rx_ring->irq; in ql_start_rx_ring()
3180 bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len; in ql_start_rx_ring()
3183 cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma); in ql_start_rx_ring()
3185 cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma); in ql_start_rx_ring()
3193 if (rx_ring->lbq_len) { in ql_start_rx_ring()
3195 tmp = (u64)rx_ring->lbq_base_dma; in ql_start_rx_ring()
3196 base_indirect_ptr = rx_ring->lbq_base_indirect; in ql_start_rx_ring()
3203 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len)); in ql_start_rx_ring()
3205 cpu_to_le64(rx_ring->lbq_base_indirect_dma); in ql_start_rx_ring()
3206 bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 : in ql_start_rx_ring()
3207 (u16) rx_ring->lbq_buf_size; in ql_start_rx_ring()
3209 bq_len = (rx_ring->lbq_len == 65536) ? 0 : in ql_start_rx_ring()
3210 (u16) rx_ring->lbq_len; in ql_start_rx_ring()
3212 rx_ring->lbq_prod_idx = 0; in ql_start_rx_ring()
3213 rx_ring->lbq_curr_idx = 0; in ql_start_rx_ring()
3214 rx_ring->lbq_clean_idx = 0; in ql_start_rx_ring()
3215 rx_ring->lbq_free_cnt = rx_ring->lbq_len; in ql_start_rx_ring()
3217 if (rx_ring->sbq_len) { in ql_start_rx_ring()
3219 tmp = (u64)rx_ring->sbq_base_dma; in ql_start_rx_ring()
3220 base_indirect_ptr = rx_ring->sbq_base_indirect; in ql_start_rx_ring()
3227 } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len)); in ql_start_rx_ring()
3229 cpu_to_le64(rx_ring->sbq_base_indirect_dma); in ql_start_rx_ring()
3231 cpu_to_le16((u16)(rx_ring->sbq_buf_size)); in ql_start_rx_ring()
3232 bq_len = (rx_ring->sbq_len == 65536) ? 0 : in ql_start_rx_ring()
3233 (u16) rx_ring->sbq_len; in ql_start_rx_ring()
3235 rx_ring->sbq_prod_idx = 0; in ql_start_rx_ring()
3236 rx_ring->sbq_curr_idx = 0; in ql_start_rx_ring()
3237 rx_ring->sbq_clean_idx = 0; in ql_start_rx_ring()
3238 rx_ring->sbq_free_cnt = rx_ring->sbq_len; in ql_start_rx_ring()
3240 switch (rx_ring->type) { in ql_start_rx_ring()
3249 netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix, in ql_start_rx_ring()
3256 "Invalid rx_ring->type = %d.\n", rx_ring->type); in ql_start_rx_ring()
3259 CFG_LCQ, rx_ring->cq_id); in ql_start_rx_ring()
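
ql_start_rx_ring() (lines 3135-3259) programs the completion-queue control block: it carves the per-ring shadow area into the producer-index shadow and the lbq/sbq indirect page lists, points the consumer and producer doorbells into the ring's doorbell page, and encodes the queue lengths. One detail worth calling out is the length encoding on lines 3180, 3206-3210 and 3231-3233: a length of 65536 does not fit in a 16-bit field, so it is stored as 0, presumably interpreted as the maximum by the hardware. A small worked example of that encode/decode pair (names are illustrative):

    #include <stdint.h>

    /* 65536 cannot be represented in 16 bits; the driver stores 0 instead. */
    static uint16_t encode_qlen(uint32_t len)
    {
        return (len == 65536) ? 0 : (uint16_t)len;
    }

    static uint32_t decode_qlen(uint16_t encoded)
    {
        return encoded ? encoded : 65536;
    }
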
3404 qdev->rx_ring[i].irq = vect; in ql_set_tx_vect()
3412 qdev->rx_ring[i].irq = 0; in ql_set_tx_vect()
3430 ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id); in ql_set_irq_mask()
3435 (1 << qdev->rx_ring[qdev->rss_ring_count + in ql_set_irq_mask()
3443 ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id); in ql_set_irq_mask()
3464 qdev->rx_ring[i].irq = i; in ql_resolve_queues_to_irqs()
3550 &qdev->rx_ring[i]); in ql_free_irq()
3552 free_irq(qdev->pdev->irq, &qdev->rx_ring[0]); in ql_free_irq()
3575 &qdev->rx_ring[i]); in ql_request_irq()
3592 &qdev->rx_ring[0]); in ql_request_irq()
3598 intr_context->name, &qdev->rx_ring[0]); in ql_request_irq()
3605 qdev->rx_ring[0].type == DEFAULT_Q ? in ql_request_irq()
3607 qdev->rx_ring[0].type == TX_Q ? "TX_Q" : in ql_request_irq()
3608 qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "", in ql_request_irq()
3822 status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]); in ql_adapter_initialize()
3866 napi_enable(&qdev->rx_ring[i].napi); in ql_adapter_initialize()
4006 napi_disable(&qdev->rx_ring[i].napi); in ql_adapter_down()
4017 netif_napi_del(&qdev->rx_ring[i].napi); in ql_adapter_down()
4109 struct rx_ring *rx_ring; in ql_configure_rings() local
4148 rx_ring = &qdev->rx_ring[i]; in ql_configure_rings()
4149 memset((void *)rx_ring, 0, sizeof(*rx_ring)); in ql_configure_rings()
4150 rx_ring->qdev = qdev; in ql_configure_rings()
4151 rx_ring->cq_id = i; in ql_configure_rings()
4152 rx_ring->cpu = i % cpu_cnt; /* CPU to run handler on. */ in ql_configure_rings()
4157 rx_ring->cq_len = qdev->rx_ring_size; in ql_configure_rings()
4158 rx_ring->cq_size = in ql_configure_rings()
4159 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb); in ql_configure_rings()
4160 rx_ring->lbq_len = NUM_LARGE_BUFFERS; in ql_configure_rings()
4161 rx_ring->lbq_size = in ql_configure_rings()
4162 rx_ring->lbq_len * sizeof(__le64); in ql_configure_rings()
4163 rx_ring->lbq_buf_size = (u16)lbq_buf_len; in ql_configure_rings()
4164 rx_ring->sbq_len = NUM_SMALL_BUFFERS; in ql_configure_rings()
4165 rx_ring->sbq_size = in ql_configure_rings()
4166 rx_ring->sbq_len * sizeof(__le64); in ql_configure_rings()
4167 rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE; in ql_configure_rings()
4168 rx_ring->type = RX_Q; in ql_configure_rings()
4174 rx_ring->cq_len = qdev->tx_ring_size; in ql_configure_rings()
4175 rx_ring->cq_size = in ql_configure_rings()
4176 rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb); in ql_configure_rings()
4177 rx_ring->lbq_len = 0; in ql_configure_rings()
4178 rx_ring->lbq_size = 0; in ql_configure_rings()
4179 rx_ring->lbq_buf_size = 0; in ql_configure_rings()
4180 rx_ring->sbq_len = 0; in ql_configure_rings()
4181 rx_ring->sbq_size = 0; in ql_configure_rings()
4182 rx_ring->sbq_buf_size = 0; in ql_configure_rings()
4183 rx_ring->type = TX_Q; in ql_configure_rings()
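
ql_configure_rings() (lines 4109-4183) sets up two kinds of completion rings: the RSS (inbound) rings get a completion queue plus large and small buffer queues sized from NUM_LARGE_BUFFERS and NUM_SMALL_BUFFERS, while the outbound (TX completion) rings get only a completion queue with every buffer-queue field zeroed. A condensed model of the two configurations, using clearly labeled stand-in values in place of the driver's constants:

    struct ring_cfg {
        unsigned int cq_len;        /* completion entries                      */
        unsigned int lbq_len;       /* large buffer queue entries (0 = none)   */
        unsigned int sbq_len;       /* small buffer queue entries (0 = none)   */
        unsigned int lbq_buf_size;  /* bytes per large buffer chunk            */
        unsigned int sbq_buf_size;  /* bytes per small buffer                  */
        int          is_inbound;    /* RX_Q vs TX_Q in the driver              */
    };

    static void configure_ring(struct ring_cfg *r, int inbound,
                               unsigned int rx_ring_size,
                               unsigned int tx_ring_size,
                               unsigned int lbq_buf_len)
    {
        if (inbound) {                       /* RSS ring: cq + lbq + sbq       */
            r->cq_len       = rx_ring_size;
            r->lbq_len      = 512;           /* stand-in for NUM_LARGE_BUFFERS */
            r->sbq_len      = 512;           /* stand-in for NUM_SMALL_BUFFERS */
            r->lbq_buf_size = lbq_buf_len;
            r->sbq_buf_size = 512;           /* stand-in for SMALL_BUF_MAP_SIZE */
        } else {                             /* outbound ring: cq only         */
            r->cq_len       = tx_ring_size;
            r->lbq_len      = 0;
            r->sbq_len      = 0;
            r->lbq_buf_size = 0;
            r->sbq_buf_size = 0;
        }
        r->is_inbound = inbound;
    }
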
4219 struct rx_ring *rx_ring; in ql_change_rx_buffers() local
4250 rx_ring = &qdev->rx_ring[i]; in ql_change_rx_buffers()
4252 rx_ring->lbq_buf_size = lbq_buf_len; in ql_change_rx_buffers()
4302 struct rx_ring *rx_ring = &qdev->rx_ring[0]; in qlge_get_stats() local
4309 for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) { in qlge_get_stats()
4310 pkts += rx_ring->rx_packets; in qlge_get_stats()
4311 bytes += rx_ring->rx_bytes; in qlge_get_stats()
4312 dropped += rx_ring->rx_dropped; in qlge_get_stats()
4313 errors += rx_ring->rx_errors; in qlge_get_stats()
4314 mcast += rx_ring->rx_multicast; in qlge_get_stats()
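
qlge_get_stats() (lines 4302-4314) walks the rss_ring_count inbound rings and sums their per-ring counters into device-wide totals. The aggregation is straightforward; a minimal model with generic structure names:

    struct ring_counters {
        unsigned long rx_packets, rx_bytes, rx_dropped, rx_errors, rx_multicast;
    };

    struct dev_counters {
        unsigned long packets, bytes, dropped, errors, multicast;
    };

    static void sum_rx_stats(const struct ring_counters *rings, int count,
                             struct dev_counters *out)
    {
        int i;

        out->packets = out->bytes = out->dropped = 0;
        out->errors  = out->multicast = 0;
        for (i = 0; i < count; i++) {
            out->packets   += rings[i].rx_packets;
            out->bytes     += rings[i].rx_bytes;
            out->dropped   += rings[i].rx_dropped;
            out->errors    += rings[i].rx_errors;
            out->multicast += rings[i].rx_multicast;
        }
    }
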
4820 int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget) in ql_clean_lb_rx_ring() argument
4822 return ql_clean_inbound_rx_ring(rx_ring, budget); in ql_clean_lb_rx_ring()
4853 netif_napi_del(&qdev->rx_ring[i].napi); in ql_eeh_close()