Lines Matching refs:adap
390 static void free_tx_desc(struct adapter *adap, struct sge_txq *q, in free_tx_desc() argument
395 struct device *dev = adap->pdev_dev; in free_tx_desc()
434 static inline void reclaim_completed_tx(struct adapter *adap, struct sge_txq *q, in reclaim_completed_tx() argument
447 free_tx_desc(adap, q, avail, unmap); in reclaim_completed_tx()
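The free_tx_desc()/reclaim_completed_tx() references above are the TX reclaim path: reclaim_completed_tx() works out how many descriptors the hardware has finished with and free_tx_desc() unmaps and frees that many, advancing the ring's consumer index. A minimal userspace model of that ring bookkeeping follows; the struct and field names are simplified stand-ins, not the driver's real types.

#include <stdio.h>

/* Simplified stand-in for a TX ring; not the driver's real struct sge_txq. */
struct txq_model {
	unsigned int size;    /* number of descriptors in the ring    */
	unsigned int cidx;    /* consumer index: next desc to reclaim */
	unsigned int in_use;  /* descriptors currently outstanding    */
};

/* Model of free_tx_desc(): retire 'n' completed descriptors, advancing the
 * consumer index with wrap-around.  The real driver also unmaps the DMA
 * buffers and frees the skb attached to each descriptor here.
 */
static void reclaim_model(struct txq_model *q, unsigned int n)
{
	q->in_use -= n;
	q->cidx = (q->cidx + n) % q->size;
}

int main(void)
{
	struct txq_model q = { .size = 1024, .cidx = 1020, .in_use = 8 };

	reclaim_model(&q, 6);                              /* reclaim 6 completed descs */
	printf("cidx=%u in_use=%u\n", q.cidx, q.in_use);   /* cidx wraps to 2 */
	return 0;
}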
492 static void free_rx_bufs(struct adapter *adap, struct sge_fl *q, int n) in free_rx_bufs() argument
498 dma_unmap_page(adap->pdev_dev, get_buf_addr(d), in free_rx_bufs()
499 get_buf_size(adap, d), in free_rx_bufs()
520 static void unmap_rx_buf(struct adapter *adap, struct sge_fl *q) in unmap_rx_buf() argument
525 dma_unmap_page(adap->pdev_dev, get_buf_addr(d), in unmap_rx_buf()
526 get_buf_size(adap, d), PCI_DMA_FROMDEVICE); in unmap_rx_buf()
533 static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q) in ring_fl_db() argument
537 if (is_t4(adap->params.chip)) in ring_fl_db()
550 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A), in ring_fl_db()
586 static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n, in refill_fl() argument
589 struct sge *s = &adap->sge; in refill_fl()
598 node = dev_to_node(adap->pdev_dev); in refill_fl()
613 mapping = dma_map_page(adap->pdev_dev, pg, 0, in refill_fl()
616 if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) { in refill_fl()
643 mapping = dma_map_page(adap->pdev_dev, pg, 0, PAGE_SIZE, in refill_fl()
645 if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) { in refill_fl()
664 ring_fl_db(adap, q); in refill_fl()
666 if (unlikely(fl_starving(adap, q))) { in refill_fl()
668 set_bit(q->cntxt_id - adap->sge.egr_start, in refill_fl()
669 adap->sge.starving_fl); in refill_fl()
675 static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl) in __refill_fl() argument
677 refill_fl(adap, fl, min(MAX_RX_REFILL, fl_cap(fl) - fl->avail), in __refill_fl()
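refill_fl() and __refill_fl() post fresh receive buffers: allocate pages, dma_map_page() them, bail out on dma_mapping_error(), write the bus addresses into the free-list ring, notify the hardware through ring_fl_db(), and set the queue's bit in sge.starving_fl if it is still below the low-water mark. The model below captures only the credit accounting around the doorbell; the threshold of 8 matches the driver, everything else is a simplified stand-in.

#include <stdio.h>
#include <stdbool.h>

/* Simplified stand-in for a free list; not the driver's struct sge_fl. */
struct fl_model {
	unsigned int cap;        /* usable capacity of the list         */
	unsigned int avail;      /* buffers currently posted            */
	unsigned int pend_cred;  /* new buffers not yet told to the HW  */
};

/* Model of ring_fl_db(): tell the hardware about pending credits once enough
 * have accumulated.  The real driver writes a doorbell register
 * (SGE_PF_KDOORBELL) here instead of printing.
 */
static void ring_fl_db_model(struct fl_model *fl)
{
	if (fl->pend_cred >= 8) {
		printf("doorbell: %u new buffers\n", fl->pend_cred & ~7U);
		fl->pend_cred &= 7;
	}
}

/* Model of refill_fl(): try to post up to 'n' buffers.  alloc_ok stands in
 * for page allocation + DMA mapping, either of which can fail in the driver.
 */
static unsigned int refill_fl_model(struct fl_model *fl, unsigned int n,
				    bool alloc_ok)
{
	while (n-- && alloc_ok && fl->avail < fl->cap) {
		fl->avail++;
		fl->pend_cred++;
	}
	ring_fl_db_model(fl);
	return fl->avail;
}

int main(void)
{
	struct fl_model fl = { .cap = 64, .avail = 40, .pend_cred = 3 };

	refill_fl_model(&fl, 16, true);
	printf("avail=%u pend_cred=%u\n", fl.avail, fl.pend_cred);
	return 0;
}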
931 static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n) in ring_tx_db() argument
947 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A), in ring_tx_db()
1094 cxgb_fcoe_offload(struct sk_buff *skb, struct adapter *adap, in cxgb_fcoe_offload() argument
1111 if (!cxgb_fcoe_sof_eof_supported(adap, skb)) in cxgb_fcoe_offload()
1138 struct adapter *adap; in t4_eth_xmit() local
1160 adap = pi->adapter; in t4_eth_xmit()
1162 q = &adap->sge.ethtxq[qidx + pi->first_qset]; in t4_eth_xmit()
1164 reclaim_completed_tx(adap, &q->q, true); in t4_eth_xmit()
1168 err = cxgb_fcoe_offload(skb, adap, pi, &cntrl); in t4_eth_xmit()
1179 dev_err(adap->pdev_dev, in t4_eth_xmit()
1189 unlikely(map_skb(adap->pdev_dev, skb, addr) < 0)) { in t4_eth_xmit()
1225 if (is_t4(adap->params.chip)) in t4_eth_xmit()
1257 TXPKT_INTF(pi->tx_chan) | TXPKT_PF(adap->fn)); in t4_eth_xmit()
1281 ring_tx_db(adap, &q->q, ndesc); in t4_eth_xmit()
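t4_eth_xmit() is the Ethernet transmit fast path seen above: reclaim completed descriptors, run the FCoE offload hook, DMA-map the skb (the packet is dropped if mapping fails), build the TXPKT work request, and ring the doorbell with ring_tx_db(). Before any of that it sizes the request in flits (8-byte words) and converts that to descriptors; since one 64-byte descriptor holds 8 flits the conversion is a simple round-up, illustrated below with made-up packet sizes.

#include <stdio.h>

/* Each SGE TX descriptor is 64 bytes, i.e. 8 "flits" of 8 bytes.  A work
 * request that needs F flits therefore consumes ceil(F / 8) descriptors.
 * The packet numbers below are illustrative examples only.
 */
#define FLITS_PER_DESC	8

static unsigned int flits_to_desc_model(unsigned int flits)
{
	return (flits + FLITS_PER_DESC - 1) / FLITS_PER_DESC;
}

int main(void)
{
	/* e.g. a small packet copied inline might need 4 flits -> 1 desc,
	 * while a packet with a long gather list might need 21 -> 3.
	 */
	printf("4 flits  -> %u desc\n", flits_to_desc_model(4));
	printf("21 flits -> %u desc\n", flits_to_desc_model(21));
	return 0;
}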
1372 ring_tx_db(q->adap, &q->q, ndesc); in ctrl_xmit()
1421 ring_tx_db(q->adap, &q->q, written); in restart_ctrlq()
1428 ring_tx_db(q->adap, &q->q, written); in restart_ctrlq()
1439 int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb) in t4_mgmt_tx() argument
1444 ret = ctrl_xmit(&adap->sge.ctrlq[0], skb); in t4_mgmt_tx()
1496 set_bit(q->q.cntxt_id - q->adap->sge.egr_start, in txq_stop_maperr()
1497 q->adap->sge.txq_maperr); in txq_stop_maperr()
1539 reclaim_completed_tx(q->adap, &q->q, false); in service_ofldq()
1551 else if (map_skb(q->adap->pdev_dev, skb, in service_ofldq()
1564 skb->dev = q->adap->port[0]; in service_ofldq()
1576 ring_tx_db(q->adap, &q->q, written); in service_ofldq()
1586 ring_tx_db(q->adap, &q->q, written); in service_ofldq()
1647 static inline int ofld_send(struct adapter *adap, struct sk_buff *skb) in ofld_send() argument
1653 if (adap->tids.nsftids) in ofld_send()
1655 return ctrl_xmit(&adap->sge.ctrlq[idx], skb); in ofld_send()
1657 return ofld_xmit(&adap->sge.ofldtxq[idx], skb); in ofld_send()
1669 int t4_ofld_send(struct adapter *adap, struct sk_buff *skb) in t4_ofld_send() argument
1674 ret = ofld_send(adap, skb); in t4_ofld_send()
1773 static noinline int handle_trace_pkt(struct adapter *adap, in handle_trace_pkt() argument
1784 if (is_t4(adap->params.chip)) in handle_trace_pkt()
1791 skb->dev = adap->port[0]; in handle_trace_pkt()
1799 struct adapter *adapter = rxq->rspq.adap; in do_gro()
1850 struct sge *s = &q->adap->sge; in t4_ethrx_handler()
1851 int cpl_trace_pkt = is_t4(q->adap->params.chip) ? in t4_ethrx_handler()
1858 return handle_trace_pkt(q->adap, si); in t4_ethrx_handler()
2005 struct adapter *adapter = q->adap; in process_responses()
2023 free_rx_bufs(q->adap, &rxq->fl, 1); in process_responses()
2040 unmap_rx_buf(q->adap, &rxq->fl); in process_responses()
2047 dma_sync_single_for_cpu(q->adap->pdev_dev, in process_responses()
2078 __refill_fl(q->adap, &rxq->fl); in process_responses()
2101 t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS_A), in cxgb_busy_poll()
2166 t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS_A), in napi_rx_handler()
2192 static unsigned int process_intrq(struct adapter *adap) in process_intrq() argument
2196 struct sge_rspq *q = &adap->sge.intrq; in process_intrq()
2199 spin_lock(&adap->sge.intrq_lock); in process_intrq()
2209 qid -= adap->sge.ingr_start; in process_intrq()
2210 napi_schedule(&adap->sge.ingr_map[qid]->napi); in process_intrq()
2222 t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A), in process_intrq()
2229 spin_unlock(&adap->sge.intrq_lock); in process_intrq()
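process_intrq() is the dispatch loop used for MSI and legacy INTx: entries on the single interrupt queue name the hardware qid of an ingress queue that has work, and the handler subtracts sge.ingr_start to index sge.ingr_map[] and schedule that queue's NAPI context. A toy version of the qid-to-index translation (table size and qid values invented) follows.

#include <stdio.h>
#include <stddef.h>

/* Hypothetical miniature ingress-queue table.  In the driver, ingr_map[] is
 * indexed by (hardware qid - sge.ingr_start), because the firmware hands out
 * contiguous qids starting at ingr_start.
 */
#define NQUEUES 4

struct rspq_model { const char *name; };

int main(void)
{
	struct rspq_model rx0 = { "eth-rxq0" }, rx1 = { "eth-rxq1" };
	struct rspq_model *ingr_map[NQUEUES] = { &rx0, &rx1, NULL, NULL };
	unsigned int ingr_start = 16;      /* first qid owned by this PF */
	unsigned int qid = 17;             /* qid reported in the intrq  */

	unsigned int idx = qid - ingr_start;
	if (idx < NQUEUES && ingr_map[idx])
		printf("schedule NAPI for %s\n", ingr_map[idx]->name);
	else
		printf("unexpected qid %u\n", qid);
	return 0;
}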
2239 struct adapter *adap = cookie; in t4_intr_msi() local
2241 if (adap->flags & MASTER_PF) in t4_intr_msi()
2242 t4_slow_intr_handler(adap); in t4_intr_msi()
2243 process_intrq(adap); in t4_intr_msi()
2254 struct adapter *adap = cookie; in t4_intr_intx() local
2256 t4_write_reg(adap, MYPF_REG(PCIE_PF_CLI_A), 0); in t4_intr_intx()
2257 if (((adap->flags & MASTER_PF) && t4_slow_intr_handler(adap)) | in t4_intr_intx()
2258 process_intrq(adap)) in t4_intr_intx()
2270 irq_handler_t t4_intr_handler(struct adapter *adap) in t4_intr_handler() argument
2272 if (adap->flags & USING_MSIX) in t4_intr_handler()
2274 if (adap->flags & USING_MSI) in t4_intr_handler()
2283 struct adapter *adap = (struct adapter *)data; in sge_rx_timer_cb() local
2284 struct sge *s = &adap->sge; in sge_rx_timer_cb()
2295 if (fl_starving(adap, fl)) { in sge_rx_timer_cb()
2304 t4_write_reg(adap, SGE_DEBUG_INDEX_A, 13); in sge_rx_timer_cb()
2305 idma_same_state_cnt[0] = t4_read_reg(adap, SGE_DEBUG_DATA_HIGH_A); in sge_rx_timer_cb()
2306 idma_same_state_cnt[1] = t4_read_reg(adap, SGE_DEBUG_DATA_LOW_A); in sge_rx_timer_cb()
2319 CH_WARN(adap, "SGE idma%d, queue%u, resumed after %d sec\n", in sge_rx_timer_cb()
2350 t4_write_reg(adap, SGE_DEBUG_INDEX_A, 0); in sge_rx_timer_cb()
2351 debug0 = t4_read_reg(adap, SGE_DEBUG_DATA_LOW_A); in sge_rx_timer_cb()
2354 t4_write_reg(adap, SGE_DEBUG_INDEX_A, 11); in sge_rx_timer_cb()
2355 debug11 = t4_read_reg(adap, SGE_DEBUG_DATA_LOW_A); in sge_rx_timer_cb()
2358 CH_WARN(adap, "SGE idma%u, queue%u, maybe stuck state %u %d secs (debug0=%#x, debug11=%#x)\n", in sge_rx_timer_cb()
2361 t4_sge_decode_idma_state(adap, s->idma_state[i]); in sge_rx_timer_cb()
2371 struct adapter *adap = (struct adapter *)data; in sge_tx_timer_cb() local
2372 struct sge *s = &adap->sge; in sge_tx_timer_cb()
2397 free_tx_desc(adap, &q->q, avail, true); in sge_tx_timer_cb()
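sge_rx_timer_cb() and sge_tx_timer_cb() are the two periodic maintenance timers: the RX timer refills starving free lists and watches the IDMA state registers for stalls, while the TX timer walks the Ethernet TX queues round-robin and reclaims completed descriptors under a per-tick budget. A toy of that budgeted round-robin is below; queue counts and the budget are illustrative only.

#include <stdio.h>

/* Model of the periodic TX reclaim in sge_tx_timer_cb(): visit the Ethernet
 * TX queues round-robin starting at a rover index and reclaim completed
 * descriptors until a per-tick budget is spent.
 */
#define NQ 4

int main(void)
{
	unsigned int completed[NQ] = { 3, 0, 10, 2 };  /* reclaimable per queue */
	unsigned int rover = 2, budget = 8;

	for (unsigned int n = 0; n < NQ && budget; n++) {
		unsigned int i = (rover + n) % NQ;
		unsigned int take = completed[i] < budget ? completed[i] : budget;

		budget -= take;
		completed[i] -= take;
		printf("q%u: reclaimed %u\n", i, take);
	}
	/* the driver remembers where it stopped so the next tick is fair */
	return 0;
}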
2440 int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, in t4_sge_alloc_rxq() argument
2446 struct sge *s = &adap->sge; in t4_sge_alloc_rxq()
2452 iq->desc = alloc_ring(adap->pdev_dev, iq->size, iq->iqe_len, 0, in t4_sge_alloc_rxq()
2460 FW_IQ_CMD_PFN_V(adap->fn) | FW_IQ_CMD_VFN_V(0)); in t4_sge_alloc_rxq()
2477 fl->desc = alloc_ring(adap->pdev_dev, fl->size, sizeof(__be64), in t4_sge_alloc_rxq()
2494 ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c); in t4_sge_alloc_rxq()
2506 iq->bar2_addr = bar2_address(adap, in t4_sge_alloc_rxq()
2517 adap->sge.ingr_map[iq->cntxt_id - adap->sge.ingr_start] = iq; in t4_sge_alloc_rxq()
2524 adap->sge.egr_map[fl->cntxt_id - adap->sge.egr_start] = fl; in t4_sge_alloc_rxq()
2529 fl->bar2_addr = bar2_address(adap, in t4_sge_alloc_rxq()
2533 refill_fl(adap, fl, fl_cap(fl), GFP_KERNEL); in t4_sge_alloc_rxq()
2541 dma_free_coherent(adap->pdev_dev, iq->size * iq->iqe_len, in t4_sge_alloc_rxq()
2548 dma_free_coherent(adap->pdev_dev, flsz * sizeof(struct tx_desc), in t4_sge_alloc_rxq()
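t4_sge_alloc_rxq() shows the driver's allocate-then-register pattern: alloc_ring() gets DMA-coherent memory for the ingress queue and optional free list, a FW_IQ_CMD mailbox registers them with the firmware, the returned context IDs are hooked into ingr_map/egr_map, and any failure unwinds the DMA allocations with dma_free_coherent(). The sketch below reproduces only that unwind-on-error shape, with placeholder allocators.

#include <stdio.h>
#include <stdlib.h>

/* Placeholders standing in for alloc_ring() and the FW_IQ_CMD mailbox; only
 * the goto-based unwind mirrors t4_sge_alloc_rxq().
 */
static void *alloc_ring_model(size_t sz)      { return malloc(sz); }
static int   fw_iq_cmd_model(int should_fail) { return should_fail ? -5 : 0; }

static int alloc_rxq_model(int fw_fails)
{
	void *iq_desc, *fl_desc = NULL;
	int ret;

	iq_desc = alloc_ring_model(4096);
	if (!iq_desc)
		return -12;                     /* -ENOMEM */

	fl_desc = alloc_ring_model(4096);
	if (!fl_desc) {
		ret = -12;
		goto err;
	}

	ret = fw_iq_cmd_model(fw_fails);        /* firmware registers the queue */
	if (ret)
		goto err;                       /* refused: free both rings */

	/* success: the driver keeps the rings live and records the context
	 * IDs; the model frees them only so this demo does not leak.
	 */
	free(fl_desc);
	free(iq_desc);
	return 0;

err:
	free(fl_desc);                          /* dma_free_coherent() in the driver */
	free(iq_desc);
	return ret;
}

int main(void)
{
	printf("ok path: %d\n", alloc_rxq_model(0));
	printf("firmware-failure path: %d\n", alloc_rxq_model(1));
	return 0;
}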
2555 static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id) in init_txq() argument
2558 q->bar2_addr = bar2_address(adap, in init_txq()
2567 adap->sge.egr_map[id - adap->sge.egr_start] = q; in init_txq()
2570 int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq, in t4_sge_alloc_eth_txq() argument
2576 struct sge *s = &adap->sge; in t4_sge_alloc_eth_txq()
2582 txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size, in t4_sge_alloc_eth_txq()
2592 FW_EQ_ETH_CMD_PFN_V(adap->fn) | in t4_sge_alloc_eth_txq()
2608 ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c); in t4_sge_alloc_eth_txq()
2612 dma_free_coherent(adap->pdev_dev, in t4_sge_alloc_eth_txq()
2619 init_txq(adap, &txq->q, FW_EQ_ETH_CMD_EQID_G(ntohl(c.eqid_pkd))); in t4_sge_alloc_eth_txq()
2626 int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq, in t4_sge_alloc_ctrl_txq() argument
2632 struct sge *s = &adap->sge; in t4_sge_alloc_ctrl_txq()
2638 txq->q.desc = alloc_ring(adap->pdev_dev, nentries, in t4_sge_alloc_ctrl_txq()
2646 FW_EQ_CTRL_CMD_PFN_V(adap->fn) | in t4_sge_alloc_ctrl_txq()
2662 ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c); in t4_sge_alloc_ctrl_txq()
2664 dma_free_coherent(adap->pdev_dev, in t4_sge_alloc_ctrl_txq()
2671 init_txq(adap, &txq->q, FW_EQ_CTRL_CMD_EQID_G(ntohl(c.cmpliqid_eqid))); in t4_sge_alloc_ctrl_txq()
2672 txq->adap = adap; in t4_sge_alloc_ctrl_txq()
2679 int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq, in t4_sge_alloc_ofld_txq() argument
2684 struct sge *s = &adap->sge; in t4_sge_alloc_ofld_txq()
2690 txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size, in t4_sge_alloc_ofld_txq()
2700 FW_EQ_OFLD_CMD_PFN_V(adap->fn) | in t4_sge_alloc_ofld_txq()
2714 ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c); in t4_sge_alloc_ofld_txq()
2718 dma_free_coherent(adap->pdev_dev, in t4_sge_alloc_ofld_txq()
2725 init_txq(adap, &txq->q, FW_EQ_OFLD_CMD_EQID_G(ntohl(c.eqid_pkd))); in t4_sge_alloc_ofld_txq()
2726 txq->adap = adap; in t4_sge_alloc_ofld_txq()
2734 static void free_txq(struct adapter *adap, struct sge_txq *q) in free_txq() argument
2736 struct sge *s = &adap->sge; in free_txq()
2738 dma_free_coherent(adap->pdev_dev, in free_txq()
2746 static void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq, in free_rspq_fl() argument
2749 struct sge *s = &adap->sge; in free_rspq_fl()
2752 adap->sge.ingr_map[rq->cntxt_id - adap->sge.ingr_start] = NULL; in free_rspq_fl()
2753 t4_iq_free(adap, adap->fn, adap->fn, 0, FW_IQ_TYPE_FL_INT_CAP, in free_rspq_fl()
2755 dma_free_coherent(adap->pdev_dev, (rq->size + 1) * rq->iqe_len, in free_rspq_fl()
2764 free_rx_bufs(adap, fl, fl->avail); in free_rspq_fl()
2765 dma_free_coherent(adap->pdev_dev, fl->size * 8 + s->stat_len, in free_rspq_fl()
2782 void t4_free_ofld_rxqs(struct adapter *adap, int n, struct sge_ofld_rxq *q) in t4_free_ofld_rxqs() argument
2786 free_rspq_fl(adap, &q->rspq, in t4_free_ofld_rxqs()
2796 void t4_free_sge_resources(struct adapter *adap) in t4_free_sge_resources() argument
2799 struct sge_eth_rxq *eq = adap->sge.ethrxq; in t4_free_sge_resources()
2800 struct sge_eth_txq *etq = adap->sge.ethtxq; in t4_free_sge_resources()
2803 for (i = 0; i < adap->sge.ethqsets; i++, eq++, etq++) { in t4_free_sge_resources()
2805 free_rspq_fl(adap, &eq->rspq, in t4_free_sge_resources()
2808 t4_eth_eq_free(adap, adap->fn, adap->fn, 0, in t4_free_sge_resources()
2810 free_tx_desc(adap, &etq->q, etq->q.in_use, true); in t4_free_sge_resources()
2812 free_txq(adap, &etq->q); in t4_free_sge_resources()
2817 t4_free_ofld_rxqs(adap, adap->sge.ofldqsets, adap->sge.ofldrxq); in t4_free_sge_resources()
2818 t4_free_ofld_rxqs(adap, adap->sge.rdmaqs, adap->sge.rdmarxq); in t4_free_sge_resources()
2819 t4_free_ofld_rxqs(adap, adap->sge.rdmaciqs, adap->sge.rdmaciq); in t4_free_sge_resources()
2822 for (i = 0; i < ARRAY_SIZE(adap->sge.ofldtxq); i++) { in t4_free_sge_resources()
2823 struct sge_ofld_txq *q = &adap->sge.ofldtxq[i]; in t4_free_sge_resources()
2827 t4_ofld_eq_free(adap, adap->fn, adap->fn, 0, in t4_free_sge_resources()
2829 free_tx_desc(adap, &q->q, q->q.in_use, false); in t4_free_sge_resources()
2832 free_txq(adap, &q->q); in t4_free_sge_resources()
2837 for (i = 0; i < ARRAY_SIZE(adap->sge.ctrlq); i++) { in t4_free_sge_resources()
2838 struct sge_ctrl_txq *cq = &adap->sge.ctrlq[i]; in t4_free_sge_resources()
2842 t4_ctrl_eq_free(adap, adap->fn, adap->fn, 0, in t4_free_sge_resources()
2845 free_txq(adap, &cq->q); in t4_free_sge_resources()
2849 if (adap->sge.fw_evtq.desc) in t4_free_sge_resources()
2850 free_rspq_fl(adap, &adap->sge.fw_evtq, NULL); in t4_free_sge_resources()
2852 if (adap->sge.intrq.desc) in t4_free_sge_resources()
2853 free_rspq_fl(adap, &adap->sge.intrq, NULL); in t4_free_sge_resources()
2856 memset(adap->sge.egr_map, 0, in t4_free_sge_resources()
2857 adap->sge.egr_sz * sizeof(*adap->sge.egr_map)); in t4_free_sge_resources()
2860 void t4_sge_start(struct adapter *adap) in t4_sge_start() argument
2862 adap->sge.ethtxq_rover = 0; in t4_sge_start()
2863 mod_timer(&adap->sge.rx_timer, jiffies + RX_QCHECK_PERIOD); in t4_sge_start()
2864 mod_timer(&adap->sge.tx_timer, jiffies + TX_QCHECK_PERIOD); in t4_sge_start()
2875 void t4_sge_stop(struct adapter *adap) in t4_sge_stop() argument
2878 struct sge *s = &adap->sge; in t4_sge_stop()
2910 static int t4_sge_init_soft(struct adapter *adap) in t4_sge_init_soft() argument
2912 struct sge *s = &adap->sge; in t4_sge_init_soft()
2922 if ((t4_read_reg(adap, SGE_CONTROL_A) & RXPKTCPLMODE_F) != in t4_sge_init_soft()
2924 dev_err(adap->pdev_dev, "bad SGE CPL MODE\n"); in t4_sge_init_soft()
2937 t4_read_reg(adap, SGE_FL_BUFFER_SIZE0_A+(x)*sizeof(u32)) in t4_sge_init_soft()
2957 dev_err(adap->pdev_dev, "bad SGE FL page buffer sizes [%d, %d]\n", in t4_sge_init_soft()
2964 if (fl_small_mtu < FL_MTU_SMALL_BUFSIZE(adap) || in t4_sge_init_soft()
2965 fl_large_mtu < FL_MTU_LARGE_BUFSIZE(adap)) { in t4_sge_init_soft()
2966 dev_err(adap->pdev_dev, "bad SGE FL MTU sizes [%d, %d]\n", in t4_sge_init_soft()
2975 timer_value_0_and_1 = t4_read_reg(adap, SGE_TIMER_VALUE_0_AND_1_A); in t4_sge_init_soft()
2976 timer_value_2_and_3 = t4_read_reg(adap, SGE_TIMER_VALUE_2_AND_3_A); in t4_sge_init_soft()
2977 timer_value_4_and_5 = t4_read_reg(adap, SGE_TIMER_VALUE_4_AND_5_A); in t4_sge_init_soft()
2978 s->timer_val[0] = core_ticks_to_us(adap, in t4_sge_init_soft()
2980 s->timer_val[1] = core_ticks_to_us(adap, in t4_sge_init_soft()
2982 s->timer_val[2] = core_ticks_to_us(adap, in t4_sge_init_soft()
2984 s->timer_val[3] = core_ticks_to_us(adap, in t4_sge_init_soft()
2986 s->timer_val[4] = core_ticks_to_us(adap, in t4_sge_init_soft()
2988 s->timer_val[5] = core_ticks_to_us(adap, in t4_sge_init_soft()
2991 ingress_rx_threshold = t4_read_reg(adap, SGE_INGRESS_RX_THRESHOLD_A); in t4_sge_init_soft()
3007 int t4_sge_init(struct adapter *adap) in t4_sge_init() argument
3009 struct sge *s = &adap->sge; in t4_sge_init()
3018 sge_control = t4_read_reg(adap, SGE_CONTROL_A); in t4_sge_init()
3030 if (is_t4(adap->params.chip)) { in t4_sge_init()
3036 sge_control2 = t4_read_reg(adap, SGE_CONTROL2_A); in t4_sge_init()
3047 ret = t4_sge_init_soft(adap); in t4_sge_init()
3063 sge_conm_ctrl = t4_read_reg(adap, SGE_CONM_CTRL_A); in t4_sge_init()
3064 if (is_t4(adap->params.chip)) in t4_sge_init()
3070 setup_timer(&s->rx_timer, sge_rx_timer_cb, (unsigned long)adap); in t4_sge_init()
3071 setup_timer(&s->tx_timer, sge_tx_timer_cb, (unsigned long)adap); in t4_sge_init()
3072 s->idma_1s_thresh = core_ticks_per_usec(adap) * 1000000; /* 1 s */ in t4_sge_init()
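Finally, the SGE_TIMER_VALUE_* reads in t4_sge_init_soft() and the idma_1s_thresh setup in t4_sge_init() both hinge on converting core-clock ticks to wall time: core_ticks_to_us() divides by the core clock rate, and the one-second IDMA stall threshold is core_ticks_per_usec() times 10^6. A worked example under an assumed 500 MHz core clock (not read from any real adapter):

#include <stdio.h>

/* core_ticks_to_us() is roughly ticks / core-clock-MHz, rounded to the
 * nearest microsecond; 500 MHz below is an assumed example rate.
 */
static unsigned int core_ticks_to_us_model(unsigned int ticks,
					   unsigned int cclk_mhz)
{
	return (ticks + cclk_mhz / 2) / cclk_mhz;
}

int main(void)
{
	unsigned int cclk_mhz = 500;

	/* a holdoff timer programmed to 2500 ticks is 5 us */
	printf("timer     = %u us\n", core_ticks_to_us_model(2500, cclk_mhz));

	/* the 1-second IDMA stall threshold expressed in ticks */
	printf("1s thresh = %u ticks\n", cclk_mhz * 1000000u);
	return 0;
}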