Lines matching refs:adap (uses of the adapter pointer 'adap' in the Chelsio cxgb4 SGE code, sge.c; each entry shows the source line number, the matching line, and the enclosing function)
380 static void free_tx_desc(struct adapter *adap, struct sge_txq *q, in free_tx_desc() argument
385 struct device *dev = adap->pdev_dev; in free_tx_desc()
424 static inline void reclaim_completed_tx(struct adapter *adap, struct sge_txq *q, in reclaim_completed_tx() argument
437 free_tx_desc(adap, q, avail, unmap); in reclaim_completed_tx()
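
The two fragments above are the Tx reclaim path: reclaim_completed_tx() works out how many descriptors the hardware has finished with and hands them to free_tx_desc(), which DMA-unmaps and frees the associated skbs. A minimal sketch of that control flow, assuming the driver's reclaimable() helper and the MAX_TX_RECLAIM bound (neither appears in this listing):

	static inline void reclaim_completed_tx(struct adapter *adap,
						struct sge_txq *q, bool unmap)
	{
		int avail = reclaimable(q);	/* descriptors HW is done with */

		if (avail) {
			/* Bound the work per call so the caller's Tx lock
			 * is not held for too long. */
			if (avail > MAX_TX_RECLAIM)
				avail = MAX_TX_RECLAIM;

			free_tx_desc(adap, q, avail, unmap);	/* unmap + free skbs */
			q->in_use -= avail;
		}
	}
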
482 static void free_rx_bufs(struct adapter *adap, struct sge_fl *q, int n) in free_rx_bufs() argument
488 dma_unmap_page(adap->pdev_dev, get_buf_addr(d), in free_rx_bufs()
489 get_buf_size(adap, d), in free_rx_bufs()
510 static void unmap_rx_buf(struct adapter *adap, struct sge_fl *q) in unmap_rx_buf() argument
515 dma_unmap_page(adap->pdev_dev, get_buf_addr(d), in unmap_rx_buf()
516 get_buf_size(adap, d), PCI_DMA_FROMDEVICE); in unmap_rx_buf()
523 static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q) in ring_fl_db() argument
526 u32 val = adap->params.arch.sge_fl_db; in ring_fl_db()
528 if (is_t4(adap->params.chip)) in ring_fl_db()
543 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A), in ring_fl_db()
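
ring_fl_db() (lines 523-543) publishes newly posted free-list buffers to the hardware. A hedged sketch of the doorbell write, assuming the usual 8-buffer credit granularity and the PIDX_V/PIDX_T5_V/QID_V field macros; the BAR2 user-doorbell fast path (the driver clearly has one, see bar2_address() further down) is omitted:

	static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
	{
		if (q->pend_cred >= 8) {
			u32 val = adap->params.arch.sge_fl_db;

			/* T4 and T5+ encode the producer-index increment differently. */
			if (is_t4(adap->params.chip))
				val |= PIDX_V(q->pend_cred / 8);
			else
				val |= PIDX_T5_V(q->pend_cred / 8);

			/* Descriptor writes must be visible before the doorbell. */
			wmb();

			t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
				     val | QID_V(q->cntxt_id));
			q->pend_cred &= 7;
		}
	}
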
579 static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n, in refill_fl() argument
582 struct sge *s = &adap->sge; in refill_fl()
591 if (test_bit(q->cntxt_id - adap->sge.egr_start, adap->sge.blocked_fl)) in refill_fl()
596 node = dev_to_node(adap->pdev_dev); in refill_fl()
611 mapping = dma_map_page(adap->pdev_dev, pg, 0, in refill_fl()
614 if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) { in refill_fl()
641 mapping = dma_map_page(adap->pdev_dev, pg, 0, PAGE_SIZE, in refill_fl()
643 if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) { in refill_fl()
662 ring_fl_db(adap, q); in refill_fl()
664 if (unlikely(fl_starving(adap, q))) { in refill_fl()
666 set_bit(q->cntxt_id - adap->sge.egr_start, in refill_fl()
667 adap->sge.starving_fl); in refill_fl()
673 static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl) in __refill_fl() argument
675 refill_fl(adap, fl, min(MAX_RX_REFILL, fl_cap(fl) - fl->avail), in __refill_fl()
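
refill_fl() (lines 579-667) allocates pages, DMA-maps them, and writes the bus addresses into free-list descriptors; __refill_fl() is the convenience wrapper that tops the list up towards capacity, bounded by MAX_RX_REFILL. A sketch of the order-0 (PAGE_SIZE) branch of that loop; the software-descriptor bookkeeping is omitted, and field names other than those visible above are assumptions:

	__be64 *d = &q->desc[q->pidx];		/* descriptor write cursor */
	int node = dev_to_node(adap->pdev_dev);

	while (n--) {
		struct page *pg = alloc_pages_node(node, gfp, 0);
		dma_addr_t mapping;

		if (unlikely(!pg))
			break;		/* out of memory; the starving-FL logic
					 * in sge_rx_timer_cb() retries later */

		mapping = dma_map_page(adap->pdev_dev, pg, 0, PAGE_SIZE,
				       PCI_DMA_FROMDEVICE);
		if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
			__free_pages(pg, 0);
			break;
		}

		*d++ = cpu_to_be64(mapping);	/* hand the buffer to the SGE */
		q->avail++;
		if (++q->pidx == q->size) {	/* wrap the producer index */
			q->pidx = 0;
			d = q->desc;
		}
	}
	ring_fl_db(adap, q);			/* tell hardware about the new credits */
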
929 static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n) in ring_tx_db() argument
948 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A), in ring_tx_db()
1101 cxgb_fcoe_offload(struct sk_buff *skb, struct adapter *adap, in cxgb_fcoe_offload() argument
1118 if (!cxgb_fcoe_sof_eof_supported(adap, skb)) in cxgb_fcoe_offload()
1144 struct adapter *adap; in t4_eth_xmit() local
1174 adap = pi->adapter; in t4_eth_xmit()
1176 q = &adap->sge.ethtxq[qidx + pi->first_qset]; in t4_eth_xmit()
1178 reclaim_completed_tx(adap, &q->q, true); in t4_eth_xmit()
1182 err = cxgb_fcoe_offload(skb, adap, pi, &cntrl); in t4_eth_xmit()
1193 dev_err(adap->pdev_dev, in t4_eth_xmit()
1203 unlikely(map_skb(adap->pdev_dev, skb, addr) < 0)) { in t4_eth_xmit()
1239 if (is_t4(adap->params.chip)) in t4_eth_xmit()
1245 if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5) in t4_eth_xmit()
1261 cntrl = hwcsum(adap->params.chip, skb) | in t4_eth_xmit()
1278 TXPKT_PF_V(adap->pf); in t4_eth_xmit()
1280 if (is_t4(adap->params.chip)) in t4_eth_xmit()
1309 ring_tx_db(adap, &q->q, ndesc); in t4_eth_xmit()
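
The lines above outline t4_eth_xmit(), the hard_start_xmit handler: map the skb's queue index to a per-port Tx queue, reclaim completed work, optionally apply the FCoE offload fixups, DMA-map the payload, build the chip-specific CPL_TX_PKT work request (checksum via hwcsum(), PF number via TXPKT_PF_V(adap->pf), with T4 vs T5/T6 layout differences), and ring the Tx doorbell. A compressed, heavily elided sketch of that shape; calc_tx_flits(), flits_to_desc(), txq_avail(), eth_txq_stop(), is_eth_imm() and txq_advance() are assumed helper names not present in this listing:

	netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		struct port_info *pi = netdev_priv(dev);
		struct adapter *adap = pi->adapter;
		struct sge_eth_txq *q;
		dma_addr_t addr[MAX_SKB_FRAGS + 1];
		unsigned int flits, ndesc;

		q = &adap->sge.ethtxq[skb_get_queue_mapping(skb) + pi->first_qset];

		reclaim_completed_tx(adap, &q->q, true);	/* recover finished slots */

		flits = calc_tx_flits(skb);		/* WR size in 8-byte flits */
		ndesc = flits_to_desc(flits);
		if (unlikely(txq_avail(&q->q) < ndesc)) {
			eth_txq_stop(q);		/* ring full: stop the netdev queue */
			return NETDEV_TX_BUSY;
		}

		if (!is_eth_imm(skb) &&
		    unlikely(map_skb(adap->pdev_dev, skb, addr) < 0))
			goto out_free;			/* could not DMA-map, drop */

		/* ... build the CPL_TX_PKT/LSO headers and the SGL here ... */

		txq_advance(&q->q, ndesc);
		ring_tx_db(adap, &q->q, ndesc);		/* kick the hardware */
		return NETDEV_TX_OK;

	out_free:
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}
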
1400 ring_tx_db(q->adap, &q->q, ndesc); in ctrl_xmit()
1448 ring_tx_db(q->adap, &q->q, written); in restart_ctrlq()
1455 ring_tx_db(q->adap, &q->q, written); in restart_ctrlq()
1466 int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb) in t4_mgmt_tx() argument
1471 ret = ctrl_xmit(&adap->sge.ctrlq[0], skb); in t4_mgmt_tx()
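
t4_mgmt_tx() (line 1466) is the thin wrapper for firmware/management work requests: they always go out on control queue 0 via ctrl_xmit(), which either writes the WR into the ring or backlogs the skb until restart_ctrlq() can flush it. A likely shape, with the bottom-half disable assumed rather than taken from the listing:

	int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
	{
		int ret;

		local_bh_disable();
		ret = ctrl_xmit(&adap->sge.ctrlq[0], skb);
		local_bh_enable();
		return ret;
	}
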
1523 set_bit(q->q.cntxt_id - q->adap->sge.egr_start, in txq_stop_maperr()
1524 q->adap->sge.txq_maperr); in txq_stop_maperr()
1566 reclaim_completed_tx(q->adap, &q->q, false); in service_ofldq()
1578 else if (map_skb(q->adap->pdev_dev, skb, in service_ofldq()
1591 skb->dev = q->adap->port[0]; in service_ofldq()
1603 ring_tx_db(q->adap, &q->q, written); in service_ofldq()
1613 ring_tx_db(q->adap, &q->q, written); in service_ofldq()
1674 static inline int ofld_send(struct adapter *adap, struct sk_buff *skb) in ofld_send() argument
1680 if (adap->tids.nsftids) in ofld_send()
1682 return ctrl_xmit(&adap->sge.ctrlq[idx], skb); in ofld_send()
1684 return ofld_xmit(&adap->sge.ofldtxq[idx], skb); in ofld_send()
1696 int t4_ofld_send(struct adapter *adap, struct sk_buff *skb) in t4_ofld_send() argument
1701 ret = ofld_send(adap, skb); in t4_ofld_send()
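
ofld_send() (lines 1674-1684) is the dispatch point for offload traffic: normal offload packets are spread across adap->sge.ofldtxq[] by a queue index carried in the skb, while control-plane packets go to a control queue, forced onto ctrlq[0] when server filter TIDs (adap->tids.nsftids) are in use. A sketch; skb_txq() and is_ctrl_pkt() are assumed helper names:

	static inline int ofld_send(struct adapter *adap, struct sk_buff *skb)
	{
		unsigned int idx = skb_txq(skb);	/* queue index stashed in the skb */

		if (unlikely(is_ctrl_pkt(skb))) {
			/* A single control queue is assumed to be required
			 * when server filter TIDs are in use. */
			if (adap->tids.nsftids)
				idx = 0;
			return ctrl_xmit(&adap->sge.ctrlq[idx], skb);
		}
		return ofld_xmit(&adap->sge.ofldtxq[idx], skb);
	}
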
1800 static noinline int handle_trace_pkt(struct adapter *adap, in handle_trace_pkt() argument
1811 if (is_t4(adap->params.chip)) in handle_trace_pkt()
1818 skb->dev = adap->port[0]; in handle_trace_pkt()
1832 static void cxgb4_sgetim_to_hwtstamp(struct adapter *adap, in cxgb4_sgetim_to_hwtstamp() argument
1837 u64 tmp = (sgetstamp * 1000 * 1000 + adap->params.vpd.cclk / 2); in cxgb4_sgetim_to_hwtstamp()
1839 ns = div_u64(tmp, adap->params.vpd.cclk); in cxgb4_sgetim_to_hwtstamp()
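
The conversion at lines 1837-1839 turns the SGE hardware timestamp, a count of core-clock ticks, into nanoseconds: ns = sgetstamp * 10^6 / cclk, rounded to nearest, which is consistent with adap->params.vpd.cclk being the core clock in kHz. For example, with cclk = 200000 (a 200 MHz core clock) each tick is 5 ns, so sgetstamp = 3 yields (3 * 1000000 + 100000) / 200000 = 15 ns. The result presumably lands in the skb's hardware timestamp via ns_to_ktime():

	ns = div_u64(sgetstamp * 1000 * 1000 + adap->params.vpd.cclk / 2,
		     adap->params.vpd.cclk);	/* round to nearest ns */
	hwtstamps->hwtstamp = ns_to_ktime(ns);
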
1848 struct adapter *adapter = rxq->rspq.adap; in do_gro()
1904 struct sge *s = &q->adap->sge; in t4_ethrx_handler()
1905 int cpl_trace_pkt = is_t4(q->adap->params.chip) ? in t4_ethrx_handler()
1910 return handle_trace_pkt(q->adap, si); in t4_ethrx_handler()
1940 cxgb4_sgetim_to_hwtstamp(q->adap, skb_hwtstamps(skb), in t4_ethrx_handler()
2060 struct adapter *adapter = q->adap; in process_responses()
2078 free_rx_bufs(q->adap, &rxq->fl, 1); in process_responses()
2095 unmap_rx_buf(q->adap, &rxq->fl); in process_responses()
2104 dma_sync_single_for_cpu(q->adap->pdev_dev, in process_responses()
2135 __refill_fl(q->adap, &rxq->fl); in process_responses()
2158 t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS_A), in cxgb_busy_poll()
2224 t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS_A), in napi_rx_handler()
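
Both the busy-poll path (line 2158) and napi_rx_handler() (line 2224) finish by writing the per-PF GTS register: the write credits back the consumed response-queue entries (CIDXINC) and re-arms the interrupt or holdoff timer for that ingress queue. A sketch of the tail of the NAPI poll, where napi, budget and q come from the enclosing handler; CIDXINC_V/SEINTARM_V/INGRESSQID_V/QINTR_TIMER_IDX_V and q->next_intr_params are assumed here:

	unsigned int params;
	int work_done = process_responses(q, budget);

	if (likely(work_done < budget)) {
		napi_complete(napi);
		params = q->next_intr_params;	/* re-arm with the chosen holdoff */
	} else
		params = QINTR_TIMER_IDX_V(7);	/* budget exhausted: keep polling */

	t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS_A),
		     CIDXINC_V(work_done) | SEINTARM_V(params) |
		     INGRESSQID_V((u32)q->cntxt_id));
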
2250 static unsigned int process_intrq(struct adapter *adap) in process_intrq() argument
2254 struct sge_rspq *q = &adap->sge.intrq; in process_intrq()
2257 spin_lock(&adap->sge.intrq_lock); in process_intrq()
2267 qid -= adap->sge.ingr_start; in process_intrq()
2268 napi_schedule(&adap->sge.ingr_map[qid]->napi); in process_intrq()
2280 t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A), in process_intrq()
2287 spin_unlock(&adap->sge.intrq_lock); in process_intrq()
2297 struct adapter *adap = cookie; in t4_intr_msi() local
2299 if (adap->flags & MASTER_PF) in t4_intr_msi()
2300 t4_slow_intr_handler(adap); in t4_intr_msi()
2301 process_intrq(adap); in t4_intr_msi()
2312 struct adapter *adap = cookie; in t4_intr_intx() local
2314 t4_write_reg(adap, MYPF_REG(PCIE_PF_CLI_A), 0); in t4_intr_intx()
2315 if (((adap->flags & MASTER_PF) && t4_slow_intr_handler(adap)) | in t4_intr_intx()
2316 process_intrq(adap)) in t4_intr_intx()
2328 irq_handler_t t4_intr_handler(struct adapter *adap) in t4_intr_handler() argument
2330 if (adap->flags & USING_MSIX) in t4_intr_handler()
2332 if (adap->flags & USING_MSI) in t4_intr_handler()
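
t4_intr_handler() (lines 2328-2332) merely selects which top-level IRQ handler to register, based on how the adapter's interrupts were set up: the per-queue MSI-X handler (not in this listing, presumably t4_sge_intr_msix) when USING_MSIX is set, otherwise the MSI or legacy INTx handlers shown above:

	irq_handler_t t4_intr_handler(struct adapter *adap)
	{
		if (adap->flags & USING_MSIX)
			return t4_sge_intr_msix;	/* assumed MSI-X per-queue handler */
		if (adap->flags & USING_MSI)
			return t4_intr_msi;
		return t4_intr_intx;
	}
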
2341 struct adapter *adap = (struct adapter *)data; in sge_rx_timer_cb() local
2342 struct sge *s = &adap->sge; in sge_rx_timer_cb()
2353 if (fl_starving(adap, fl)) { in sge_rx_timer_cb()
2365 if (!(adap->flags & MASTER_PF)) in sge_rx_timer_cb()
2368 t4_idma_monitor(adap, &s->idma_monitor, HZ, RX_QCHECK_PERIOD); in sge_rx_timer_cb()
2378 struct adapter *adap = (struct adapter *)data; in sge_tx_timer_cb() local
2379 struct sge *s = &adap->sge; in sge_tx_timer_cb()
2404 free_tx_desc(adap, &q->q, avail, true); in sge_tx_timer_cb()
2450 int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, in t4_sge_alloc_rxq() argument
2456 struct sge *s = &adap->sge; in t4_sge_alloc_rxq()
2462 iq->desc = alloc_ring(adap->pdev_dev, iq->size, iq->iqe_len, 0, in t4_sge_alloc_rxq()
2470 FW_IQ_CMD_PFN_V(adap->pf) | FW_IQ_CMD_VFN_V(0)); in t4_sge_alloc_rxq()
2489 enum chip_type chip = CHELSIO_CHIP_VERSION(adap->params.chip); in t4_sge_alloc_rxq()
2501 fl->desc = alloc_ring(adap->pdev_dev, fl->size, sizeof(__be64), in t4_sge_alloc_rxq()
2526 ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c); in t4_sge_alloc_rxq()
2538 iq->bar2_addr = bar2_address(adap, in t4_sge_alloc_rxq()
2549 adap->sge.ingr_map[iq->cntxt_id - adap->sge.ingr_start] = iq; in t4_sge_alloc_rxq()
2556 adap->sge.egr_map[fl->cntxt_id - adap->sge.egr_start] = fl; in t4_sge_alloc_rxq()
2561 fl->bar2_addr = bar2_address(adap, in t4_sge_alloc_rxq()
2565 refill_fl(adap, fl, fl_cap(fl), GFP_KERNEL); in t4_sge_alloc_rxq()
2576 if (!is_t4(adap->params.chip) && cong >= 0) { in t4_sge_alloc_rxq()
2594 ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1, in t4_sge_alloc_rxq()
2597 dev_warn(adap->pdev_dev, "Failed to set Congestion" in t4_sge_alloc_rxq()
2608 dma_free_coherent(adap->pdev_dev, iq->size * iq->iqe_len, in t4_sge_alloc_rxq()
2615 dma_free_coherent(adap->pdev_dev, flsz * sizeof(struct tx_desc), in t4_sge_alloc_rxq()
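
t4_sge_alloc_rxq() (lines 2450-2615) allocates an ingress (response) queue and optional free list: DMA-coherent rings come from alloc_ring(), the queue is created in firmware with an FW_IQ_CMD sent through t4_wr_mbox(), the returned context IDs are recorded in ingr_map/egr_map and resolved to BAR2 doorbell addresses, the free list is primed with refill_fl(), and on T5+ a congestion-manager parameter may be set with t4_set_params(). A much-reduced sketch of the firmware handshake; the FW_IQ_CMD field layout shown is indicative only and most fields are omitted:

	struct fw_iq_cmd c;

	iq->desc = alloc_ring(adap->pdev_dev, iq->size, iq->iqe_len, 0,
			      &iq->phys_addr, NULL, 0,
			      dev_to_node(adap->pdev_dev));
	if (!iq->desc)
		return -ENOMEM;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = htonl(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F |
			    FW_CMD_WRITE_F | FW_CMD_EXEC_F |
			    FW_IQ_CMD_PFN_V(adap->pf) | FW_IQ_CMD_VFN_V(0));
	/* ... queue size, host address, interrupt destination, FL parameters ... */

	ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
	if (ret) {
		dma_free_coherent(adap->pdev_dev, iq->size * iq->iqe_len,
				  iq->desc, iq->phys_addr);
		return ret;
	}

	iq->cntxt_id = ntohs(c.iqid);
	iq->bar2_addr = bar2_address(adap, iq->cntxt_id,
				     T4_BAR2_QTYPE_INGRESS, &iq->bar2_qid);
	adap->sge.ingr_map[iq->cntxt_id - adap->sge.ingr_start] = iq;

	if (fl)
		refill_fl(adap, fl, fl_cap(fl), GFP_KERNEL);
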
2622 static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id) in init_txq() argument
2625 q->bar2_addr = bar2_address(adap, in init_txq()
2634 adap->sge.egr_map[id - adap->sge.egr_start] = q; in init_txq()
2637 int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq, in t4_sge_alloc_eth_txq() argument
2643 struct sge *s = &adap->sge; in t4_sge_alloc_eth_txq()
2649 txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size, in t4_sge_alloc_eth_txq()
2659 FW_EQ_ETH_CMD_PFN_V(adap->pf) | in t4_sge_alloc_eth_txq()
2676 ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c); in t4_sge_alloc_eth_txq()
2680 dma_free_coherent(adap->pdev_dev, in t4_sge_alloc_eth_txq()
2687 init_txq(adap, &txq->q, FW_EQ_ETH_CMD_EQID_G(ntohl(c.eqid_pkd))); in t4_sge_alloc_eth_txq()
2694 int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq, in t4_sge_alloc_ctrl_txq() argument
2700 struct sge *s = &adap->sge; in t4_sge_alloc_ctrl_txq()
2706 txq->q.desc = alloc_ring(adap->pdev_dev, nentries, in t4_sge_alloc_ctrl_txq()
2708 NULL, 0, dev_to_node(adap->pdev_dev)); in t4_sge_alloc_ctrl_txq()
2714 FW_EQ_CTRL_CMD_PFN_V(adap->pf) | in t4_sge_alloc_ctrl_txq()
2731 ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c); in t4_sge_alloc_ctrl_txq()
2733 dma_free_coherent(adap->pdev_dev, in t4_sge_alloc_ctrl_txq()
2740 init_txq(adap, &txq->q, FW_EQ_CTRL_CMD_EQID_G(ntohl(c.cmpliqid_eqid))); in t4_sge_alloc_ctrl_txq()
2741 txq->adap = adap; in t4_sge_alloc_ctrl_txq()
2748 int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq, in t4_sge_alloc_ofld_txq() argument
2753 struct sge *s = &adap->sge; in t4_sge_alloc_ofld_txq()
2759 txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size, in t4_sge_alloc_ofld_txq()
2769 FW_EQ_OFLD_CMD_PFN_V(adap->pf) | in t4_sge_alloc_ofld_txq()
2784 ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c); in t4_sge_alloc_ofld_txq()
2788 dma_free_coherent(adap->pdev_dev, in t4_sge_alloc_ofld_txq()
2795 init_txq(adap, &txq->q, FW_EQ_OFLD_CMD_EQID_G(ntohl(c.eqid_pkd))); in t4_sge_alloc_ofld_txq()
2796 txq->adap = adap; in t4_sge_alloc_ofld_txq()
2804 static void free_txq(struct adapter *adap, struct sge_txq *q) in free_txq() argument
2806 struct sge *s = &adap->sge; in free_txq()
2808 dma_free_coherent(adap->pdev_dev, in free_txq()
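
free_txq() (lines 2804-2808) is the common Tx-ring teardown used by all the free paths below: return the DMA-coherent descriptor memory, including the trailing status page (s->stat_len), and clear the software state. A sketch consistent with the fragments above:

	static void free_txq(struct adapter *adap, struct sge_txq *q)
	{
		struct sge *s = &adap->sge;

		dma_free_coherent(adap->pdev_dev,
				  q->size * sizeof(struct tx_desc) + s->stat_len,
				  q->desc, q->phys_addr);
		q->cntxt_id = 0;
		q->sdesc = NULL;
		q->desc = NULL;
	}
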
2816 static void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq, in free_rspq_fl() argument
2819 struct sge *s = &adap->sge; in free_rspq_fl()
2822 adap->sge.ingr_map[rq->cntxt_id - adap->sge.ingr_start] = NULL; in free_rspq_fl()
2823 t4_iq_free(adap, adap->mbox, adap->pf, 0, FW_IQ_TYPE_FL_INT_CAP, in free_rspq_fl()
2825 dma_free_coherent(adap->pdev_dev, (rq->size + 1) * rq->iqe_len, in free_rspq_fl()
2834 free_rx_bufs(adap, fl, fl->avail); in free_rspq_fl()
2835 dma_free_coherent(adap->pdev_dev, fl->size * 8 + s->stat_len, in free_rspq_fl()
2852 void t4_free_ofld_rxqs(struct adapter *adap, int n, struct sge_ofld_rxq *q) in t4_free_ofld_rxqs() argument
2856 free_rspq_fl(adap, &q->rspq, in t4_free_ofld_rxqs()
2866 void t4_free_sge_resources(struct adapter *adap) in t4_free_sge_resources() argument
2869 struct sge_eth_rxq *eq = adap->sge.ethrxq; in t4_free_sge_resources()
2870 struct sge_eth_txq *etq = adap->sge.ethtxq; in t4_free_sge_resources()
2873 for (i = 0; i < adap->sge.ethqsets; i++, eq++, etq++) { in t4_free_sge_resources()
2875 free_rspq_fl(adap, &eq->rspq, in t4_free_sge_resources()
2878 t4_eth_eq_free(adap, adap->mbox, adap->pf, 0, in t4_free_sge_resources()
2880 free_tx_desc(adap, &etq->q, etq->q.in_use, true); in t4_free_sge_resources()
2882 free_txq(adap, &etq->q); in t4_free_sge_resources()
2887 t4_free_ofld_rxqs(adap, adap->sge.ofldqsets, adap->sge.ofldrxq); in t4_free_sge_resources()
2888 t4_free_ofld_rxqs(adap, adap->sge.rdmaqs, adap->sge.rdmarxq); in t4_free_sge_resources()
2889 t4_free_ofld_rxqs(adap, adap->sge.rdmaciqs, adap->sge.rdmaciq); in t4_free_sge_resources()
2892 for (i = 0; i < ARRAY_SIZE(adap->sge.ofldtxq); i++) { in t4_free_sge_resources()
2893 struct sge_ofld_txq *q = &adap->sge.ofldtxq[i]; in t4_free_sge_resources()
2897 t4_ofld_eq_free(adap, adap->mbox, adap->pf, 0, in t4_free_sge_resources()
2899 free_tx_desc(adap, &q->q, q->q.in_use, false); in t4_free_sge_resources()
2902 free_txq(adap, &q->q); in t4_free_sge_resources()
2907 for (i = 0; i < ARRAY_SIZE(adap->sge.ctrlq); i++) { in t4_free_sge_resources()
2908 struct sge_ctrl_txq *cq = &adap->sge.ctrlq[i]; in t4_free_sge_resources()
2912 t4_ctrl_eq_free(adap, adap->mbox, adap->pf, 0, in t4_free_sge_resources()
2915 free_txq(adap, &cq->q); in t4_free_sge_resources()
2919 if (adap->sge.fw_evtq.desc) in t4_free_sge_resources()
2920 free_rspq_fl(adap, &adap->sge.fw_evtq, NULL); in t4_free_sge_resources()
2922 if (adap->sge.intrq.desc) in t4_free_sge_resources()
2923 free_rspq_fl(adap, &adap->sge.intrq, NULL); in t4_free_sge_resources()
2926 memset(adap->sge.egr_map, 0, in t4_free_sge_resources()
2927 adap->sge.egr_sz * sizeof(*adap->sge.egr_map)); in t4_free_sge_resources()
2930 void t4_sge_start(struct adapter *adap) in t4_sge_start() argument
2932 adap->sge.ethtxq_rover = 0; in t4_sge_start()
2933 mod_timer(&adap->sge.rx_timer, jiffies + RX_QCHECK_PERIOD); in t4_sge_start()
2934 mod_timer(&adap->sge.tx_timer, jiffies + TX_QCHECK_PERIOD); in t4_sge_start()
2945 void t4_sge_stop(struct adapter *adap) in t4_sge_stop() argument
2948 struct sge *s = &adap->sge; in t4_sge_stop()
2980 static int t4_sge_init_soft(struct adapter *adap) in t4_sge_init_soft() argument
2982 struct sge *s = &adap->sge; in t4_sge_init_soft()
2992 if ((t4_read_reg(adap, SGE_CONTROL_A) & RXPKTCPLMODE_F) != in t4_sge_init_soft()
2994 dev_err(adap->pdev_dev, "bad SGE CPL MODE\n"); in t4_sge_init_soft()
3007 t4_read_reg(adap, SGE_FL_BUFFER_SIZE0_A+(x)*sizeof(u32)) in t4_sge_init_soft()
3027 dev_err(adap->pdev_dev, "bad SGE FL page buffer sizes [%d, %d]\n", in t4_sge_init_soft()
3034 if (fl_small_mtu < FL_MTU_SMALL_BUFSIZE(adap) || in t4_sge_init_soft()
3035 fl_large_mtu < FL_MTU_LARGE_BUFSIZE(adap)) { in t4_sge_init_soft()
3036 dev_err(adap->pdev_dev, "bad SGE FL MTU sizes [%d, %d]\n", in t4_sge_init_soft()
3045 timer_value_0_and_1 = t4_read_reg(adap, SGE_TIMER_VALUE_0_AND_1_A); in t4_sge_init_soft()
3046 timer_value_2_and_3 = t4_read_reg(adap, SGE_TIMER_VALUE_2_AND_3_A); in t4_sge_init_soft()
3047 timer_value_4_and_5 = t4_read_reg(adap, SGE_TIMER_VALUE_4_AND_5_A); in t4_sge_init_soft()
3048 s->timer_val[0] = core_ticks_to_us(adap, in t4_sge_init_soft()
3050 s->timer_val[1] = core_ticks_to_us(adap, in t4_sge_init_soft()
3052 s->timer_val[2] = core_ticks_to_us(adap, in t4_sge_init_soft()
3054 s->timer_val[3] = core_ticks_to_us(adap, in t4_sge_init_soft()
3056 s->timer_val[4] = core_ticks_to_us(adap, in t4_sge_init_soft()
3058 s->timer_val[5] = core_ticks_to_us(adap, in t4_sge_init_soft()
3061 ingress_rx_threshold = t4_read_reg(adap, SGE_INGRESS_RX_THRESHOLD_A); in t4_sge_init_soft()
3077 int t4_sge_init(struct adapter *adap) in t4_sge_init() argument
3079 struct sge *s = &adap->sge; in t4_sge_init()
3088 sge_control = t4_read_reg(adap, SGE_CONTROL_A); in t4_sge_init()
3104 if (is_t4(adap->params.chip)) { in t4_sge_init()
3110 sge_control2 = t4_read_reg(adap, SGE_CONTROL2_A); in t4_sge_init()
3121 ret = t4_sge_init_soft(adap); in t4_sge_init()
3137 sge_conm_ctrl = t4_read_reg(adap, SGE_CONM_CTRL_A); in t4_sge_init()
3138 if (is_t4(adap->params.chip)) in t4_sge_init()
3144 t4_idma_monitor_init(adap, &s->idma_monitor); in t4_sge_init()
3149 setup_timer(&s->rx_timer, sge_rx_timer_cb, (unsigned long)adap); in t4_sge_init()
3150 setup_timer(&s->tx_timer, sge_tx_timer_cb, (unsigned long)adap); in t4_sge_init()