Lines matching refs: adap
480 static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q) in ring_fl_db() argument
485 t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id)); in ring_fl_db()
500 static int refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp) in refill_fl() argument
511 if (unlikely(alloc_pg_chunk(adap, q, sd, gfp, in refill_fl()
520 pci_dma_sync_single_for_device(adap->pdev, mapping, in refill_fl()
533 q->gen, adap->pdev); in refill_fl()
535 clear_rx_desc(adap->pdev, q, sd); in refill_fl()
553 ring_fl_db(adap, q); in refill_fl()
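The hits above are the free-list replenish path: refill_fl() allocates buffers into the ring and ring_fl_db() batches the doorbell so that one register write covers many buffers. Below is a minimal user-space sketch of that pattern under simplified assumptions; fl_model, model_refill_fl, and the printf standing in for the A_SG_KDOORBELL write are all invented names, and the real driver additionally maps DMA, clears descriptors on failure, and issues wmb() before the MMIO.

#include <stdlib.h>
#include <stdio.h>

struct fl_desc { void *buf; unsigned int gen; };

struct fl_model {
    struct fl_desc *desc;
    unsigned int size, pidx, gen;
    unsigned int credits, pend_cred;
};

/* Deferred doorbell, as in ring_fl_db(): only touch the (simulated)
 * register once a quarter of the on-ring buffers are pending. */
static void model_ring_fl_db(struct fl_model *q)
{
    if (q->pend_cred >= q->credits / 4) {
        q->pend_cred = 0;
        /* real driver: wmb(); t3_write_reg(adap, A_SG_KDOORBELL, ...) */
        printf("doorbell: pidx=%u gen=%u\n", q->pidx, q->gen);
    }
}

/* Refill loop, as in refill_fl(): allocate up to n buffers, publish each
 * descriptor with the current generation bit, flip the bit on wrap, and
 * stop early on allocation failure. */
static int model_refill_fl(struct fl_model *q, unsigned int n)
{
    unsigned int filled = 0;

    while (n--) {
        void *buf = malloc(2048);      /* stands in for page/skb alloc */
        if (!buf)
            break;                     /* stop on failure, like the driver */
        q->desc[q->pidx].buf = buf;    /* buffers deliberately leaked here */
        q->desc[q->pidx].gen = q->gen; /* hardware uses gen to spot new work */
        if (++q->pidx == q->size) {
            q->pidx = 0;
            q->gen ^= 1;               /* generation flips on each wrap */
        }
        q->credits++;
        q->pend_cred++;
        filled++;
    }
    model_ring_fl_db(q);               /* one doorbell for the whole batch */
    return filled;
}

int main(void)
{
    struct fl_desc d[512] = { 0 };
    struct fl_model q = { .desc = d, .size = 512, .gen = 1 };

    model_refill_fl(&q, 300);          /* publishes 300 buffers, rings once */
    return 0;
}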
558 static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl) in __refill_fl() argument
560 refill_fl(adap, fl, min(MAX_RX_REFILL, fl->size - fl->credits), in __refill_fl()
573 static void recycle_rx_buf(struct adapter *adap, struct sge_fl *q, in recycle_rx_buf() argument
593 ring_fl_db(adap, q); in recycle_rx_buf()
645 if (q->adap && in t3_reset_qset()
646 !(q->adap->flags & NAPI_INIT)) { in t3_reset_qset()
651 q->adap = NULL; in t3_reset_qset()
779 static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl, in get_packet() argument
792 pci_dma_sync_single_for_cpu(adap->pdev, in get_packet()
796 pci_dma_sync_single_for_device(adap->pdev, in get_packet()
802 recycle_rx_buf(adap, fl, fl->cidx); in get_packet()
807 refill_fl(adap, fl, min(MAX_RX_REFILL, fl->size - fl->credits - 1), in get_packet()
812 pci_unmap_single(adap->pdev, dma_unmap_addr(sd, dma_addr), in get_packet()
816 __refill_fl(adap, fl); in get_packet()
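The get_packet() hits show its small-packet policy: below a copy threshold the payload is copied into a fresh buffer and the DMA buffer is recycled in place (the sync-for-cpu / sync-for-device pair plus recycle_rx_buf()), while larger packets are unmapped and handed up, after which the slot is refilled. A compilable sketch of that decision; COPY_THRESHOLD and the helper names are illustrative (SGE_RX_COPY_THRES plays this role in the driver).

#include <string.h>
#include <stdlib.h>

#define COPY_THRESHOLD 256

struct rx_buf { unsigned char data[2048]; };

/* returns a buffer the caller owns; *recycled tells the ring what happened */
static unsigned char *model_get_packet(struct rx_buf *slot, size_t len,
                                       int *recycled)
{
    if (len <= COPY_THRESHOLD) {
        unsigned char *copy = malloc(len);
        if (copy) {
            /* real code: pci_dma_sync_single_for_cpu(), memcpy into a
             * small skb, pci_dma_sync_single_for_device(), then
             * recycle_rx_buf() keeps the mapping alive */
            memcpy(copy, slot->data, len);
            *recycled = 1;             /* slot stays on the free list */
            return copy;
        }
    }
    /* real code: pci_unmap_single(), hand the skb up, then __refill_fl() */
    *recycled = 0;
    return slot->data;
}

int main(void)
{
    struct rx_buf slot = { { 0 } };
    int recycled;
    unsigned char *pkt = model_get_packet(&slot, 128, &recycled);
    if (recycled)
        free(pkt);                     /* only copies are heap-allocated */
    return 0;
}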
838 static struct sk_buff *get_packet_pg(struct adapter *adap, struct sge_fl *fl, in get_packet_pg() argument
852 pci_dma_sync_single_for_cpu(adap->pdev, dma_addr, len, in get_packet_pg()
855 pci_dma_sync_single_for_device(adap->pdev, dma_addr, in get_packet_pg()
862 recycle_rx_buf(adap, fl, fl->cidx); in get_packet_pg()
881 pci_dma_sync_single_for_cpu(adap->pdev, dma_addr, len, in get_packet_pg()
885 pci_unmap_page(adap->pdev, in get_packet_pg()
1007 static inline void check_ring_tx_db(struct adapter *adap, struct sge_txq *q) in check_ring_tx_db() argument
1013 t3_write_reg(adap, A_SG_KDOORBELL, in check_ring_tx_db()
1018 t3_write_reg(adap, A_SG_KDOORBELL, in check_ring_tx_db()
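check_ring_tx_db() hides an ordering requirement: the descriptor stores must be globally visible before the doorbell write that makes hardware fetch them, which is what the driver's wmb() ahead of t3_write_reg() enforces. Here is a user-space analogue of that rule using C11 fences; ring, doorbell, and model_post_and_ring are invented stand-ins, and the "device" is simply another thread's view of memory.

#include <stdatomic.h>

struct txd_model { unsigned long long flit[2]; };

static struct txd_model ring[256];
static _Atomic unsigned int doorbell;   /* stands in for A_SG_KDOORBELL */

static void model_post_and_ring(unsigned int pidx, unsigned long long wr)
{
    ring[pidx].flit[0] = wr;            /* plain descriptor stores */
    ring[pidx].flit[1] = 0;

    /* release fence = wmb(): no descriptor store may pass the doorbell */
    atomic_thread_fence(memory_order_release);
    atomic_store_explicit(&doorbell, pidx + 1, memory_order_relaxed);
}

int main(void)
{
    model_post_and_ring(0, 0x1ULL);
    return 0;
}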
1137 static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb, in write_tx_pkt_wr() argument
1199 sgl_flits = make_sgl(skb, sgp, skb->data, skb_headlen(skb), adap->pdev); in write_tx_pkt_wr()
1226 struct adapter *adap = pi->adapter; in t3_eth_xmit() local
1245 reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK); in t3_eth_xmit()
1252 dev_err(&adap->pdev->dev, in t3_eth_xmit()
1315 write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl); in t3_eth_xmit()
1316 check_ring_tx_db(adap, q); in t3_eth_xmit()
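The t3_eth_xmit() hits outline the TX fast path: reclaim finished descriptors, refuse the packet if the ring lacks room (the driver stops the netif queue), otherwise claim descriptors, advance pidx with a generation flip on wrap, write the work request, and ring the doorbell. A skeleton of that bookkeeping under simplified assumptions; all names are illustrative, and the real reclaim walks the consumer index rather than taking a count.

#include <stdbool.h>

struct txq_model {
    unsigned int size, in_use, pidx, gen;
};

static void model_reclaim_completed_tx(struct txq_model *q, unsigned int done)
{
    q->in_use -= done;                  /* reclaim_completed_tx() in spirit */
}

static bool model_eth_xmit(struct txq_model *q, unsigned int ndesc,
                           unsigned int hw_done)
{
    model_reclaim_completed_tx(q, hw_done);

    if (q->in_use + ndesc > q->size)
        return false;                   /* driver: netif_tx_stop_queue + BUSY */

    q->in_use += ndesc;
    for (unsigned int i = 0; i < ndesc; i++) {
        /* real code: write_tx_pkt_wr() fills the work request here */
        if (++q->pidx == q->size) {
            q->pidx = 0;
            q->gen ^= 1;
        }
    }
    /* real code: check_ring_tx_db(adap, q) */
    return true;
}

int main(void)
{
    struct txq_model q = { .size = 1024, .gen = 1 };
    (void)model_eth_xmit(&q, 2, 0);
    return 0;
}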
1370 static inline int check_desc_avail(struct adapter *adap, struct sge_txq *q, in check_desc_avail() argument
1425 static int ctrl_xmit(struct adapter *adap, struct sge_txq *q, in ctrl_xmit() argument
1443 ret = check_desc_avail(adap, q, skb, 1, TXQ_CTRL); in ctrl_xmit()
1461 t3_write_reg(adap, A_SG_KDOORBELL, in ctrl_xmit()
1505 t3_write_reg(qs->adap, A_SG_KDOORBELL, in restart_ctrlq()
1512 int t3_mgmt_tx(struct adapter *adap, struct sk_buff *skb) in t3_mgmt_tx() argument
1516 ret = ctrl_xmit(adap, &adap->sge.qs[0].txq[TXQ_CTRL], skb); in t3_mgmt_tx()
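The ctrl_xmit()/restart_ctrlq() hits are the control-queue backpressure pattern: when check_desc_avail() finds no room, the packet is parked on a software sendq and the queue flagged stopped; a later restart (run from a tasklet in the driver) drains the backlog before new traffic. A simplified single-threaded model; every name here is invented for illustration.

#include <stddef.h>

struct pkt { struct pkt *next; };

struct ctrlq_model {
    unsigned int in_use, size;
    int stopped;
    struct pkt *sendq_head, **sendq_tail;
};

static int model_ctrl_xmit(struct ctrlq_model *q, struct pkt *p)
{
    if (q->stopped || q->in_use == q->size) {
        q->stopped = 1;                 /* driver: mark TXQ stopped, queue skb */
        p->next = NULL;
        *q->sendq_tail = p;
        q->sendq_tail = &p->next;
        return 1;                       /* deferred, akin to NET_XMIT_CN */
    }
    q->in_use++;                        /* real code writes the WR + doorbell */
    return 0;
}

static void model_restart_ctrlq(struct ctrlq_model *q, unsigned int freed)
{
    q->in_use -= freed;
    while (q->sendq_head && q->in_use < q->size) {
        struct pkt *p = q->sendq_head;
        q->sendq_head = p->next;
        if (!q->sendq_head)
            q->sendq_tail = &q->sendq_head;
        q->in_use++;                    /* replay the deferred WR */
    }
    if (!q->sendq_head)
        q->stopped = 0;                 /* driver clears the stopped flag */
}

int main(void)
{
    struct pkt a, b;
    struct ctrlq_model q = { .size = 1, .sendq_tail = &q.sendq_head };
    model_ctrl_xmit(&q, &a);    /* fills the only slot */
    model_ctrl_xmit(&q, &b);    /* parked on the sendq */
    model_restart_ctrlq(&q, 1); /* frees one slot and drains b */
    return 0;
}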
1578 static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb, in write_ofld_wr() argument
1604 adap->pdev); in write_ofld_wr()
1606 setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits); in write_ofld_wr()
1643 static int ofld_xmit(struct adapter *adap, struct sge_txq *q, in ofld_xmit() argument
1650 again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK); in ofld_xmit()
1652 ret = check_desc_avail(adap, q, skb, ndesc, TXQ_OFLD); in ofld_xmit()
1672 write_ofld_wr(adap, skb, q, pidx, gen, ndesc); in ofld_xmit()
1673 check_ring_tx_db(adap, q); in ofld_xmit()
1689 struct adapter *adap = pi->adapter; in restart_offloadq() local
1692 again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK); in restart_offloadq()
1720 write_ofld_wr(adap, skb, q, pidx, gen, ndesc); in restart_offloadq()
1730 t3_write_reg(adap, A_SG_KDOORBELL, in restart_offloadq()
1769 struct adapter *adap = tdev2adap(tdev); in t3_offload_tx() local
1770 struct sge_qset *qs = &adap->sge.qs[queue_set(skb)]; in t3_offload_tx()
1773 return ctrl_xmit(adap, &qs->txq[TXQ_CTRL], skb); in t3_offload_tx()
1775 return ofld_xmit(adap, &qs->txq[TXQ_OFLD], skb); in t3_offload_tx()
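t3_offload_tx() is a two-way dispatch: control messages take the control queue, bulk offload traffic takes the offload queue of the queue set picked from the skb. A minimal sketch of that split; ofld_msg and model_offload_tx are invented stand-ins for the skb and queue_set() logic.

#include <stdio.h>

enum txq { TXQ_ETH, TXQ_OFLD, TXQ_CTRL };

struct ofld_msg { int is_ctrl; int qset; };

/* mirrors t3_offload_tx(): control messages to TXQ_CTRL, the rest to
 * TXQ_OFLD of the message's queue set */
static enum txq model_offload_tx(const struct ofld_msg *m, int *qset)
{
    *qset = m->qset;                    /* queue_set(skb) in the driver */
    return m->is_ctrl ? TXQ_CTRL : TXQ_OFLD;
}

int main(void)
{
    struct ofld_msg ctrl = { 1, 0 }, bulk = { 0, 1 };
    int qs;
    printf("ctrl -> txq %d\n", model_offload_tx(&ctrl, &qs));
    printf("bulk -> txq %d (qset %d)\n", model_offload_tx(&bulk, &qs), qs);
    return 0;
}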
1834 struct adapter *adapter = qs->adap; in ofld_poll()
2013 static void rx_eth(struct adapter *adap, struct sge_rspq *rq, in rx_eth() argument
2021 skb->protocol = eth_type_trans(skb, adap->port[p->iff]); in rx_eth()
2029 skb_record_rx_queue(skb, qs - &adap->sge.qs[pi->first_qset]); in rx_eth()
2063 static void lro_add_page(struct adapter *adap, struct sge_qset *qs, in lro_add_page() argument
2081 pci_dma_sync_single_for_cpu(adap->pdev, in lro_add_page()
2088 pci_unmap_page(adap->pdev, in lro_add_page()
2131 skb_record_rx_queue(skb, qs - &adap->sge.qs[pi->first_qset]); in lro_add_page()
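Both rx_eth() and lro_add_page() record the RX queue index by pointer arithmetic: the handling queue set's offset from the port's first queue set in the adapter-wide array. A tiny demonstration of that idiom; qset, qs_array, and first_qset are illustrative names.

#include <stdio.h>

struct qset { int dummy; };

int main(void)
{
    struct qset qs_array[8];
    int first_qset = 2;                 /* this port's first queue set */
    struct qset *qs = &qs_array[5];     /* queue set handling this packet */

    /* qs - &adap->sge.qs[pi->first_qset] in the driver */
    long rxq = qs - &qs_array[first_qset];
    printf("rx queue %ld\n", rxq);      /* prints 3 */
    return 0;
}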
2185 static void check_ring_db(struct adapter *adap, struct sge_qset *qs, in check_ring_db() argument
2194 t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX | in check_ring_db()
2205 t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX | in check_ring_db()
2255 static int process_responses(struct adapter *adap, struct sge_qset *qs, in process_responses() argument
2314 __refill_fl(adap, fl); in process_responses()
2316 lro_add_page(adap, qs, fl, in process_responses()
2322 skb = get_packet_pg(adap, fl, q, in process_responses()
2328 skb = get_packet(adap, fl, G_RSPD_LEN(len), in process_responses()
2356 refill_rspq(adap, q, q->credits); in process_responses()
2366 rx_eth(adap, q, skb, ethpad, lro); in process_responses()
2372 ngathered = rx_offload(&adap->tdev, q, skb, in process_responses()
2383 deliver_partial_bundle(&adap->tdev, q, offload_skbs, ngathered); in process_responses()
2386 check_ring_db(adap, qs, sleeping); in process_responses()
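The process_responses() hits trace its per-response dispatch: a response either carries immediate data or points at a free-list buffer that becomes an skb via get_packet()/get_packet_pg(), eligible page chunks on the ethernet path may be merged by lro_add_page(), ethernet frames then go to rx_eth(), and offload messages are bundled toward rx_offload(). A control-flow-only sketch under those simplifying assumptions; the enum and function are invented.

#include <stdio.h>

/* one response ring entry, reduced to the fields the dispatch needs */
enum rsp_kind { RSP_IMMEDIATE, RSP_FL_BUF, RSP_FL_PAGE };

static const char *model_dispatch(enum rsp_kind k, int is_eth, int lro)
{
    if (k == RSP_FL_PAGE && is_eth && lro)
        return "lro_add_page";          /* merged into an LRO session */
    /* RSP_IMMEDIATE copies data out of the response itself;
     * RSP_FL_BUF/RSP_FL_PAGE go through get_packet()/get_packet_pg() */
    return is_eth ? "rx_eth" : "rx_offload bundle";
}

int main(void)
{
    printf("%s\n", model_dispatch(RSP_FL_PAGE, 1, 1));   /* lro_add_page */
    printf("%s\n", model_dispatch(RSP_FL_BUF, 1, 0));    /* rx_eth */
    printf("%s\n", model_dispatch(RSP_IMMEDIATE, 0, 0)); /* rx_offload bundle */
    return 0;
}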
2413 struct adapter *adap = qs->adap; in napi_rx_handler() local
2414 int work_done = process_responses(adap, qs, budget); in napi_rx_handler()
2433 t3_write_reg(adap, A_SG_GTS, V_RSPQ(qs->rspq.cntxt_id) | in napi_rx_handler()
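napi_rx_handler() shows the standard NAPI contract: consume at most `budget` responses per poll, and only once the queue drains below budget does the handler complete NAPI and re-arm the interrupt (the A_SG_GTS write above). A user-space model of that loop, with a plain counter standing in for pending response descriptors.

#include <stdio.h>

struct rspq_model { unsigned int pending; };

static int model_process_responses(struct rspq_model *q, int budget)
{
    int done = 0;
    while (done < budget && q->pending) {
        q->pending--;       /* real code parses rsp descriptors, builds skbs */
        done++;
    }
    return done;
}

static int model_napi_poll(struct rspq_model *q, int budget)
{
    int work_done = model_process_responses(q, budget);

    if (work_done < budget) {
        /* driver: napi_complete(), then t3_write_reg(adap, A_SG_GTS, ...)
         * to re-enable this response queue's interrupt */
        printf("irq re-armed, %u left\n", q->pending);
    }
    return work_done;
}

int main(void)
{
    struct rspq_model q = { .pending = 100 };
    while (model_napi_poll(&q, 64) == 64)
        ;                   /* stays in polling mode while budgets are spent */
    return 0;
}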
2462 static int process_pure_responses(struct adapter *adap, struct sge_qset *qs, in process_pure_responses() argument
2486 refill_rspq(adap, q, q->credits); in process_pure_responses()
2495 check_ring_db(adap, qs, sleeping); in process_pure_responses()
2519 static inline int handle_responses(struct adapter *adap, struct sge_rspq *q) in handle_responses() argument
2527 if (is_pure_response(r) && process_pure_responses(adap, qs, r) == 0) { in handle_responses()
2528 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) | in handle_responses()
2543 struct adapter *adap = qs->adap; in t3_sge_intr_msix() local
2547 if (process_responses(adap, qs, -1) == 0) in t3_sge_intr_msix()
2549 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) | in t3_sge_intr_msix()
2566 if (handle_responses(qs->adap, q) < 0) in t3_sge_intr_msix_napi()
2581 struct adapter *adap = cookie; in t3_intr_msi() local
2582 struct sge_rspq *q = &adap->sge.qs[0].rspq; in t3_intr_msi()
2586 if (process_responses(adap, &adap->sge.qs[0], -1)) { in t3_intr_msi()
2587 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) | in t3_intr_msi()
2592 if (adap->params.nports == 2 && in t3_intr_msi()
2593 process_responses(adap, &adap->sge.qs[1], -1)) { in t3_intr_msi()
2594 struct sge_rspq *q1 = &adap->sge.qs[1].rspq; in t3_intr_msi()
2596 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q1->cntxt_id) | in t3_intr_msi()
2602 if (!new_packets && t3_slow_intr_handler(adap) == 0) in t3_intr_msi()
2631 struct adapter *adap = cookie; in t3_intr_msi_napi() local
2632 struct sge_rspq *q = &adap->sge.qs[0].rspq; in t3_intr_msi_napi()
2636 new_packets = rspq_check_napi(&adap->sge.qs[0]); in t3_intr_msi_napi()
2637 if (adap->params.nports == 2) in t3_intr_msi_napi()
2638 new_packets += rspq_check_napi(&adap->sge.qs[1]); in t3_intr_msi_napi()
2639 if (!new_packets && t3_slow_intr_handler(adap) == 0) in t3_intr_msi_napi()
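t3_intr_msi() and t3_intr_msi_napi() share one shape: a single MSI vector services every queue set, so the handler polls queue set 0, then queue set 1 on dual-port cards, and falls through to the slow-path handler (non-data interrupt causes) only when no response queue had work. A sketch of that shape; qs_model and friends are invented names.

#include <stdbool.h>

struct qs_model { unsigned int pending; };

static bool model_process(struct qs_model *qs)
{
    bool had_work = qs->pending != 0;
    qs->pending = 0;                  /* process_responses(adap, qs, -1) */
    return had_work;
}

static void model_slow_intr(void)
{
    /* t3_slow_intr_handler(adap): error/async causes in the driver */
}

static void model_t3_intr_msi(struct qs_model qs[2], int nports)
{
    bool new_packets = false;

    if (model_process(&qs[0]))
        new_packets = true;           /* driver also writes A_SG_GTS here */
    if (nports == 2 && model_process(&qs[1]))
        new_packets = true;
    if (!new_packets)
        model_slow_intr();
}

int main(void)
{
    struct qs_model qs[2] = { { 3 }, { 0 } };
    model_t3_intr_msi(qs, 2);
    return 0;
}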
2649 static inline int process_responses_gts(struct adapter *adap, in process_responses_gts() argument
2654 work = process_responses(adap, rspq_to_qset(rq), -1); in process_responses_gts()
2655 t3_write_reg(adap, A_SG_GTS, V_RSPQ(rq->cntxt_id) | in process_responses_gts()
2669 struct adapter *adap = cookie; in t3_intr() local
2670 struct sge_rspq *q0 = &adap->sge.qs[0].rspq; in t3_intr()
2671 struct sge_rspq *q1 = &adap->sge.qs[1].rspq; in t3_intr()
2676 w1 = adap->params.nports == 2 && in t3_intr()
2680 t3_write_reg(adap, A_PL_CLI, 0); in t3_intr()
2681 t3_read_reg(adap, A_PL_CLI); /* flush */ in t3_intr()
2684 process_responses_gts(adap, q0); in t3_intr()
2687 process_responses_gts(adap, q1); in t3_intr()
2691 work_done = t3_slow_intr_handler(adap); in t3_intr()
2707 struct adapter *adap = cookie; in t3b_intr() local
2708 struct sge_rspq *q0 = &adap->sge.qs[0].rspq; in t3b_intr()
2710 t3_write_reg(adap, A_PL_CLI, 0); in t3b_intr()
2711 map = t3_read_reg(adap, A_SG_DATA_INTR); in t3b_intr()
2719 t3_slow_intr_handler(adap); in t3b_intr()
2722 process_responses_gts(adap, q0); in t3b_intr()
2725 process_responses_gts(adap, &adap->sge.qs[1].rspq); in t3b_intr()
2741 struct adapter *adap = cookie; in t3b_intr_napi() local
2742 struct sge_qset *qs0 = &adap->sge.qs[0]; in t3b_intr_napi()
2745 t3_write_reg(adap, A_PL_CLI, 0); in t3b_intr_napi()
2746 map = t3_read_reg(adap, A_SG_DATA_INTR); in t3b_intr_napi()
2754 t3_slow_intr_handler(adap); in t3b_intr_napi()
2760 napi_schedule(&adap->sge.qs[1].napi); in t3b_intr_napi()
2775 irq_handler_t t3_intr_handler(struct adapter *adap, int polling) in t3_intr_handler() argument
2777 if (adap->flags & USING_MSIX) in t3_intr_handler()
2779 if (adap->flags & USING_MSI) in t3_intr_handler()
2781 if (adap->params.rev > 0) in t3_intr_handler()
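t3_intr_handler() selects the ISR once, by interrupt type (MSI-X, MSI, legacy INTx), chip revision, and whether NAPI polling is on; rev-0 parts get a single non-NAPI legacy handler. The sketch below mirrors that selection with stand-in function pointers; the USING_MSIX/USING_MSI values and all handler names here are invented placeholders for the driver's flags and functions.

#include <stdio.h>

#define USING_MSIX 0x1
#define USING_MSI  0x2

typedef void (*irq_fn)(void);

static void msix_napi(void) { puts("msix+napi"); }
static void msix(void)      { puts("msix"); }
static void msi_napi(void)  { puts("msi+napi"); }
static void msi(void)       { puts("msi"); }
static void t3b_napi(void)  { puts("rev>0 intx+napi"); }
static void t3b(void)       { puts("rev>0 intx"); }
static void t3a(void)       { puts("rev0 intx"); }

static irq_fn pick_handler(unsigned int flags, int rev, int polling)
{
    if (flags & USING_MSIX)
        return polling ? msix_napi : msix;
    if (flags & USING_MSI)
        return polling ? msi_napi : msi;
    if (rev > 0)
        return polling ? t3b_napi : t3b;
    return t3a;               /* rev-0 parts share one non-NAPI handler */
}

int main(void)
{
    pick_handler(USING_MSI, 2, 1)();   /* prints "msi+napi" */
    return 0;
}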
2860 struct adapter *adap = pi->adapter; in sge_timer_tx() local
2865 tbd[TXQ_ETH] = reclaim_completed_tx(adap, &qs->txq[TXQ_ETH], in sge_timer_tx()
2871 tbd[TXQ_OFLD] = reclaim_completed_tx(adap, &qs->txq[TXQ_OFLD], in sge_timer_tx()
2901 struct adapter *adap = pi->adapter; in sge_timer_rx() local
2904 lock = adap->params.rev > 0 ? in sge_timer_rx()
2905 &qs->rspq.lock : &adap->sge.qs[0].rspq.lock; in sge_timer_rx()
2913 if (adap->params.rev < 4) { in sge_timer_rx()
2914 status = t3_read_reg(adap, A_SG_RSPQ_FL_STATUS); in sge_timer_rx()
2920 refill_rspq(adap, &qs->rspq, 1); in sge_timer_rx()
2922 t3_write_reg(adap, A_SG_RSPQ_FL_STATUS, in sge_timer_rx()
2929 __refill_fl(adap, &qs->fl[0]); in sge_timer_rx()
2931 __refill_fl(adap, &qs->fl[1]); in sge_timer_rx()
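sge_timer_tx() and sge_timer_rx() are safety nets: if interrupts stall or an earlier allocation failed, a response queue or free list can starve and never generate the event that would refill it, so a periodic timer nudges them. A minimal model of the RX-side starvation check; the real code runs under a lock, consults A_SG_RSPQ_FL_STATUS, and calls refill_rspq()/__refill_fl().

struct starve_model {
    unsigned int rspq_credits, fl_credits, fl_low_water;
};

static void model_sge_timer_rx(struct starve_model *qs)
{
    if (qs->rspq_credits == 0)
        qs->rspq_credits = 1;          /* driver: refill_rspq(adap, rspq, 1)
                                        * and clear the starved status bit */
    if (qs->fl_credits < qs->fl_low_water)
        qs->fl_credits = qs->fl_low_water;   /* driver: __refill_fl(adap, fl) */
}

int main(void)
{
    struct starve_model qs = { 0, 10, 64 };
    model_sge_timer_rx(&qs);
    return 0;
}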
3105 q->adap = adapter; in t3_sge_alloc_qset()
3145 void t3_start_sge_timers(struct adapter *adap) in t3_start_sge_timers() argument
3150 struct sge_qset *q = &adap->sge.qs[i]; in t3_start_sge_timers()
3166 void t3_stop_sge_timers(struct adapter *adap) in t3_stop_sge_timers() argument
3171 struct sge_qset *q = &adap->sge.qs[i]; in t3_stop_sge_timers()
3186 void t3_free_sge_resources(struct adapter *adap) in t3_free_sge_resources() argument
3191 t3_free_qset(adap, &adap->sge.qs[i]); in t3_free_sge_resources()
3201 void t3_sge_start(struct adapter *adap) in t3_sge_start() argument
3203 t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, F_GLOBALENABLE); in t3_sge_start()
3219 void t3_sge_stop(struct adapter *adap) in t3_sge_stop() argument
3221 t3_set_reg_field(adap, A_SG_CONTROL, F_GLOBALENABLE, 0); in t3_sge_stop()
3226 struct sge_qset *qs = &adap->sge.qs[i]; in t3_sge_stop()
3244 void t3_sge_init(struct adapter *adap, struct sge_params *p) in t3_sge_init() argument
3246 unsigned int ctrl, ups = ffs(pci_resource_len(adap->pdev, 2) >> 12); in t3_sge_init()
3255 if (adap->params.rev > 0) { in t3_sge_init()
3256 if (!(adap->flags & (USING_MSIX | USING_MSI))) in t3_sge_init()
3259 t3_write_reg(adap, A_SG_CONTROL, ctrl); in t3_sge_init()
3260 t3_write_reg(adap, A_SG_EGR_RCQ_DRB_THRSH, V_HIRCQDRBTHRSH(512) | in t3_sge_init()
3262 t3_write_reg(adap, A_SG_TIMER_TICK, core_ticks_per_usec(adap) / 10); in t3_sge_init()
3263 t3_write_reg(adap, A_SG_CMDQ_CREDIT_TH, V_THRESHOLD(32) | in t3_sge_init()
3264 V_TIMEOUT(200 * core_ticks_per_usec(adap))); in t3_sge_init()
3265 t3_write_reg(adap, A_SG_HI_DRB_HI_THRSH, in t3_sge_init()
3266 adap->params.rev < T3_REV_C ? 1000 : 500); in t3_sge_init()
3267 t3_write_reg(adap, A_SG_HI_DRB_LO_THRSH, 256); in t3_sge_init()
3268 t3_write_reg(adap, A_SG_LO_DRB_HI_THRSH, 1000); in t3_sge_init()
3269 t3_write_reg(adap, A_SG_LO_DRB_LO_THRSH, 256); in t3_sge_init()
3270 t3_write_reg(adap, A_SG_OCO_BASE, V_BASE1(0xfff)); in t3_sge_init()
3271 t3_write_reg(adap, A_SG_DRB_PRI_THRESH, 63 * 1024); in t3_sge_init()
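The first t3_sge_init() hit computes ups = ffs(pci_resource_len(adap->pdev, 2) >> 12): a power-of-two BAR 2 length, expressed in 4 KB pages, is turned into a small size code via find-first-set (a 64 KB BAR gives ffs(16) = 5). A quick demonstration with glibc's ffs(); the BAR length here is a made-up example value.

#include <strings.h>
#include <stdio.h>

int main(void)
{
    unsigned long bar2_len = 64 * 1024;      /* hypothetical BAR 2 size */
    unsigned int ups = ffs(bar2_len >> 12);  /* 65536 >> 12 = 16, ffs = 5 */
    printf("ups = %u\n", ups);               /* prints 5 */
    return 0;
}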
3283 void t3_sge_prep(struct adapter *adap, struct sge_params *p) in t3_sge_prep() argument
3293 q->polling = adap->params.rev > 0; in t3_sge_prep()
3304 spin_lock_init(&adap->sge.reg_lock); in t3_sge_prep()