Lines Matching refs: adap
311 struct adapter *adap = pi->adapter; in dcb_tx_queue_prio_enable() local
312 struct sge_eth_txq *txq = &adap->sge.ethtxq[pi->first_qset]; in dcb_tx_queue_prio_enable()
332 err = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1, in dcb_tx_queue_prio_enable()
337 dev_err(adap->pdev_dev, in dcb_tx_queue_prio_enable()
366 void t4_os_portmod_changed(const struct adapter *adap, int port_id) in t4_os_portmod_changed() argument
372 const struct net_device *dev = adap->port[port_id]; in t4_os_portmod_changed()
519 static void dcb_rpl(struct adapter *adap, const struct fw_port_cmd *pcmd) in dcb_rpl() argument
522 struct net_device *dev = adap->port[port]; in dcb_rpl()
526 cxgb4_dcb_handle_fw_update(adap, pcmd); in dcb_rpl()
541 static void clear_filter(struct adapter *adap, struct filter_entry *f) in clear_filter() argument
561 static void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl) in filter_rpl() argument
564 unsigned int nidx = idx - adap->tids.ftid_base; in filter_rpl()
568 if (idx >= adap->tids.ftid_base && nidx < in filter_rpl()
569 (adap->tids.nftids + adap->tids.nsftids)) { in filter_rpl()
572 f = &adap->tids.ftid_tab[idx]; in filter_rpl()
578 clear_filter(adap, f); in filter_rpl()
580 dev_err(adap->pdev_dev, "filter %u setup failed due to full SMT\n", in filter_rpl()
582 clear_filter(adap, f); in filter_rpl()
591 dev_err(adap->pdev_dev, "filter %u setup failed with error %u\n", in filter_rpl()
593 clear_filter(adap, f); in filter_rpl()
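
The bounds check in filter_rpl() above translates a raw hardware TID into an offset inside the filter region before touching ftid_tab. A minimal userspace model of just that check, with struct tid_info cut down to the three fields the check reads (the values in main() are illustrative, not taken from this listing):

    #include <stdbool.h>
    #include <stdio.h>

    struct tid_info {
        unsigned int ftid_base;   /* first hardware filter TID */
        unsigned int nftids;      /* normal filter TIDs */
        unsigned int nsftids;     /* server filter TIDs */
    };

    static bool tid_is_filter(const struct tid_info *t, unsigned int idx)
    {
        /* nidx wraps to a huge value when idx < ftid_base, so the
         * second comparison also fails for out-of-range-low TIDs */
        unsigned int nidx = idx - t->ftid_base;

        return idx >= t->ftid_base &&
               nidx < (t->nftids + t->nsftids);
    }

    int main(void)
    {
        struct tid_info t = { .ftid_base = 100, .nftids = 496 };

        printf("%d %d\n", tid_is_filter(&t, 99),
               tid_is_filter(&t, 100));              /* 0 1 */
        return 0;
    }
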
615 dev_err(q->adap->pdev_dev, "unexpected FW4/CPL %#x on FW event queue\n" in fwevtq_handler()
626 txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start]; in fwevtq_handler()
628 if ((u8 *)txq < (u8 *)q->adap->sge.ofldtxq) { in fwevtq_handler()
652 struct net_device *dev = q->adap->port[port]; in fwevtq_handler()
663 dcb_rpl(q->adap, pcmd); in fwevtq_handler()
667 t4_handle_fw_rpl(q->adap, p->data); in fwevtq_handler()
671 do_l2t_write_rpl(q->adap, p); in fwevtq_handler()
675 filter_rpl(q->adap, p); in fwevtq_handler()
677 dev_err(q->adap->pdev_dev, in fwevtq_handler()
703 if (ulds[q->uld].rx_handler(q->adap->uld_handle[q->uld], rsp, gl)) { in uldrx_handler()
732 struct adapter *adap = cookie; in t4_nondata_intr() local
733 u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A)); in t4_nondata_intr()
736 adap->swintr = 1; in t4_nondata_intr()
737 t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A), v); in t4_nondata_intr()
739 if (adap->flags & MASTER_PF) in t4_nondata_intr()
740 t4_slow_intr_handler(adap); in t4_nondata_intr()
747 static void name_msix_vecs(struct adapter *adap) in name_msix_vecs() argument
749 int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc); in name_msix_vecs()
752 snprintf(adap->msix_info[0].desc, n, "%s", adap->port[0]->name); in name_msix_vecs()
755 snprintf(adap->msix_info[1].desc, n, "%s-FWeventq", in name_msix_vecs()
756 adap->port[0]->name); in name_msix_vecs()
759 for_each_port(adap, j) { in name_msix_vecs()
760 struct net_device *d = adap->port[j]; in name_msix_vecs()
764 snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d", in name_msix_vecs()
769 for_each_ofldrxq(&adap->sge, i) in name_msix_vecs()
770 snprintf(adap->msix_info[msi_idx++].desc, n, "%s-ofld%d", in name_msix_vecs()
771 adap->port[0]->name, i); in name_msix_vecs()
773 for_each_rdmarxq(&adap->sge, i) in name_msix_vecs()
774 snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma%d", in name_msix_vecs()
775 adap->port[0]->name, i); in name_msix_vecs()
777 for_each_rdmaciq(&adap->sge, i) in name_msix_vecs()
778 snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma-ciq%d", in name_msix_vecs()
779 adap->port[0]->name, i); in name_msix_vecs()
782 static int request_msix_queue_irqs(struct adapter *adap) in request_msix_queue_irqs() argument
784 struct sge *s = &adap->sge; in request_msix_queue_irqs()
788 err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0, in request_msix_queue_irqs()
789 adap->msix_info[1].desc, &s->fw_evtq); in request_msix_queue_irqs()
794 err = request_irq(adap->msix_info[msi_index].vec, in request_msix_queue_irqs()
796 adap->msix_info[msi_index].desc, in request_msix_queue_irqs()
803 err = request_irq(adap->msix_info[msi_index].vec, in request_msix_queue_irqs()
805 adap->msix_info[msi_index].desc, in request_msix_queue_irqs()
812 err = request_irq(adap->msix_info[msi_index].vec, in request_msix_queue_irqs()
814 adap->msix_info[msi_index].desc, in request_msix_queue_irqs()
821 err = request_irq(adap->msix_info[msi_index].vec, in request_msix_queue_irqs()
823 adap->msix_info[msi_index].desc, in request_msix_queue_irqs()
833 free_irq(adap->msix_info[--msi_index].vec, in request_msix_queue_irqs()
836 free_irq(adap->msix_info[--msi_index].vec, in request_msix_queue_irqs()
839 free_irq(adap->msix_info[--msi_index].vec, in request_msix_queue_irqs()
842 free_irq(adap->msix_info[--msi_index].vec, in request_msix_queue_irqs()
844 free_irq(adap->msix_info[1].vec, &s->fw_evtq); in request_msix_queue_irqs()
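
request_msix_queue_irqs() above claims one vector per queue and, on any failure, walks msi_index back down and releases every vector already claimed, in reverse order, before failing out. The idiom in isolation, with request_vec()/free_vec() as hypothetical stand-ins for request_irq()/free_irq():

    #include <stdio.h>

    #define NVEC 4

    /* pretend the third request fails */
    static int request_vec(int i) { return i == 2 ? -1 : 0; }
    static void free_vec(int i)   { printf("freed vec %d\n", i); }

    static int request_all(void)
    {
        int i, err = 0;

        for (i = 0; i < NVEC; i++) {
            err = request_vec(i);
            if (err)
                goto unwind;
        }
        return 0;

    unwind:
        while (--i >= 0)            /* release claimed vectors in reverse */
            free_vec(i);
        return err;
    }

    int main(void)
    {
        printf("request_all() = %d\n", request_all());
        return 0;
    }
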
848 static void free_msix_queue_irqs(struct adapter *adap) in free_msix_queue_irqs() argument
851 struct sge *s = &adap->sge; in free_msix_queue_irqs()
853 free_irq(adap->msix_info[1].vec, &s->fw_evtq); in free_msix_queue_irqs()
855 free_irq(adap->msix_info[msi_index++].vec, &s->ethrxq[i].rspq); in free_msix_queue_irqs()
857 free_irq(adap->msix_info[msi_index++].vec, &s->ofldrxq[i].rspq); in free_msix_queue_irqs()
859 free_irq(adap->msix_info[msi_index++].vec, &s->rdmarxq[i].rspq); in free_msix_queue_irqs()
861 free_irq(adap->msix_info[msi_index++].vec, &s->rdmaciq[i].rspq); in free_msix_queue_irqs()
914 static int setup_rss(struct adapter *adap) in setup_rss() argument
918 for_each_port(adap, i) { in setup_rss()
919 const struct port_info *pi = adap2pinfo(adap, i); in setup_rss()
944 static void quiesce_rx(struct adapter *adap) in quiesce_rx() argument
948 for (i = 0; i < adap->sge.ingr_sz; i++) { in quiesce_rx()
949 struct sge_rspq *q = adap->sge.ingr_map[i]; in quiesce_rx()
963 static void disable_interrupts(struct adapter *adap) in disable_interrupts() argument
965 if (adap->flags & FULL_INIT_DONE) { in disable_interrupts()
966 t4_intr_disable(adap); in disable_interrupts()
967 if (adap->flags & USING_MSIX) { in disable_interrupts()
968 free_msix_queue_irqs(adap); in disable_interrupts()
969 free_irq(adap->msix_info[0].vec, adap); in disable_interrupts()
971 free_irq(adap->pdev->irq, adap); in disable_interrupts()
973 quiesce_rx(adap); in disable_interrupts()
980 static void enable_rx(struct adapter *adap) in enable_rx() argument
984 for (i = 0; i < adap->sge.ingr_sz; i++) { in enable_rx()
985 struct sge_rspq *q = adap->sge.ingr_map[i]; in enable_rx()
994 t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A), in enable_rx()
1000 static int alloc_ofld_rxqs(struct adapter *adap, struct sge_ofld_rxq *q, in alloc_ofld_rxqs() argument
1009 err = t4_sge_alloc_rxq(adap, &q->rspq, false, in alloc_ofld_rxqs()
1010 adap->port[i / per_chan], in alloc_ofld_rxqs()
1030 static int setup_sge_queues(struct adapter *adap) in setup_sge_queues() argument
1033 struct sge *s = &adap->sge; in setup_sge_queues()
1038 if (adap->flags & USING_MSIX) in setup_sge_queues()
1041 err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0, in setup_sge_queues()
1061 err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0], in setup_sge_queues()
1064 freeout: t4_free_sge_resources(adap); in setup_sge_queues()
1068 for_each_port(adap, i) { in setup_sge_queues()
1069 struct net_device *dev = adap->port[i]; in setup_sge_queues()
1077 err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, in setup_sge_queues()
1080 t4_get_mps_bg_map(adap, in setup_sge_queues()
1088 err = t4_sge_alloc_eth_txq(adap, t, dev, in setup_sge_queues()
1096 j = s->ofldqsets / adap->params.nports; /* ofld queues per channel */ in setup_sge_queues()
1098 err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i], in setup_sge_queues()
1099 adap->port[i / j], in setup_sge_queues()
1106 err = alloc_ofld_rxqs(adap, firstq, nq, per_chan, msi_idx, ids); \ in setup_sge_queues()
1115 j = s->rdmaciqs / adap->params.nports; /* rdmaq queues per channel */ in setup_sge_queues()
1120 for_each_port(adap, i) { in setup_sge_queues()
1125 err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i], in setup_sge_queues()
1132 t4_write_reg(adap, is_t4(adap->params.chip) ? in setup_sge_queues()
1135 RSSCONTROL_V(netdev2pinfo(adap->port[0])->tx_chan) | in setup_sge_queues()
1402 struct adapter *adap = q->adap; in cxgb4_set_rspq_intr_params() local
1411 new_idx = closest_thres(&adap->sge, cnt); in cxgb4_set_rspq_intr_params()
1418 err = t4_set_params(adap, adap->mbox, adap->pf, 0, 1, in cxgb4_set_rspq_intr_params()
1426 us = us == 0 ? 6 : closest_timer(&adap->sge, us); in cxgb4_set_rspq_intr_params()
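
cxgb4_set_rspq_intr_params() maps a requested holdoff (microseconds or packet count) onto the nearest value the hardware actually supports, via closest_timer()/closest_thres(). A sketch of that nearest-entry search over an illustrative table (the real tables are read from SGE registers at init; these numbers are made up):

    #include <stdio.h>
    #include <stdlib.h>

    static int closest_idx(const int *tbl, int n, int want)
    {
        int i, best = 0;

        for (i = 1; i < n; i++)
            if (abs(tbl[i] - want) < abs(tbl[best] - want))
                best = i;
        return best;
    }

    int main(void)
    {
        int timer_us[] = { 1, 5, 10, 50, 100, 200 };   /* made-up table */

        printf("idx=%d\n", closest_idx(timer_us, 6, 60));   /* idx=3 */
        return 0;
    }
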
1448 static int setup_debugfs(struct adapter *adap) in setup_debugfs() argument
1450 if (IS_ERR_OR_NULL(adap->debugfs_root)) in setup_debugfs()
1454 t4_setup_debugfs(adap); in setup_debugfs()
1615 struct adapter *adap = container_of(t, struct adapter, tids); in cxgb4_queue_tid_release() local
1617 spin_lock_bh(&adap->tid_release_lock); in cxgb4_queue_tid_release()
1618 *p = adap->tid_release_head; in cxgb4_queue_tid_release()
1620 adap->tid_release_head = (void **)((uintptr_t)p | chan); in cxgb4_queue_tid_release()
1621 if (!adap->tid_release_task_busy) { in cxgb4_queue_tid_release()
1622 adap->tid_release_task_busy = true; in cxgb4_queue_tid_release()
1623 queue_work(adap->workq, &adap->tid_release_task); in cxgb4_queue_tid_release()
1625 spin_unlock_bh(&adap->tid_release_lock); in cxgb4_queue_tid_release()
1634 struct adapter *adap; in process_tid_release_list() local
1636 adap = container_of(work, struct adapter, tid_release_task); in process_tid_release_list()
1638 spin_lock_bh(&adap->tid_release_lock); in process_tid_release_list()
1639 while (adap->tid_release_head) { in process_tid_release_list()
1640 void **p = adap->tid_release_head; in process_tid_release_list()
1644 adap->tid_release_head = *p; in process_tid_release_list()
1646 spin_unlock_bh(&adap->tid_release_lock); in process_tid_release_list()
1652 mk_tid_release(skb, chan, p - adap->tids.tid_tab); in process_tid_release_list()
1653 t4_ofld_send(adap, skb); in process_tid_release_list()
1654 spin_lock_bh(&adap->tid_release_lock); in process_tid_release_list()
1656 adap->tid_release_task_busy = false; in process_tid_release_list()
1657 spin_unlock_bh(&adap->tid_release_lock); in process_tid_release_list()
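
cxgb4_queue_tid_release() and process_tid_release_list() above chain pending TID releases through the (currently unused) tid_tab slots themselves, and smuggle the 2-bit channel number into the low bits of each link pointer, which are free because the slots are pointer-aligned. A userspace model of the tag-and-pop dance, minus the locking and the firmware message:

    #include <stdint.h>
    #include <stdio.h>

    static void *tid_tab[8];      /* stand-in for adap->tids.tid_tab */
    static void **release_head;   /* tagged head of the pending list */

    static void queue_release(unsigned int tid, unsigned int chan)
    {
        void **p = &tid_tab[tid];

        *p = release_head;                              /* link old head */
        release_head = (void **)((uintptr_t)p | chan);  /* tag in chan   */
    }

    static void process_releases(void)
    {
        while (release_head) {
            unsigned int chan = (uintptr_t)release_head & 3;
            void **p = (void **)((uintptr_t)release_head & ~3UL);

            release_head = *p;                          /* pop */
            printf("release tid %ld on chan %u\n",
                   (long)(p - tid_tab), chan);
        }
    }

    int main(void)
    {
        queue_release(5, 1);
        queue_release(2, 3);
        process_releases();
        return 0;
    }
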
1667 struct adapter *adap = container_of(t, struct adapter, tids); in cxgb4_remove_tid() local
1682 t4_ofld_send(adap, skb); in cxgb4_remove_tid()
1696 struct adapter *adap = container_of(t, struct adapter, tids); in tid_init() local
1734 (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)) in tid_init()
1757 struct adapter *adap; in cxgb4_create_server() local
1765 adap = netdev2adap(dev); in cxgb4_create_server()
1773 chan = rxq_to_chan(&adap->sge, queue); in cxgb4_create_server()
1777 ret = t4_mgmt_tx(adap, skb); in cxgb4_create_server()
1798 struct adapter *adap; in cxgb4_create_server6() local
1806 adap = netdev2adap(dev); in cxgb4_create_server6()
1816 chan = rxq_to_chan(&adap->sge, queue); in cxgb4_create_server6()
1820 ret = t4_mgmt_tx(adap, skb); in cxgb4_create_server6()
1829 struct adapter *adap; in cxgb4_remove_server() local
1833 adap = netdev2adap(dev); in cxgb4_remove_server()
1844 ret = t4_mgmt_tx(adap, skb); in cxgb4_remove_server()
1979 struct adapter *adap = netdev2adap(dev); in cxgb4_dbfifo_count() local
1982 v1 = t4_read_reg(adap, SGE_DBFIFO_STATUS_A); in cxgb4_dbfifo_count()
1983 v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2_A); in cxgb4_dbfifo_count()
1984 if (is_t4(adap->params.chip)) { in cxgb4_dbfifo_count()
2022 struct adapter *adap = pci_get_drvdata(pdev); in cxgb4_get_tcp_stats() local
2024 spin_lock(&adap->stats_lock); in cxgb4_get_tcp_stats()
2025 t4_tp_get_tcp_stats(adap, v4, v6); in cxgb4_get_tcp_stats()
2026 spin_unlock(&adap->stats_lock); in cxgb4_get_tcp_stats()
2033 struct adapter *adap = netdev2adap(dev); in cxgb4_iscsi_init() local
2035 t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK_A, tag_mask); in cxgb4_iscsi_init()
2036 t4_write_reg(adap, ULP_RX_ISCSI_PSZ_A, HPZ0_V(pgsz_order[0]) | in cxgb4_iscsi_init()
2044 struct adapter *adap = netdev2adap(dev); in cxgb4_flush_eq_cache() local
2046 return t4_sge_ctxt_flush(adap, adap->mbox); in cxgb4_flush_eq_cache()
2050 static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx) in read_eq_indices() argument
2052 u32 addr = t4_read_reg(adap, SGE_DBQ_CTXT_BADDR_A) + 24 * qid + 8; in read_eq_indices()
2056 spin_lock(&adap->win0_lock); in read_eq_indices()
2057 ret = t4_memory_rw(adap, 0, MEM_EDC0, addr, in read_eq_indices()
2060 spin_unlock(&adap->win0_lock); in read_eq_indices()
2071 struct adapter *adap = netdev2adap(dev); in cxgb4_sync_txq_pidx() local
2075 ret = read_eq_indices(adap, qid, &hw_pidx, &hw_cidx); in cxgb4_sync_txq_pidx()
2088 if (is_t4(adap->params.chip)) in cxgb4_sync_txq_pidx()
2093 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A), in cxgb4_sync_txq_pidx()
2103 struct adapter *adap; in cxgb4_read_tpte() local
2109 adap = netdev2adap(dev); in cxgb4_read_tpte()
2111 offset = ((stag >> 8) * 32) + adap->vres.stag.start; in cxgb4_read_tpte()
2119 size = t4_read_reg(adap, MA_EDRAM0_BAR_A); in cxgb4_read_tpte()
2121 size = t4_read_reg(adap, MA_EDRAM1_BAR_A); in cxgb4_read_tpte()
2123 size = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A); in cxgb4_read_tpte()
2140 } else if (is_t5(adap->params.chip)) { in cxgb4_read_tpte()
2141 size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A); in cxgb4_read_tpte()
2157 spin_lock(&adap->win0_lock); in cxgb4_read_tpte()
2158 ret = t4_memory_rw(adap, 0, memtype, memaddr, 32, tpte, T4_MEMORY_READ); in cxgb4_read_tpte()
2159 spin_unlock(&adap->win0_lock); in cxgb4_read_tpte()
2163 dev_err(adap->pdev_dev, "stag %#x, offset %#x out of range\n", in cxgb4_read_tpte()
2172 struct adapter *adap; in cxgb4_read_sge_timestamp() local
2174 adap = netdev2adap(dev); in cxgb4_read_sge_timestamp()
2175 lo = t4_read_reg(adap, SGE_TIMESTAMP_LO_A); in cxgb4_read_sge_timestamp()
2176 hi = TSVAL_G(t4_read_reg(adap, SGE_TIMESTAMP_HI_A)); in cxgb4_read_sge_timestamp()
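
cxgb4_read_sge_timestamp() above stitches a 64-bit timestamp out of two 32-bit register reads (SGE_TIMESTAMP_LO_A, then the TSVAL field of SGE_TIMESTAMP_HI_A). The combine step, with illustrative register values:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t lo = 0xdeadbeef, hi = 0x1;       /* illustrative reads */
        uint64_t ts = ((uint64_t)hi << 32) | lo;

        printf("%#" PRIx64 "\n", ts);             /* 0x1deadbeef */
        return 0;
    }
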
2233 static void drain_db_fifo(struct adapter *adap, int usecs) in drain_db_fifo() argument
2238 v1 = t4_read_reg(adap, SGE_DBFIFO_STATUS_A); in drain_db_fifo()
2239 v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2_A); in drain_db_fifo()
2240 if (is_t4(adap->params.chip)) { in drain_db_fifo()
2264 static void enable_txq_db(struct adapter *adap, struct sge_txq *q) in enable_txq_db() argument
2272 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A), in enable_txq_db()
2280 static void disable_dbs(struct adapter *adap) in disable_dbs() argument
2284 for_each_ethrxq(&adap->sge, i) in disable_dbs()
2285 disable_txq_db(&adap->sge.ethtxq[i].q); in disable_dbs()
2286 for_each_ofldrxq(&adap->sge, i) in disable_dbs()
2287 disable_txq_db(&adap->sge.ofldtxq[i].q); in disable_dbs()
2288 for_each_port(adap, i) in disable_dbs()
2289 disable_txq_db(&adap->sge.ctrlq[i].q); in disable_dbs()
2292 static void enable_dbs(struct adapter *adap) in enable_dbs() argument
2296 for_each_ethrxq(&adap->sge, i) in enable_dbs()
2297 enable_txq_db(adap, &adap->sge.ethtxq[i].q); in enable_dbs()
2298 for_each_ofldrxq(&adap->sge, i) in enable_dbs()
2299 enable_txq_db(adap, &adap->sge.ofldtxq[i].q); in enable_dbs()
2300 for_each_port(adap, i) in enable_dbs()
2301 enable_txq_db(adap, &adap->sge.ctrlq[i].q); in enable_dbs()
2304 static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd) in notify_rdma_uld() argument
2306 if (adap->uld_handle[CXGB4_ULD_RDMA]) in notify_rdma_uld()
2307 ulds[CXGB4_ULD_RDMA].control(adap->uld_handle[CXGB4_ULD_RDMA], in notify_rdma_uld()
2313 struct adapter *adap; in process_db_full() local
2315 adap = container_of(work, struct adapter, db_full_task); in process_db_full()
2317 drain_db_fifo(adap, dbfifo_drain_delay); in process_db_full()
2318 enable_dbs(adap); in process_db_full()
2319 notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY); in process_db_full()
2320 if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5) in process_db_full()
2321 t4_set_reg_field(adap, SGE_INT_ENABLE3_A, in process_db_full()
2325 t4_set_reg_field(adap, SGE_INT_ENABLE3_A, in process_db_full()
2329 static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q) in sync_txq_pidx() argument
2335 ret = read_eq_indices(adap, (u16)q->cntxt_id, &hw_pidx, &hw_cidx); in sync_txq_pidx()
2347 if (is_t4(adap->params.chip)) in sync_txq_pidx()
2352 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A), in sync_txq_pidx()
2360 CH_WARN(adap, "DB drop recovery failed.\n"); in sync_txq_pidx()
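
sync_txq_pidx() (like its exported twin cxgb4_sync_txq_pidx() earlier in this listing) re-submits the doorbells the hardware missed; the interesting part is computing how far the software producer index has run ahead of the hardware's copy, modulo the ring size. A minimal model of that delta:

    #include <stdio.h>

    /* distance from hw_pidx forward to pidx around a ring of 'size' */
    static unsigned int ring_delta(unsigned int pidx, unsigned int hw_pidx,
                                   unsigned int size)
    {
        return pidx >= hw_pidx ? pidx - hw_pidx : size - hw_pidx + pidx;
    }

    int main(void)
    {
        printf("%u\n", ring_delta(10, 4, 512));    /* 6, no wrap  */
        printf("%u\n", ring_delta(3, 500, 512));   /* 15, wrapped */
        return 0;
    }
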
2362 static void recover_all_queues(struct adapter *adap) in recover_all_queues() argument
2366 for_each_ethrxq(&adap->sge, i) in recover_all_queues()
2367 sync_txq_pidx(adap, &adap->sge.ethtxq[i].q); in recover_all_queues()
2368 for_each_ofldrxq(&adap->sge, i) in recover_all_queues()
2369 sync_txq_pidx(adap, &adap->sge.ofldtxq[i].q); in recover_all_queues()
2370 for_each_port(adap, i) in recover_all_queues()
2371 sync_txq_pidx(adap, &adap->sge.ctrlq[i].q); in recover_all_queues()
2376 struct adapter *adap; in process_db_drop() local
2378 adap = container_of(work, struct adapter, db_drop_task); in process_db_drop()
2380 if (is_t4(adap->params.chip)) { in process_db_drop()
2381 drain_db_fifo(adap, dbfifo_drain_delay); in process_db_drop()
2382 notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP); in process_db_drop()
2383 drain_db_fifo(adap, dbfifo_drain_delay); in process_db_drop()
2384 recover_all_queues(adap); in process_db_drop()
2385 drain_db_fifo(adap, dbfifo_drain_delay); in process_db_drop()
2386 enable_dbs(adap); in process_db_drop()
2387 notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY); in process_db_drop()
2388 } else if (is_t5(adap->params.chip)) { in process_db_drop()
2389 u32 dropped_db = t4_read_reg(adap, 0x010ac); in process_db_drop()
2396 ret = t4_bar2_sge_qregs(adap, qid, T4_BAR2_QTYPE_EGRESS, in process_db_drop()
2399 dev_err(adap->pdev_dev, "doorbell drop recovery: " in process_db_drop()
2403 adap->bar2 + bar2_qoffset + SGE_UDB_KDOORBELL); in process_db_drop()
2406 t4_set_reg_field(adap, 0x10b0, 1<<15, 1<<15); in process_db_drop()
2409 if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5) in process_db_drop()
2410 t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, DROPPED_DB_F, 0); in process_db_drop()
2413 void t4_db_full(struct adapter *adap) in t4_db_full() argument
2415 if (is_t4(adap->params.chip)) { in t4_db_full()
2416 disable_dbs(adap); in t4_db_full()
2417 notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL); in t4_db_full()
2418 t4_set_reg_field(adap, SGE_INT_ENABLE3_A, in t4_db_full()
2420 queue_work(adap->workq, &adap->db_full_task); in t4_db_full()
2424 void t4_db_dropped(struct adapter *adap) in t4_db_dropped() argument
2426 if (is_t4(adap->params.chip)) { in t4_db_dropped()
2427 disable_dbs(adap); in t4_db_dropped()
2428 notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL); in t4_db_dropped()
2430 queue_work(adap->workq, &adap->db_drop_task); in t4_db_dropped()
2433 static void uld_attach(struct adapter *adap, unsigned int uld) in uld_attach() argument
2439 lli.pdev = adap->pdev; in uld_attach()
2440 lli.pf = adap->pf; in uld_attach()
2441 lli.l2t = adap->l2t; in uld_attach()
2442 lli.tids = &adap->tids; in uld_attach()
2443 lli.ports = adap->port; in uld_attach()
2444 lli.vr = &adap->vres; in uld_attach()
2445 lli.mtus = adap->params.mtus; in uld_attach()
2447 lli.rxq_ids = adap->sge.rdma_rxq; in uld_attach()
2448 lli.ciq_ids = adap->sge.rdma_ciq; in uld_attach()
2449 lli.nrxq = adap->sge.rdmaqs; in uld_attach()
2450 lli.nciq = adap->sge.rdmaciqs; in uld_attach()
2452 lli.rxq_ids = adap->sge.ofld_rxq; in uld_attach()
2453 lli.nrxq = adap->sge.ofldqsets; in uld_attach()
2455 lli.ntxq = adap->sge.ofldqsets; in uld_attach()
2456 lli.nchan = adap->params.nports; in uld_attach()
2457 lli.nports = adap->params.nports; in uld_attach()
2458 lli.wr_cred = adap->params.ofldq_wr_cred; in uld_attach()
2459 lli.adapter_type = adap->params.chip; in uld_attach()
2460 lli.iscsi_iolen = MAXRXDATA_G(t4_read_reg(adap, TP_PARA_REG2_A)); in uld_attach()
2461 lli.cclk_ps = 1000000000 / adap->params.vpd.cclk; in uld_attach()
2462 lli.udb_density = 1 << adap->params.sge.eq_qpp; in uld_attach()
2463 lli.ucq_density = 1 << adap->params.sge.iq_qpp; in uld_attach()
2464 lli.filt_mode = adap->params.tp.vlan_pri_map; in uld_attach()
2468 lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS_A); in uld_attach()
2469 lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL_A); in uld_attach()
2470 lli.fw_vers = adap->params.fw_vers; in uld_attach()
2472 lli.sge_ingpadboundary = adap->sge.fl_align; in uld_attach()
2473 lli.sge_egrstatuspagesize = adap->sge.stat_len; in uld_attach()
2474 lli.sge_pktshift = adap->sge.pktshift; in uld_attach()
2475 lli.enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN; in uld_attach()
2476 lli.max_ordird_qp = adap->params.max_ordird_qp; in uld_attach()
2477 lli.max_ird_adapter = adap->params.max_ird_adapter; in uld_attach()
2478 lli.ulptx_memwrite_dsgl = adap->params.ulptx_memwrite_dsgl; in uld_attach()
2479 lli.nodeid = dev_to_node(adap->pdev_dev); in uld_attach()
2483 dev_warn(adap->pdev_dev, in uld_attach()
2489 adap->uld_handle[uld] = handle; in uld_attach()
2496 if (adap->flags & FULL_INIT_DONE) in uld_attach()
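
One line in uld_attach() worth decoding: lli.cclk_ps = 1000000000 / adap->params.vpd.cclk converts the VPD core-clock value (stored in kHz) into picoseconds per tick, since 1e12 / (kHz * 1000) = 1e9 / kHz. Quick check, assuming an illustrative 250 MHz core clock:

    #include <stdio.h>

    int main(void)
    {
        unsigned int cclk_khz = 250000;   /* assumed 250 MHz core clock */

        printf("%u ps per tick\n", 1000000000 / cclk_khz);   /* 4000 */
        return 0;
    }
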
2500 static void attach_ulds(struct adapter *adap) in attach_ulds() argument
2505 list_add_tail_rcu(&adap->rcu_node, &adap_rcu_list); in attach_ulds()
2509 list_add_tail(&adap->list_node, &adapter_list); in attach_ulds()
2512 uld_attach(adap, i); in attach_ulds()
2516 static void detach_ulds(struct adapter *adap) in detach_ulds() argument
2521 list_del(&adap->list_node); in detach_ulds()
2523 if (adap->uld_handle[i]) { in detach_ulds()
2524 ulds[i].state_change(adap->uld_handle[i], in detach_ulds()
2526 adap->uld_handle[i] = NULL; in detach_ulds()
2535 list_del_rcu(&adap->rcu_node); in detach_ulds()
2539 static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state) in notify_ulds() argument
2545 if (adap->uld_handle[i]) in notify_ulds()
2546 ulds[i].state_change(adap->uld_handle[i], new_state); in notify_ulds()
2562 struct adapter *adap; in cxgb4_register_uld() local
2572 list_for_each_entry(adap, &adapter_list, list_node) in cxgb4_register_uld()
2573 uld_attach(adap, type); in cxgb4_register_uld()
2587 struct adapter *adap; in cxgb4_unregister_uld() local
2592 list_for_each_entry(adap, &adapter_list, list_node) in cxgb4_unregister_uld()
2593 adap->uld_handle[type] = NULL; in cxgb4_unregister_uld()
2608 struct adapter *adap; in cxgb4_inet6addr_handler() local
2614 list_for_each_entry(adap, &adapter_list, list_node) { in cxgb4_inet6addr_handler()
2617 cxgb4_clip_get(adap->port[0], in cxgb4_inet6addr_handler()
2621 cxgb4_clip_release(adap->port[0], in cxgb4_inet6addr_handler()
2655 static void update_clip(const struct adapter *adap) in update_clip() argument
2664 dev = adap->port[i]; in update_clip()
2687 static int cxgb_up(struct adapter *adap) in cxgb_up() argument
2691 err = setup_sge_queues(adap); in cxgb_up()
2694 err = setup_rss(adap); in cxgb_up()
2698 if (adap->flags & USING_MSIX) { in cxgb_up()
2699 name_msix_vecs(adap); in cxgb_up()
2700 err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0, in cxgb_up()
2701 adap->msix_info[0].desc, adap); in cxgb_up()
2705 err = request_msix_queue_irqs(adap); in cxgb_up()
2707 free_irq(adap->msix_info[0].vec, adap); in cxgb_up()
2711 err = request_irq(adap->pdev->irq, t4_intr_handler(adap), in cxgb_up()
2712 (adap->flags & USING_MSI) ? 0 : IRQF_SHARED, in cxgb_up()
2713 adap->port[0]->name, adap); in cxgb_up()
2717 enable_rx(adap); in cxgb_up()
2718 t4_sge_start(adap); in cxgb_up()
2719 t4_intr_enable(adap); in cxgb_up()
2720 adap->flags |= FULL_INIT_DONE; in cxgb_up()
2721 notify_ulds(adap, CXGB4_STATE_UP); in cxgb_up()
2723 update_clip(adap); in cxgb_up()
2728 dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err); in cxgb_up()
2730 t4_free_sge_resources(adap); in cxgb_up()
2820 struct adapter *adap; in cxgb4_create_server_filter() local
2824 adap = netdev2adap(dev); in cxgb4_create_server_filter()
2827 stid -= adap->tids.sftid_base; in cxgb4_create_server_filter()
2828 stid += adap->tids.nftids; in cxgb4_create_server_filter()
2832 f = &adap->tids.ftid_tab[stid]; in cxgb4_create_server_filter()
2841 clear_filter(adap, f); in cxgb4_create_server_filter()
2853 if (adap->params.tp.vlan_pri_map & PORT_F) { in cxgb4_create_server_filter()
2859 if (adap->params.tp.vlan_pri_map & PROTOCOL_F) { in cxgb4_create_server_filter()
2870 ret = set_filter_wr(adap, stid); in cxgb4_create_server_filter()
2872 clear_filter(adap, f); in cxgb4_create_server_filter()
2885 struct adapter *adap; in cxgb4_remove_server_filter() local
2887 adap = netdev2adap(dev); in cxgb4_remove_server_filter()
2890 stid -= adap->tids.sftid_base; in cxgb4_remove_server_filter()
2891 stid += adap->tids.nftids; in cxgb4_remove_server_filter()
2893 f = &adap->tids.ftid_tab[stid]; in cxgb4_remove_server_filter()
2897 ret = delete_filter(adap, stid); in cxgb4_remove_server_filter()
3062 struct adapter *adap = pi->adapter; in cxgb_netpoll() local
3064 if (adap->flags & USING_MSIX) { in cxgb_netpoll()
3066 struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset]; in cxgb_netpoll()
3071 t4_intr_handler(adap)(0, adap); in cxgb_netpoll()
3100 void t4_fatal_err(struct adapter *adap) in t4_fatal_err() argument
3102 t4_set_reg_field(adap, SGE_CONTROL_A, GLOBALENABLE_F, 0); in t4_fatal_err()
3103 t4_intr_disable(adap); in t4_fatal_err()
3104 dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n"); in t4_fatal_err()
3107 static void setup_memwin(struct adapter *adap) in setup_memwin() argument
3109 u32 nic_win_base = t4_get_util_window(adap); in setup_memwin()
3111 t4_setup_memwin(adap, nic_win_base, MEMWIN_NIC); in setup_memwin()
3114 static void setup_memwin_rdma(struct adapter *adap) in setup_memwin_rdma() argument
3116 if (adap->vres.ocq.size) { in setup_memwin_rdma()
3120 start = t4_read_pcie_cfg4(adap, PCI_BASE_ADDRESS_2); in setup_memwin_rdma()
3122 start += OCQ_WIN_OFFSET(adap->pdev, &adap->vres); in setup_memwin_rdma()
3123 sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10; in setup_memwin_rdma()
3124 t4_write_reg(adap, in setup_memwin_rdma()
3127 t4_write_reg(adap, in setup_memwin_rdma()
3129 adap->vres.ocq.start); in setup_memwin_rdma()
3130 t4_read_reg(adap, in setup_memwin_rdma()
3135 static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c) in adap_init1() argument
3145 ret = t4_wr_mbox(adap, adap->mbox, c, sizeof(*c), c); in adap_init1()
3156 dev_err(adap->pdev_dev, "virtualization ACLs not supported"); in adap_init1()
3161 ret = t4_wr_mbox(adap, adap->mbox, c, sizeof(*c), NULL); in adap_init1()
3165 ret = t4_config_glbl_rss(adap, adap->pf, in adap_init1()
3172 ret = t4_cfg_pfvf(adap, adap->mbox, adap->pf, 0, adap->sge.egr_sz, 64, in adap_init1()
3178 t4_sge_init(adap); in adap_init1()
3181 t4_write_reg(adap, TP_SHIFT_CNT_A, 0x64f8849); in adap_init1()
3182 t4_write_reg(adap, ULP_RX_TDDP_PSZ_A, HPZ0_V(PAGE_SHIFT - 12)); in adap_init1()
3183 t4_write_reg(adap, TP_PIO_ADDR_A, TP_INGRESS_CONFIG_A); in adap_init1()
3184 v = t4_read_reg(adap, TP_PIO_DATA_A); in adap_init1()
3185 t4_write_reg(adap, TP_PIO_DATA_A, v & ~CSUM_HAS_PSEUDO_HDR_F); in adap_init1()
3188 adap->params.tp.tx_modq_map = 0xE4; in adap_init1()
3189 t4_write_reg(adap, TP_TX_MOD_QUEUE_REQ_MAP_A, in adap_init1()
3190 TX_MOD_QUEUE_REQ_MAP_V(adap->params.tp.tx_modq_map)); in adap_init1()
3194 t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, in adap_init1()
3196 t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, in adap_init1()
3198 t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, in adap_init1()
3202 if (is_offload(adap)) { in adap_init1()
3203 t4_write_reg(adap, TP_TX_MOD_QUEUE_WEIGHT0_A, in adap_init1()
3208 t4_write_reg(adap, TP_TX_MOD_CHANNEL_WEIGHT_A, in adap_init1()
3216 return t4_early_init(adap, adap->pf); in adap_init1()
3340 static int adap_init0_phy(struct adapter *adap) in adap_init0_phy() argument
3348 phy_info = find_phy_info(adap->pdev->device); in adap_init0_phy()
3350 dev_warn(adap->pdev_dev, in adap_init0_phy()
3361 adap->pdev_dev); in adap_init0_phy()
3369 dev_err(adap->pdev_dev, "unable to find PHY Firmware image " in adap_init0_phy()
3375 t4_phy_fw_ver(adap, &cur_phy_fw_ver); in adap_init0_phy()
3376 dev_warn(adap->pdev_dev, "continuing with, on-adapter " in adap_init0_phy()
3386 ret = t4_load_phy_fw(adap, MEMWIN_NIC, &adap->win0_lock, in adap_init0_phy()
3390 dev_err(adap->pdev_dev, "PHY Firmware transfer error %d\n", in adap_init0_phy()
3398 dev_info(adap->pdev_dev, "Successfully transferred PHY " in adap_init0_phy()
3687 static int adap_init0(struct adapter *adap) in adap_init0() argument
3699 ret = t4_init_devlog_params(adap); in adap_init0()
3704 ret = t4_fw_hello(adap, adap->mbox, adap->mbox, MASTER_MAY, &state); in adap_init0()
3706 dev_err(adap->pdev_dev, "could not connect to FW, error %d\n", in adap_init0()
3710 if (ret == adap->mbox) in adap_init0()
3711 adap->flags |= MASTER_PF; in adap_init0()
3720 t4_get_fw_version(adap, &adap->params.fw_vers); in adap_init0()
3721 t4_get_tp_version(adap, &adap->params.tp_vers); in adap_init0()
3722 ret = t4_check_fw_version(adap); in adap_init0()
3726 if ((adap->flags & MASTER_PF) && state != DEV_STATE_INIT) { in adap_init0()
3736 fw_info = find_fw_info(CHELSIO_CHIP_VERSION(adap->params.chip)); in adap_init0()
3738 dev_err(adap->pdev_dev, in adap_init0()
3740 CHELSIO_CHIP_VERSION(adap->params.chip)); in adap_init0()
3751 adap->pdev_dev); in adap_init0()
3753 dev_err(adap->pdev_dev, in adap_init0()
3762 ret = t4_prep_fw(adap, fw_info, fw_data, fw_size, card_fw, in adap_init0()
3780 ret = t4_get_vpd_params(adap, &adap->params.vpd); in adap_init0()
3792 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &v, &port_vec); in adap_init0()
3796 adap->params.nports = hweight32(port_vec); in adap_init0()
3797 adap->params.portvec = port_vec; in adap_init0()
3803 dev_info(adap->pdev_dev, "Coming up as %s: "\ in adap_init0()
3805 adap->flags & MASTER_PF ? "MASTER" : "SLAVE"); in adap_init0()
3807 dev_info(adap->pdev_dev, "Coming up as MASTER: "\ in adap_init0()
3815 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, in adap_init0()
3822 dev_err(adap->pdev_dev, "firmware doesn't support " in adap_init0()
3831 ret = adap_init0_config(adap, reset); in adap_init0()
3833 dev_err(adap->pdev_dev, "no Configuration File " in adap_init0()
3838 dev_err(adap->pdev_dev, "could not initialize " in adap_init0()
3848 ret = t4_sge_init(adap); in adap_init0()
3852 if (is_bypass_device(adap->pdev->device)) in adap_init0()
3853 adap->params.bypass = 1; in adap_init0()
3874 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, params, val); in adap_init0()
3877 adap->sge.egr_start = val[0]; in adap_init0()
3878 adap->l2t_start = val[1]; in adap_init0()
3879 adap->l2t_end = val[2]; in adap_init0()
3880 adap->tids.ftid_base = val[3]; in adap_init0()
3881 adap->tids.nftids = val[4] - val[3] + 1; in adap_init0()
3882 adap->sge.ingr_start = val[5]; in adap_init0()
3892 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val); in adap_init0()
3895 adap->sge.egr_sz = val[0] - adap->sge.egr_start + 1; in adap_init0()
3896 adap->sge.ingr_sz = val[1] - adap->sge.ingr_start + 1; in adap_init0()
3898 adap->sge.egr_map = kcalloc(adap->sge.egr_sz, in adap_init0()
3899 sizeof(*adap->sge.egr_map), GFP_KERNEL); in adap_init0()
3900 if (!adap->sge.egr_map) { in adap_init0()
3905 adap->sge.ingr_map = kcalloc(adap->sge.ingr_sz, in adap_init0()
3906 sizeof(*adap->sge.ingr_map), GFP_KERNEL); in adap_init0()
3907 if (!adap->sge.ingr_map) { in adap_init0()
3915 adap->sge.starving_fl = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz), in adap_init0()
3917 if (!adap->sge.starving_fl) { in adap_init0()
3922 adap->sge.txq_maperr = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz), in adap_init0()
3924 if (!adap->sge.txq_maperr) { in adap_init0()
3930 adap->sge.blocked_fl = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz), in adap_init0()
3932 if (!adap->sge.blocked_fl) { in adap_init0()
3940 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val); in adap_init0()
3943 adap->clipt_start = val[0]; in adap_init0()
3944 adap->clipt_end = val[1]; in adap_init0()
3949 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val); in adap_init0()
3954 adap->flags |= FW_OFLD_CONN; in adap_init0()
3955 adap->tids.aftid_base = val[0]; in adap_init0()
3956 adap->tids.aftid_end = val[1]; in adap_init0()
3966 (void)t4_set_params(adap, adap->mbox, adap->pf, 0, 1, params, val); in adap_init0()
3974 if (is_t4(adap->params.chip)) { in adap_init0()
3975 adap->params.ulptx_memwrite_dsgl = false; in adap_init0()
3978 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, in adap_init0()
3980 adap->params.ulptx_memwrite_dsgl = (ret == 0 && val[0] != 0); in adap_init0()
3991 ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd), in adap_init0()
4004 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, in adap_init0()
4008 adap->tids.ntids = val[0]; in adap_init0()
4009 adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS); in adap_init0()
4010 adap->tids.stid_base = val[1]; in adap_init0()
4011 adap->tids.nstids = val[2] - val[1] + 1; in adap_init0()
4021 if (adap->flags & FW_OFLD_CONN && !is_bypass(adap)) { in adap_init0()
4022 adap->tids.sftid_base = adap->tids.ftid_base + in adap_init0()
4023 DIV_ROUND_UP(adap->tids.nftids, 3); in adap_init0()
4024 adap->tids.nsftids = adap->tids.nftids - in adap_init0()
4025 DIV_ROUND_UP(adap->tids.nftids, 3); in adap_init0()
4026 adap->tids.nftids = adap->tids.sftid_base - in adap_init0()
4027 adap->tids.ftid_base; in adap_init0()
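
When FW_OFLD_CONN is set, the carve-up just above in adap_init0() splits the filter TID range: the bottom ceil(nftids/3) stays as normal filter TIDs and the top two-thirds becomes the server-filter region. Worked example with illustrative values (ftid_base = 100, nftids = 496 are assumptions, not read from this adapter):

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        unsigned int ftid_base = 100, nftids = 496;   /* illustrative */
        unsigned int sftid_base = ftid_base + DIV_ROUND_UP(nftids, 3);
        unsigned int nsftids = nftids - DIV_ROUND_UP(nftids, 3);

        nftids = sftid_base - ftid_base;   /* bottom third stays normal */
        printf("nftids=%u nsftids=%u sftid_base=%u\n",
               nftids, nsftids, sftid_base);   /* 166 330 266 */
        return 0;
    }
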
4029 adap->vres.ddp.start = val[3]; in adap_init0()
4030 adap->vres.ddp.size = val[4] - val[3] + 1; in adap_init0()
4031 adap->params.ofldq_wr_cred = val[5]; in adap_init0()
4033 adap->params.offload = 1; in adap_init0()
4042 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, in adap_init0()
4046 adap->vres.stag.start = val[0]; in adap_init0()
4047 adap->vres.stag.size = val[1] - val[0] + 1; in adap_init0()
4048 adap->vres.rq.start = val[2]; in adap_init0()
4049 adap->vres.rq.size = val[3] - val[2] + 1; in adap_init0()
4050 adap->vres.pbl.start = val[4]; in adap_init0()
4051 adap->vres.pbl.size = val[5] - val[4] + 1; in adap_init0()
4059 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, params, in adap_init0()
4063 adap->vres.qp.start = val[0]; in adap_init0()
4064 adap->vres.qp.size = val[1] - val[0] + 1; in adap_init0()
4065 adap->vres.cq.start = val[2]; in adap_init0()
4066 adap->vres.cq.size = val[3] - val[2] + 1; in adap_init0()
4067 adap->vres.ocq.start = val[4]; in adap_init0()
4068 adap->vres.ocq.size = val[5] - val[4] + 1; in adap_init0()
4072 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, in adap_init0()
4075 adap->params.max_ordird_qp = 8; in adap_init0()
4076 adap->params.max_ird_adapter = 32 * adap->tids.ntids; in adap_init0()
4079 adap->params.max_ordird_qp = val[0]; in adap_init0()
4080 adap->params.max_ird_adapter = val[1]; in adap_init0()
4082 dev_info(adap->pdev_dev, in adap_init0()
4084 adap->params.max_ordird_qp, in adap_init0()
4085 adap->params.max_ird_adapter); in adap_init0()
4090 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, in adap_init0()
4094 adap->vres.iscsi.start = val[0]; in adap_init0()
4095 adap->vres.iscsi.size = val[1] - val[0] + 1; in adap_init0()
4105 t4_read_mtu_tbl(adap, adap->params.mtus, NULL); in adap_init0()
4127 if (adap->params.mtus[i] == 1492) { in adap_init0()
4128 adap->params.mtus[i] = 1488; in adap_init0()
4132 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd, in adap_init0()
4133 adap->params.b_wnd); in adap_init0()
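
The MTU-table rewrite above (1492 -> 1488) keeps the TCP payload a multiple of 8 once the usual 40 bytes of IPv4 + TCP headers are subtracted: 1492 - 40 = 1452 leaves a remainder of 4, while 1488 - 40 = 1448 = 8 * 181. Quick check:

    #include <stdio.h>

    int main(void)
    {
        printf("%d %d\n", (1492 - 40) % 8, (1488 - 40) % 8);   /* 4 0 */
        return 0;
    }
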
4135 t4_init_sge_params(adap); in adap_init0()
4136 adap->flags |= FW_OK; in adap_init0()
4137 t4_init_tp_params(adap); in adap_init0()
4146 kfree(adap->sge.egr_map); in adap_init0()
4147 kfree(adap->sge.ingr_map); in adap_init0()
4148 kfree(adap->sge.starving_fl); in adap_init0()
4149 kfree(adap->sge.txq_maperr); in adap_init0()
4151 kfree(adap->sge.blocked_fl); in adap_init0()
4154 t4_fw_bye(adap, adap->mbox); in adap_init0()
4164 struct adapter *adap = pci_get_drvdata(pdev); in eeh_err_detected() local
4166 if (!adap) in eeh_err_detected()
4170 adap->flags &= ~FW_OK; in eeh_err_detected()
4171 notify_ulds(adap, CXGB4_STATE_START_RECOVERY); in eeh_err_detected()
4172 spin_lock(&adap->stats_lock); in eeh_err_detected()
4173 for_each_port(adap, i) { in eeh_err_detected()
4174 struct net_device *dev = adap->port[i]; in eeh_err_detected()
4179 spin_unlock(&adap->stats_lock); in eeh_err_detected()
4180 disable_interrupts(adap); in eeh_err_detected()
4181 if (adap->flags & FULL_INIT_DONE) in eeh_err_detected()
4182 cxgb_down(adap); in eeh_err_detected()
4184 if ((adap->flags & DEV_ENABLED)) { in eeh_err_detected()
4186 adap->flags &= ~DEV_ENABLED; in eeh_err_detected()
4196 struct adapter *adap = pci_get_drvdata(pdev); in eeh_slot_reset() local
4198 if (!adap) { in eeh_slot_reset()
4204 if (!(adap->flags & DEV_ENABLED)) { in eeh_slot_reset()
4210 adap->flags |= DEV_ENABLED; in eeh_slot_reset()
4218 if (t4_wait_dev_ready(adap->regs) < 0) in eeh_slot_reset()
4220 if (t4_fw_hello(adap, adap->mbox, adap->pf, MASTER_MUST, NULL) < 0) in eeh_slot_reset()
4222 adap->flags |= FW_OK; in eeh_slot_reset()
4223 if (adap_init1(adap, &c)) in eeh_slot_reset()
4226 for_each_port(adap, i) { in eeh_slot_reset()
4227 struct port_info *p = adap2pinfo(adap, i); in eeh_slot_reset()
4229 ret = t4_alloc_vi(adap, adap->mbox, p->tx_chan, adap->pf, 0, 1, in eeh_slot_reset()
4237 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd, in eeh_slot_reset()
4238 adap->params.b_wnd); in eeh_slot_reset()
4239 setup_memwin(adap); in eeh_slot_reset()
4240 if (cxgb_up(adap)) in eeh_slot_reset()
4248 struct adapter *adap = pci_get_drvdata(pdev); in eeh_resume() local
4250 if (!adap) in eeh_resume()
4254 for_each_port(adap, i) { in eeh_resume()
4255 struct net_device *dev = adap->port[i]; in eeh_resume()
4278 static inline void init_rspq(struct adapter *adap, struct sge_rspq *q, in init_rspq() argument
4282 q->adap = adap; in init_rspq()
4293 static void cfg_queues(struct adapter *adap) in cfg_queues() argument
4295 struct sge *s = &adap->sge; in cfg_queues()
4302 for_each_port(adap, i) in cfg_queues()
4303 n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg); in cfg_queues()
4309 if (adap->params.nports * 8 > MAX_ETH_QSETS) { in cfg_queues()
4310 dev_err(adap->pdev_dev, "MAX_ETH_QSETS=%d < %d!\n", in cfg_queues()
4311 MAX_ETH_QSETS, adap->params.nports * 8); in cfg_queues()
4315 for_each_port(adap, i) { in cfg_queues()
4316 struct port_info *pi = adap2pinfo(adap, i); in cfg_queues()
4328 q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g; in cfg_queues()
4332 for_each_port(adap, i) { in cfg_queues()
4333 struct port_info *pi = adap2pinfo(adap, i); in cfg_queues()
4344 if (is_offload(adap)) { in cfg_queues()
4353 s->ofldqsets = roundup(i, adap->params.nports); in cfg_queues()
4355 s->ofldqsets = adap->params.nports; in cfg_queues()
4357 s->rdmaqs = adap->params.nports; in cfg_queues()
4365 s->rdmaciqs = (s->rdmaciqs / adap->params.nports) * in cfg_queues()
4366 adap->params.nports; in cfg_queues()
4367 s->rdmaciqs = max_t(int, s->rdmaciqs, adap->params.nports); in cfg_queues()
4373 init_rspq(adap, &r->rspq, 5, 10, 1024, 64); in cfg_queues()
4389 init_rspq(adap, &r->rspq, 5, 1, 1024, 64); in cfg_queues()
4397 init_rspq(adap, &r->rspq, 5, 1, 511, 64); in cfg_queues()
4402 ciq_size = 64 + adap->vres.cq.size + adap->tids.nftids; in cfg_queues()
4404 CH_WARN(adap, "CIQ size too small for available IQs\n"); in cfg_queues()
4411 init_rspq(adap, &r->rspq, 5, 1, ciq_size, 64); in cfg_queues()
4415 init_rspq(adap, &s->fw_evtq, 0, 1, 1024, 64); in cfg_queues()
4416 init_rspq(adap, &s->intrq, 0, 1, 2 * MAX_INGQ, 64); in cfg_queues()
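
cfg_queues() hands every sub-10G port a single ethernet qset and splits what is left of the qset budget across the n10g 10G-capable ports. Worked example, assuming a budget of MAX_ETH_QSETS = 32 and 4 ports of which 2 are 10G (the subsequent cap against the default RSS queue count is left out):

    #include <stdio.h>

    int main(void)
    {
        int max_eth_qsets = 32, nports = 4, n10g = 2;   /* assumptions */
        int q10g = (max_eth_qsets - (nports - n10g)) / n10g;

        printf("q10g = %d\n", q10g);   /* 15 qsets per 10G port */
        return 0;
    }
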
4423 static void reduce_ethqs(struct adapter *adap, int n) in reduce_ethqs() argument
4428 while (n < adap->sge.ethqsets) in reduce_ethqs()
4429 for_each_port(adap, i) { in reduce_ethqs()
4430 pi = adap2pinfo(adap, i); in reduce_ethqs()
4433 adap->sge.ethqsets--; in reduce_ethqs()
4434 if (adap->sge.ethqsets <= n) in reduce_ethqs()
4440 for_each_port(adap, i) { in reduce_ethqs()
4441 pi = adap2pinfo(adap, i); in reduce_ethqs()
4450 static int enable_msix(struct adapter *adap) in enable_msix() argument
4454 struct sge *s = &adap->sge; in enable_msix()
4455 unsigned int nchan = adap->params.nports; in enable_msix()
4467 if (is_offload(adap)) { in enable_msix()
4476 need = 8 * adap->params.nports + EXTRA_VECS + ofld_need; in enable_msix()
4478 need = adap->params.nports + EXTRA_VECS + ofld_need; in enable_msix()
4480 allocated = pci_enable_msix_range(adap->pdev, entries, need, want); in enable_msix()
4482 dev_info(adap->pdev_dev, "not enough MSI-X vectors left," in enable_msix()
4496 reduce_ethqs(adap, i); in enable_msix()
4498 if (is_offload(adap)) { in enable_msix()
4510 adap->msix_info[i].vec = entries[i].vector; in enable_msix()
4511 dev_info(adap->pdev_dev, "%d MSI-X vectors allocated, " in enable_msix()
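
enable_msix() asks pci_enable_msix_range() for anything between 'need' (the bare minimum) and 'want' vectors; the call returns the count actually granted, or a negative errno if even 'need' cannot be met, after which reduce_ethqs() trims the queue plan to fit. A userspace model with a hypothetical fake_enable_range() standing in for the PCI call:

    #include <stdio.h>

    /* hypothetical stand-in for pci_enable_msix_range() */
    static int fake_enable_range(int minvec, int maxvec, int avail)
    {
        if (avail < minvec)
            return -28;                   /* -ENOSPC */
        return avail < maxvec ? avail : maxvec;
    }

    int main(void)
    {
        int want = 18, need = 6;
        int got = fake_enable_range(need, want, 10);

        if (got < 0)
            return 1;                     /* fall back to MSI/INTx */
        if (got < want)
            printf("got %d of %d vectors; trimming queues\n", got, want);
        return 0;
    }
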
4522 static int init_rss(struct adapter *adap) in init_rss() argument
4527 err = t4_init_rss_mode(adap, adap->mbox); in init_rss()
4531 for_each_port(adap, i) { in init_rss()
4532 struct port_info *pi = adap2pinfo(adap, i); in init_rss()
4547 const struct adapter *adap = pi->adapter; in print_port_info() local
4549 if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_2_5GB) in print_port_info()
4551 else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB) in print_port_info()
4553 else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_8_0GB) in print_port_info()
4569 adap->params.vpd.id, in print_port_info()
4570 CHELSIO_CHIP_RELEASE(adap->params.chip), buf, in print_port_info()
4571 is_offload(adap) ? "R" : "", adap->params.pci.width, spd, in print_port_info()
4572 (adap->flags & USING_MSIX) ? " MSI-X" : in print_port_info()
4573 (adap->flags & USING_MSI) ? " MSI" : ""); in print_port_info()
4575 adap->params.vpd.sn, adap->params.vpd.pn); in print_port_info()