Lines matching refs:adap (identifier cross-reference over the Chelsio cxgb4 network driver). Each entry reads: <source line> <matching source text> in <enclosing function>(), with a trailing "local" or "argument" marker where the match is a declaration.

300 struct adapter *adap = pi->adapter; in dcb_tx_queue_prio_enable() local
301 struct sge_eth_txq *txq = &adap->sge.ethtxq[pi->first_qset]; in dcb_tx_queue_prio_enable()
321 err = t4_set_params_nosleep(adap, adap->mbox, adap->fn, 0, 1, in dcb_tx_queue_prio_enable()
325 dev_err(adap->pdev_dev, in dcb_tx_queue_prio_enable()
354 void t4_os_portmod_changed(const struct adapter *adap, int port_id) in t4_os_portmod_changed() argument
360 const struct net_device *dev = adap->port[port_id]; in t4_os_portmod_changed()
507 static void dcb_rpl(struct adapter *adap, const struct fw_port_cmd *pcmd) in dcb_rpl() argument
510 struct net_device *dev = adap->port[port]; in dcb_rpl()
514 cxgb4_dcb_handle_fw_update(adap, pcmd); in dcb_rpl()
529 static void clear_filter(struct adapter *adap, struct filter_entry *f) in clear_filter() argument
549 static void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl) in filter_rpl() argument
552 unsigned int nidx = idx - adap->tids.ftid_base; in filter_rpl()
556 if (idx >= adap->tids.ftid_base && nidx < in filter_rpl()
557 (adap->tids.nftids + adap->tids.nsftids)) { in filter_rpl()
560 f = &adap->tids.ftid_tab[idx]; in filter_rpl()
566 clear_filter(adap, f); in filter_rpl()
568 dev_err(adap->pdev_dev, "filter %u setup failed due to full SMT\n", in filter_rpl()
570 clear_filter(adap, f); in filter_rpl()
579 dev_err(adap->pdev_dev, "filter %u setup failed with error %u\n", in filter_rpl()
581 clear_filter(adap, f); in filter_rpl()
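
The filter_rpl() entries above translate a hardware tid index into an offset in the driver's filter table by subtracting ftid_base and bounds-checking against nftids + nsftids. A minimal userspace sketch of that base-plus-range validation; the constants and names here are hypothetical, not the driver's:

    #include <stdio.h>

    #define FTID_BASE 1024u   /* hypothetical first hardware filter tid */
    #define NFTIDS     496u   /* hypothetical count of normal filter tids */
    #define NSFTIDS     16u   /* hypothetical count of server filter tids */

    /* Map a hardware tid to a table offset, or -1 if out of range. */
    static long ftid_to_offset(unsigned int idx)
    {
        unsigned int nidx = idx - FTID_BASE;  /* wraps if idx < FTID_BASE */

        /* Mirrors the two-part check in filter_rpl(): idx must be at or
         * above the base, and the offset must fall inside the table. */
        if (idx >= FTID_BASE && nidx < NFTIDS + NSFTIDS)
            return (long)nidx;
        return -1;
    }

    int main(void)
    {
        printf("%ld\n", ftid_to_offset(1024));  /* 0   */
        printf("%ld\n", ftid_to_offset(1535));  /* 511 */
        printf("%ld\n", ftid_to_offset(1536));  /* -1  */
        printf("%ld\n", ftid_to_offset(512));   /* -1  */
        return 0;
    }
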
603 dev_err(q->adap->pdev_dev, "unexpected FW4/CPL %#x on FW event queue\n" in fwevtq_handler()
614 txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start]; in fwevtq_handler()
616 if ((u8 *)txq < (u8 *)q->adap->sge.ofldtxq) { in fwevtq_handler()
640 struct net_device *dev = q->adap->port[port]; in fwevtq_handler()
651 dcb_rpl(q->adap, pcmd); in fwevtq_handler()
655 t4_handle_fw_rpl(q->adap, p->data); in fwevtq_handler()
659 do_l2t_write_rpl(q->adap, p); in fwevtq_handler()
663 filter_rpl(q->adap, p); in fwevtq_handler()
665 dev_err(q->adap->pdev_dev, in fwevtq_handler()
691 if (ulds[q->uld].rx_handler(q->adap->uld_handle[q->uld], rsp, gl)) { in uldrx_handler()
720 struct adapter *adap = cookie; in t4_nondata_intr() local
721 u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A)); in t4_nondata_intr()
724 adap->swintr = 1; in t4_nondata_intr()
725 t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A), v); in t4_nondata_intr()
727 if (adap->flags & MASTER_PF) in t4_nondata_intr()
728 t4_slow_intr_handler(adap); in t4_nondata_intr()
735 static void name_msix_vecs(struct adapter *adap) in name_msix_vecs() argument
737 int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc); in name_msix_vecs()
740 snprintf(adap->msix_info[0].desc, n, "%s", adap->port[0]->name); in name_msix_vecs()
743 snprintf(adap->msix_info[1].desc, n, "%s-FWeventq", in name_msix_vecs()
744 adap->port[0]->name); in name_msix_vecs()
747 for_each_port(adap, j) { in name_msix_vecs()
748 struct net_device *d = adap->port[j]; in name_msix_vecs()
752 snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d", in name_msix_vecs()
757 for_each_ofldrxq(&adap->sge, i) in name_msix_vecs()
758 snprintf(adap->msix_info[msi_idx++].desc, n, "%s-ofld%d", in name_msix_vecs()
759 adap->port[0]->name, i); in name_msix_vecs()
761 for_each_rdmarxq(&adap->sge, i) in name_msix_vecs()
762 snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma%d", in name_msix_vecs()
763 adap->port[0]->name, i); in name_msix_vecs()
765 for_each_rdmaciq(&adap->sge, i) in name_msix_vecs()
766 snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma-ciq%d", in name_msix_vecs()
767 adap->port[0]->name, i); in name_msix_vecs()
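
name_msix_vecs() above snprintf()s a human-readable description per vector (port name plus queue role and index) into fixed-size buffers so interrupt listings are self-describing. A hedged sketch of the same labeling scheme; the buffer size and queue counts are made up:

    #include <stdio.h>

    #define DESC_LEN 32   /* stands in for sizeof(msix_info[0].desc), assumed */

    int main(void)
    {
        char desc[8][DESC_LEN];
        const char *port = "eth0";  /* stands in for adap->port[0]->name */
        int msi_idx = 2, i;

        snprintf(desc[0], DESC_LEN, "%s", port);           /* non-data interrupt */
        snprintf(desc[1], DESC_LEN, "%s-FWeventq", port);  /* FW event queue */
        for (i = 0; i < 3; i++)                            /* per-queue Rx vectors */
            snprintf(desc[msi_idx++], DESC_LEN, "%s-Rx%d", port, i);
        for (i = 0; i < 2; i++)
            snprintf(desc[msi_idx++], DESC_LEN, "%s-ofld%d", port, i);

        for (i = 0; i < msi_idx; i++)
            printf("vec %d: %s\n", i, desc[i]);
        return 0;
    }
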
770 static int request_msix_queue_irqs(struct adapter *adap) in request_msix_queue_irqs() argument
772 struct sge *s = &adap->sge; in request_msix_queue_irqs()
776 err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0, in request_msix_queue_irqs()
777 adap->msix_info[1].desc, &s->fw_evtq); in request_msix_queue_irqs()
782 err = request_irq(adap->msix_info[msi_index].vec, in request_msix_queue_irqs()
784 adap->msix_info[msi_index].desc, in request_msix_queue_irqs()
791 err = request_irq(adap->msix_info[msi_index].vec, in request_msix_queue_irqs()
793 adap->msix_info[msi_index].desc, in request_msix_queue_irqs()
800 err = request_irq(adap->msix_info[msi_index].vec, in request_msix_queue_irqs()
802 adap->msix_info[msi_index].desc, in request_msix_queue_irqs()
809 err = request_irq(adap->msix_info[msi_index].vec, in request_msix_queue_irqs()
811 adap->msix_info[msi_index].desc, in request_msix_queue_irqs()
821 free_irq(adap->msix_info[--msi_index].vec, in request_msix_queue_irqs()
824 free_irq(adap->msix_info[--msi_index].vec, in request_msix_queue_irqs()
827 free_irq(adap->msix_info[--msi_index].vec, in request_msix_queue_irqs()
830 free_irq(adap->msix_info[--msi_index].vec, in request_msix_queue_irqs()
832 free_irq(adap->msix_info[1].vec, &s->fw_evtq); in request_msix_queue_irqs()
836 static void free_msix_queue_irqs(struct adapter *adap) in free_msix_queue_irqs() argument
839 struct sge *s = &adap->sge; in free_msix_queue_irqs()
841 free_irq(adap->msix_info[1].vec, &s->fw_evtq); in free_msix_queue_irqs()
843 free_irq(adap->msix_info[msi_index++].vec, &s->ethrxq[i].rspq); in free_msix_queue_irqs()
845 free_irq(adap->msix_info[msi_index++].vec, &s->ofldrxq[i].rspq); in free_msix_queue_irqs()
847 free_irq(adap->msix_info[msi_index++].vec, &s->rdmarxq[i].rspq); in free_msix_queue_irqs()
849 free_irq(adap->msix_info[msi_index++].vec, &s->rdmaciq[i].rspq); in free_msix_queue_irqs()
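
request_msix_queue_irqs() and free_msix_queue_irqs() above are mirror images: vectors are requested in a fixed order (fw_evtq, then eth, ofld, rdma, rdma-ciq queues), and on failure the unwind labels free everything already requested in reverse with free_irq(msix_info[--msi_index].vec). A small self-contained sketch of that acquire-in-order / release-in-reverse idiom (the resource type and failure point are invented):

    #include <stdio.h>

    static int acquire(int i)            /* stands in for request_irq() */
    {
        if (i == 3) return -1;           /* simulate a failure mid-way */
        printf("acquired %d\n", i);
        return 0;
    }

    static void release(int i)           /* stands in for free_irq() */
    {
        printf("released %d\n", i);
    }

    static int acquire_all(int n)
    {
        int i;

        for (i = 0; i < n; i++)
            if (acquire(i) < 0)
                goto unwind;
        return 0;

    unwind:
        /* Free in reverse only what was actually acquired, exactly as
         * the driver's error path does. */
        while (--i >= 0)
            release(i);
        return -1;
    }

    int main(void)
    {
        return acquire_all(5) ? 1 : 0;
    }
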
886 static int setup_rss(struct adapter *adap) in setup_rss() argument
890 for_each_port(adap, i) { in setup_rss()
891 const struct port_info *pi = adap2pinfo(adap, i); in setup_rss()
912 static void quiesce_rx(struct adapter *adap) in quiesce_rx() argument
916 for (i = 0; i < adap->sge.ingr_sz; i++) { in quiesce_rx()
917 struct sge_rspq *q = adap->sge.ingr_map[i]; in quiesce_rx()
931 static void disable_interrupts(struct adapter *adap) in disable_interrupts() argument
933 if (adap->flags & FULL_INIT_DONE) { in disable_interrupts()
934 t4_intr_disable(adap); in disable_interrupts()
935 if (adap->flags & USING_MSIX) { in disable_interrupts()
936 free_msix_queue_irqs(adap); in disable_interrupts()
937 free_irq(adap->msix_info[0].vec, adap); in disable_interrupts()
939 free_irq(adap->pdev->irq, adap); in disable_interrupts()
941 quiesce_rx(adap); in disable_interrupts()
948 static void enable_rx(struct adapter *adap) in enable_rx() argument
952 for (i = 0; i < adap->sge.ingr_sz; i++) { in enable_rx()
953 struct sge_rspq *q = adap->sge.ingr_map[i]; in enable_rx()
962 t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A), in enable_rx()
968 static int alloc_ofld_rxqs(struct adapter *adap, struct sge_ofld_rxq *q, in alloc_ofld_rxqs() argument
977 err = t4_sge_alloc_rxq(adap, &q->rspq, false, in alloc_ofld_rxqs()
978 adap->port[i / per_chan], in alloc_ofld_rxqs()
998 static int setup_sge_queues(struct adapter *adap) in setup_sge_queues() argument
1001 struct sge *s = &adap->sge; in setup_sge_queues()
1006 if (adap->flags & USING_MSIX) in setup_sge_queues()
1009 err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0, in setup_sge_queues()
1029 err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0], in setup_sge_queues()
1032 freeout: t4_free_sge_resources(adap); in setup_sge_queues()
1036 for_each_port(adap, i) { in setup_sge_queues()
1037 struct net_device *dev = adap->port[i]; in setup_sge_queues()
1045 err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, in setup_sge_queues()
1054 err = t4_sge_alloc_eth_txq(adap, t, dev, in setup_sge_queues()
1062 j = s->ofldqsets / adap->params.nports; /* ofld queues per channel */ in setup_sge_queues()
1064 err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i], in setup_sge_queues()
1065 adap->port[i / j], in setup_sge_queues()
1072 err = alloc_ofld_rxqs(adap, firstq, nq, per_chan, msi_idx, ids); \ in setup_sge_queues()
1081 j = s->rdmaciqs / adap->params.nports; /* rdmaq queues per channel */ in setup_sge_queues()
1086 for_each_port(adap, i) { in setup_sge_queues()
1091 err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i], in setup_sge_queues()
1098 t4_write_reg(adap, is_t4(adap->params.chip) ? in setup_sge_queues()
1101 RSSCONTROL_V(netdev2pinfo(adap->port[0])->tx_chan) | in setup_sge_queues()
1327 static inline int is_offload(const struct adapter *adap) in is_offload() argument
1329 return adap->params.offload; in is_offload()
1376 struct adapter *adap = q->adap; in cxgb4_set_rspq_intr_params() local
1385 new_idx = closest_thres(&adap->sge, cnt); in cxgb4_set_rspq_intr_params()
1392 err = t4_set_params(adap, adap->fn, adap->fn, 0, 1, &v, in cxgb4_set_rspq_intr_params()
1400 us = us == 0 ? 6 : closest_timer(&adap->sge, us); in cxgb4_set_rspq_intr_params()
1422 static int setup_debugfs(struct adapter *adap) in setup_debugfs() argument
1424 if (IS_ERR_OR_NULL(adap->debugfs_root)) in setup_debugfs()
1428 t4_setup_debugfs(adap); in setup_debugfs()
1585 struct adapter *adap = container_of(t, struct adapter, tids); in cxgb4_queue_tid_release() local
1587 spin_lock_bh(&adap->tid_release_lock); in cxgb4_queue_tid_release()
1588 *p = adap->tid_release_head; in cxgb4_queue_tid_release()
1590 adap->tid_release_head = (void **)((uintptr_t)p | chan); in cxgb4_queue_tid_release()
1591 if (!adap->tid_release_task_busy) { in cxgb4_queue_tid_release()
1592 adap->tid_release_task_busy = true; in cxgb4_queue_tid_release()
1593 queue_work(adap->workq, &adap->tid_release_task); in cxgb4_queue_tid_release()
1595 spin_unlock_bh(&adap->tid_release_lock); in cxgb4_queue_tid_release()
1604 struct adapter *adap; in process_tid_release_list() local
1606 adap = container_of(work, struct adapter, tid_release_task); in process_tid_release_list()
1608 spin_lock_bh(&adap->tid_release_lock); in process_tid_release_list()
1609 while (adap->tid_release_head) { in process_tid_release_list()
1610 void **p = adap->tid_release_head; in process_tid_release_list()
1614 adap->tid_release_head = *p; in process_tid_release_list()
1616 spin_unlock_bh(&adap->tid_release_lock); in process_tid_release_list()
1622 mk_tid_release(skb, chan, p - adap->tids.tid_tab); in process_tid_release_list()
1623 t4_ofld_send(adap, skb); in process_tid_release_list()
1624 spin_lock_bh(&adap->tid_release_lock); in process_tid_release_list()
1626 adap->tid_release_task_busy = false; in process_tid_release_list()
1627 spin_unlock_bh(&adap->tid_release_lock); in process_tid_release_list()
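
cxgb4_queue_tid_release()/process_tid_release_list() above chain pending tid-release requests through the tid table itself and smuggle the channel number into the low bits of each link pointer ((uintptr_t)p | chan), relying on pointer alignment; a work item drains the list later. A hedged userspace sketch of that tagged-pointer list (2-bit tag, all names invented):

    #include <stdio.h>
    #include <stdint.h>

    #define TAG_MASK 0x3u   /* low bits free because entries are pointer-aligned */

    static void *head;      /* stands in for adap->tid_release_head */
    static void *slots[4];  /* stands in for adap->tids.tid_tab */

    static void push(unsigned int idx, unsigned int chan)
    {
        void **p = &slots[idx];
        *p = head;                             /* link to previous head */
        head = (void *)((uintptr_t)p | chan);  /* tag pointer with channel */
    }

    static void drain(void)
    {
        while (head) {
            void **p = (void **)((uintptr_t)head & ~(uintptr_t)TAG_MASK);
            unsigned int chan = (uintptr_t)head & TAG_MASK;

            printf("release tid %td on chan %u\n", p - slots, chan);
            head = *p;      /* advance before reusing the slot */
            *p = NULL;
        }
    }

    int main(void)
    {
        push(0, 1);
        push(2, 3);
        drain();
        return 0;
    }
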
1638 struct adapter *adap = container_of(t, struct adapter, tids); in cxgb4_remove_tid() local
1645 t4_ofld_send(adap, skb); in cxgb4_remove_tid()
1661 struct adapter *adap = container_of(t, struct adapter, tids); in tid_init() local
1697 (is_t4(adap->params.chip) || is_t5(adap->params.chip))) in tid_init()
1720 struct adapter *adap; in cxgb4_create_server() local
1728 adap = netdev2adap(dev); in cxgb4_create_server()
1736 chan = rxq_to_chan(&adap->sge, queue); in cxgb4_create_server()
1740 ret = t4_mgmt_tx(adap, skb); in cxgb4_create_server()
1761 struct adapter *adap; in cxgb4_create_server6() local
1769 adap = netdev2adap(dev); in cxgb4_create_server6()
1779 chan = rxq_to_chan(&adap->sge, queue); in cxgb4_create_server6()
1783 ret = t4_mgmt_tx(adap, skb); in cxgb4_create_server6()
1792 struct adapter *adap; in cxgb4_remove_server() local
1796 adap = netdev2adap(dev); in cxgb4_remove_server()
1807 ret = t4_mgmt_tx(adap, skb); in cxgb4_remove_server()
1920 struct adapter *adap = netdev2adap(dev); in cxgb4_dbfifo_count() local
1923 v1 = t4_read_reg(adap, SGE_DBFIFO_STATUS_A); in cxgb4_dbfifo_count()
1924 v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2_A); in cxgb4_dbfifo_count()
1925 if (is_t4(adap->params.chip)) { in cxgb4_dbfifo_count()
1963 struct adapter *adap = pci_get_drvdata(pdev); in cxgb4_get_tcp_stats() local
1965 spin_lock(&adap->stats_lock); in cxgb4_get_tcp_stats()
1966 t4_tp_get_tcp_stats(adap, v4, v6); in cxgb4_get_tcp_stats()
1967 spin_unlock(&adap->stats_lock); in cxgb4_get_tcp_stats()
1974 struct adapter *adap = netdev2adap(dev); in cxgb4_iscsi_init() local
1976 t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK_A, tag_mask); in cxgb4_iscsi_init()
1977 t4_write_reg(adap, ULP_RX_ISCSI_PSZ_A, HPZ0_V(pgsz_order[0]) | in cxgb4_iscsi_init()
1985 struct adapter *adap = netdev2adap(dev); in cxgb4_flush_eq_cache() local
1988 ret = t4_fwaddrspace_write(adap, adap->mbox, in cxgb4_flush_eq_cache()
1994 static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx) in read_eq_indices() argument
1996 u32 addr = t4_read_reg(adap, SGE_DBQ_CTXT_BADDR_A) + 24 * qid + 8; in read_eq_indices()
2000 spin_lock(&adap->win0_lock); in read_eq_indices()
2001 ret = t4_memory_rw(adap, 0, MEM_EDC0, addr, in read_eq_indices()
2004 spin_unlock(&adap->win0_lock); in read_eq_indices()
2015 struct adapter *adap = netdev2adap(dev); in cxgb4_sync_txq_pidx() local
2019 ret = read_eq_indices(adap, qid, &hw_pidx, &hw_cidx); in cxgb4_sync_txq_pidx()
2032 if (is_t4(adap->params.chip)) in cxgb4_sync_txq_pidx()
2037 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A), in cxgb4_sync_txq_pidx()
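
read_eq_indices()/cxgb4_sync_txq_pidx() above pull a queue's hardware producer/consumer indices out of adapter memory and, when the software pidx disagrees, work out how far to push the doorbell. The wraparound-aware delta between two ring indices is the standard modular-arithmetic step; a minimal sketch (the queue size and names are assumptions, not read from the listing):

    #include <stdio.h>

    /* Entries the hardware is behind by, given software and hardware
     * producer indices on a ring of qsize entries. */
    static unsigned int ring_delta(unsigned int sw_pidx, unsigned int hw_pidx,
                                   unsigned int qsize)
    {
        int delta = (int)sw_pidx - (int)hw_pidx;

        if (delta < 0)          /* software index already wrapped past 0 */
            delta += (int)qsize;
        return (unsigned int)delta;
    }

    int main(void)
    {
        printf("%u\n", ring_delta(10, 7, 1024));   /* 3: no wrap */
        printf("%u\n", ring_delta(2, 1020, 1024)); /* 6: wrapped */
        return 0;
    }
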
2047 struct adapter *adap; in cxgb4_disable_db_coalescing() local
2049 adap = netdev2adap(dev); in cxgb4_disable_db_coalescing()
2050 t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, NOCOALESCE_F, in cxgb4_disable_db_coalescing()
2057 struct adapter *adap; in cxgb4_enable_db_coalescing() local
2059 adap = netdev2adap(dev); in cxgb4_enable_db_coalescing()
2060 t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, NOCOALESCE_F, 0); in cxgb4_enable_db_coalescing()
2066 struct adapter *adap; in cxgb4_read_tpte() local
2072 adap = netdev2adap(dev); in cxgb4_read_tpte()
2074 offset = ((stag >> 8) * 32) + adap->vres.stag.start; in cxgb4_read_tpte()
2082 size = t4_read_reg(adap, MA_EDRAM0_BAR_A); in cxgb4_read_tpte()
2084 size = t4_read_reg(adap, MA_EDRAM1_BAR_A); in cxgb4_read_tpte()
2086 size = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A); in cxgb4_read_tpte()
2103 } else if (is_t4(adap->params.chip)) { in cxgb4_read_tpte()
2107 size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A); in cxgb4_read_tpte()
2120 spin_lock(&adap->win0_lock); in cxgb4_read_tpte()
2121 ret = t4_memory_rw(adap, 0, memtype, memaddr, 32, tpte, T4_MEMORY_READ); in cxgb4_read_tpte()
2122 spin_unlock(&adap->win0_lock); in cxgb4_read_tpte()
2126 dev_err(adap->pdev_dev, "stag %#x, offset %#x out of range\n", in cxgb4_read_tpte()
2135 struct adapter *adap; in cxgb4_read_sge_timestamp() local
2137 adap = netdev2adap(dev); in cxgb4_read_sge_timestamp()
2138 lo = t4_read_reg(adap, SGE_TIMESTAMP_LO_A); in cxgb4_read_sge_timestamp()
2139 hi = TSVAL_G(t4_read_reg(adap, SGE_TIMESTAMP_HI_A)); in cxgb4_read_sge_timestamp()
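
cxgb4_read_sge_timestamp() above reads SGE_TIMESTAMP_LO_A and the high half via SGE_TIMESTAMP_HI_A, which get combined into one 64-bit value. When the halves come from separate 32-bit reads, the usual guard against the low word rolling over between them is a hi/lo/hi re-read loop; a hedged sketch with a fake register (whether the driver needs or uses this loop is not shown in the listing):

    #include <stdio.h>
    #include <stdint.h>

    static uint64_t fake_counter = 0x00000001fffffffeULL;

    /* Stand-ins for 32-bit register reads; the low read advances the
     * counter to simulate time passing between accesses. */
    static uint32_t read_lo(void) { return (uint32_t)(fake_counter++); }
    static uint32_t read_hi(void) { return (uint32_t)(fake_counter >> 32); }

    static uint64_t read_ts64(void)
    {
        uint32_t hi, lo, hi2;

        do {
            hi  = read_hi();
            lo  = read_lo();
            hi2 = read_hi();
        } while (hi != hi2);    /* low word wrapped mid-read: retry */
        return ((uint64_t)hi << 32) | lo;
    }

    int main(void)
    {
        printf("0x%016llx\n", (unsigned long long)read_ts64());
        return 0;
    }
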
2194 static void drain_db_fifo(struct adapter *adap, int usecs) in drain_db_fifo() argument
2199 v1 = t4_read_reg(adap, SGE_DBFIFO_STATUS_A); in drain_db_fifo()
2200 v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2_A); in drain_db_fifo()
2201 if (is_t4(adap->params.chip)) { in drain_db_fifo()
2225 static void enable_txq_db(struct adapter *adap, struct sge_txq *q) in enable_txq_db() argument
2233 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A), in enable_txq_db()
2241 static void disable_dbs(struct adapter *adap) in disable_dbs() argument
2245 for_each_ethrxq(&adap->sge, i) in disable_dbs()
2246 disable_txq_db(&adap->sge.ethtxq[i].q); in disable_dbs()
2247 for_each_ofldrxq(&adap->sge, i) in disable_dbs()
2248 disable_txq_db(&adap->sge.ofldtxq[i].q); in disable_dbs()
2249 for_each_port(adap, i) in disable_dbs()
2250 disable_txq_db(&adap->sge.ctrlq[i].q); in disable_dbs()
2253 static void enable_dbs(struct adapter *adap) in enable_dbs() argument
2257 for_each_ethrxq(&adap->sge, i) in enable_dbs()
2258 enable_txq_db(adap, &adap->sge.ethtxq[i].q); in enable_dbs()
2259 for_each_ofldrxq(&adap->sge, i) in enable_dbs()
2260 enable_txq_db(adap, &adap->sge.ofldtxq[i].q); in enable_dbs()
2261 for_each_port(adap, i) in enable_dbs()
2262 enable_txq_db(adap, &adap->sge.ctrlq[i].q); in enable_dbs()
2265 static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd) in notify_rdma_uld() argument
2267 if (adap->uld_handle[CXGB4_ULD_RDMA]) in notify_rdma_uld()
2268 ulds[CXGB4_ULD_RDMA].control(adap->uld_handle[CXGB4_ULD_RDMA], in notify_rdma_uld()
2274 struct adapter *adap; in process_db_full() local
2276 adap = container_of(work, struct adapter, db_full_task); in process_db_full()
2278 drain_db_fifo(adap, dbfifo_drain_delay); in process_db_full()
2279 enable_dbs(adap); in process_db_full()
2280 notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY); in process_db_full()
2281 t4_set_reg_field(adap, SGE_INT_ENABLE3_A, in process_db_full()
2286 static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q) in sync_txq_pidx() argument
2292 ret = read_eq_indices(adap, (u16)q->cntxt_id, &hw_pidx, &hw_cidx); in sync_txq_pidx()
2304 if (is_t4(adap->params.chip)) in sync_txq_pidx()
2309 t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A), in sync_txq_pidx()
2317 CH_WARN(adap, "DB drop recovery failed.\n"); in sync_txq_pidx()
2319 static void recover_all_queues(struct adapter *adap) in recover_all_queues() argument
2323 for_each_ethrxq(&adap->sge, i) in recover_all_queues()
2324 sync_txq_pidx(adap, &adap->sge.ethtxq[i].q); in recover_all_queues()
2325 for_each_ofldrxq(&adap->sge, i) in recover_all_queues()
2326 sync_txq_pidx(adap, &adap->sge.ofldtxq[i].q); in recover_all_queues()
2327 for_each_port(adap, i) in recover_all_queues()
2328 sync_txq_pidx(adap, &adap->sge.ctrlq[i].q); in recover_all_queues()
2333 struct adapter *adap; in process_db_drop() local
2335 adap = container_of(work, struct adapter, db_drop_task); in process_db_drop()
2337 if (is_t4(adap->params.chip)) { in process_db_drop()
2338 drain_db_fifo(adap, dbfifo_drain_delay); in process_db_drop()
2339 notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP); in process_db_drop()
2340 drain_db_fifo(adap, dbfifo_drain_delay); in process_db_drop()
2341 recover_all_queues(adap); in process_db_drop()
2342 drain_db_fifo(adap, dbfifo_drain_delay); in process_db_drop()
2343 enable_dbs(adap); in process_db_drop()
2344 notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY); in process_db_drop()
2346 u32 dropped_db = t4_read_reg(adap, 0x010ac); in process_db_drop()
2353 ret = cxgb4_t4_bar2_sge_qregs(adap, qid, T4_BAR2_QTYPE_EGRESS, in process_db_drop()
2356 dev_err(adap->pdev_dev, "doorbell drop recovery: " in process_db_drop()
2360 adap->bar2 + bar2_qoffset + SGE_UDB_KDOORBELL); in process_db_drop()
2363 t4_set_reg_field(adap, 0x10b0, 1<<15, 1<<15); in process_db_drop()
2366 t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, DROPPED_DB_F, 0); in process_db_drop()
2369 void t4_db_full(struct adapter *adap) in t4_db_full() argument
2371 if (is_t4(adap->params.chip)) { in t4_db_full()
2372 disable_dbs(adap); in t4_db_full()
2373 notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL); in t4_db_full()
2374 t4_set_reg_field(adap, SGE_INT_ENABLE3_A, in t4_db_full()
2376 queue_work(adap->workq, &adap->db_full_task); in t4_db_full()
2380 void t4_db_dropped(struct adapter *adap) in t4_db_dropped() argument
2382 if (is_t4(adap->params.chip)) { in t4_db_dropped()
2383 disable_dbs(adap); in t4_db_dropped()
2384 notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL); in t4_db_dropped()
2386 queue_work(adap->workq, &adap->db_drop_task); in t4_db_dropped()
2389 static void uld_attach(struct adapter *adap, unsigned int uld) in uld_attach() argument
2395 lli.pdev = adap->pdev; in uld_attach()
2396 lli.pf = adap->fn; in uld_attach()
2397 lli.l2t = adap->l2t; in uld_attach()
2398 lli.tids = &adap->tids; in uld_attach()
2399 lli.ports = adap->port; in uld_attach()
2400 lli.vr = &adap->vres; in uld_attach()
2401 lli.mtus = adap->params.mtus; in uld_attach()
2403 lli.rxq_ids = adap->sge.rdma_rxq; in uld_attach()
2404 lli.ciq_ids = adap->sge.rdma_ciq; in uld_attach()
2405 lli.nrxq = adap->sge.rdmaqs; in uld_attach()
2406 lli.nciq = adap->sge.rdmaciqs; in uld_attach()
2408 lli.rxq_ids = adap->sge.ofld_rxq; in uld_attach()
2409 lli.nrxq = adap->sge.ofldqsets; in uld_attach()
2411 lli.ntxq = adap->sge.ofldqsets; in uld_attach()
2412 lli.nchan = adap->params.nports; in uld_attach()
2413 lli.nports = adap->params.nports; in uld_attach()
2414 lli.wr_cred = adap->params.ofldq_wr_cred; in uld_attach()
2415 lli.adapter_type = adap->params.chip; in uld_attach()
2416 lli.iscsi_iolen = MAXRXDATA_G(t4_read_reg(adap, TP_PARA_REG2_A)); in uld_attach()
2417 lli.cclk_ps = 1000000000 / adap->params.vpd.cclk; in uld_attach()
2418 lli.udb_density = 1 << adap->params.sge.eq_qpp; in uld_attach()
2419 lli.ucq_density = 1 << adap->params.sge.iq_qpp; in uld_attach()
2420 lli.filt_mode = adap->params.tp.vlan_pri_map; in uld_attach()
2424 lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS_A); in uld_attach()
2425 lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL_A); in uld_attach()
2426 lli.fw_vers = adap->params.fw_vers; in uld_attach()
2428 lli.sge_ingpadboundary = adap->sge.fl_align; in uld_attach()
2429 lli.sge_egrstatuspagesize = adap->sge.stat_len; in uld_attach()
2430 lli.sge_pktshift = adap->sge.pktshift; in uld_attach()
2431 lli.enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN; in uld_attach()
2432 lli.max_ordird_qp = adap->params.max_ordird_qp; in uld_attach()
2433 lli.max_ird_adapter = adap->params.max_ird_adapter; in uld_attach()
2434 lli.ulptx_memwrite_dsgl = adap->params.ulptx_memwrite_dsgl; in uld_attach()
2438 dev_warn(adap->pdev_dev, in uld_attach()
2444 adap->uld_handle[uld] = handle; in uld_attach()
2451 if (adap->flags & FULL_INIT_DONE) in uld_attach()
2455 static void attach_ulds(struct adapter *adap) in attach_ulds() argument
2460 list_add_tail_rcu(&adap->rcu_node, &adap_rcu_list); in attach_ulds()
2464 list_add_tail(&adap->list_node, &adapter_list); in attach_ulds()
2467 uld_attach(adap, i); in attach_ulds()
2471 static void detach_ulds(struct adapter *adap) in detach_ulds() argument
2476 list_del(&adap->list_node); in detach_ulds()
2478 if (adap->uld_handle[i]) { in detach_ulds()
2479 ulds[i].state_change(adap->uld_handle[i], in detach_ulds()
2481 adap->uld_handle[i] = NULL; in detach_ulds()
2490 list_del_rcu(&adap->rcu_node); in detach_ulds()
2494 static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state) in notify_ulds() argument
2500 if (adap->uld_handle[i]) in notify_ulds()
2501 ulds[i].state_change(adap->uld_handle[i], new_state); in notify_ulds()
2517 struct adapter *adap; in cxgb4_register_uld() local
2527 list_for_each_entry(adap, &adapter_list, list_node) in cxgb4_register_uld()
2528 uld_attach(adap, type); in cxgb4_register_uld()
2542 struct adapter *adap; in cxgb4_unregister_uld() local
2547 list_for_each_entry(adap, &adapter_list, list_node) in cxgb4_unregister_uld()
2548 adap->uld_handle[type] = NULL; in cxgb4_unregister_uld()
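
The uld_attach()/attach_ulds()/cxgb4_register_uld() entries above implement a simple upper-layer-driver registry: a global table of callbacks indexed by ULD type, a per-adapter array of opaque handles returned by each ULD's add hook, and state_change() notifications fanned out on detach. A compact sketch of that handle-table pattern (types, states, and callback names invented for illustration):

    #include <stdio.h>

    enum uld_type { ULD_RDMA, ULD_ISCSI, ULD_MAX };
    enum uld_state { STATE_UP, STATE_DETACH };

    struct uld_info {
        void *(*add)(int adapter_id);              /* returns opaque handle */
        void  (*state_change)(void *h, int state);
    };

    static struct uld_info ulds[ULD_MAX];          /* global callback table */
    static void *uld_handle[ULD_MAX];              /* per-adapter handles   */

    static void *rdma_add(int id) { printf("rdma add %d\n", id); return ulds; }
    static void rdma_sc(void *h, int s) { printf("rdma state %d\n", s); }

    static void attach_all(int adapter_id)
    {
        for (int i = 0; i < ULD_MAX; i++)
            if (ulds[i].add)
                uld_handle[i] = ulds[i].add(adapter_id);
    }

    static void detach_all(void)
    {
        for (int i = 0; i < ULD_MAX; i++)
            if (uld_handle[i]) {
                ulds[i].state_change(uld_handle[i], STATE_DETACH);
                uld_handle[i] = NULL;   /* as cxgb4_unregister_uld() does */
            }
    }

    int main(void)
    {
        ulds[ULD_RDMA] = (struct uld_info){ rdma_add, rdma_sc };
        attach_all(0);
        detach_all();
        return 0;
    }
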
2563 struct adapter *adap; in cxgb4_inet6addr_handler() local
2569 list_for_each_entry(adap, &adapter_list, list_node) { in cxgb4_inet6addr_handler()
2572 cxgb4_clip_get(adap->port[0], in cxgb4_inet6addr_handler()
2576 cxgb4_clip_release(adap->port[0], in cxgb4_inet6addr_handler()
2610 static void update_clip(const struct adapter *adap) in update_clip() argument
2619 dev = adap->port[i]; in update_clip()
2642 static int cxgb_up(struct adapter *adap) in cxgb_up() argument
2646 err = setup_sge_queues(adap); in cxgb_up()
2649 err = setup_rss(adap); in cxgb_up()
2653 if (adap->flags & USING_MSIX) { in cxgb_up()
2654 name_msix_vecs(adap); in cxgb_up()
2655 err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0, in cxgb_up()
2656 adap->msix_info[0].desc, adap); in cxgb_up()
2660 err = request_msix_queue_irqs(adap); in cxgb_up()
2662 free_irq(adap->msix_info[0].vec, adap); in cxgb_up()
2666 err = request_irq(adap->pdev->irq, t4_intr_handler(adap), in cxgb_up()
2667 (adap->flags & USING_MSI) ? 0 : IRQF_SHARED, in cxgb_up()
2668 adap->port[0]->name, adap); in cxgb_up()
2672 enable_rx(adap); in cxgb_up()
2673 t4_sge_start(adap); in cxgb_up()
2674 t4_intr_enable(adap); in cxgb_up()
2675 adap->flags |= FULL_INIT_DONE; in cxgb_up()
2676 notify_ulds(adap, CXGB4_STATE_UP); in cxgb_up()
2678 update_clip(adap); in cxgb_up()
2683 dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err); in cxgb_up()
2685 t4_free_sge_resources(adap); in cxgb_up()
2775 struct adapter *adap; in cxgb4_create_server_filter() local
2779 adap = netdev2adap(dev); in cxgb4_create_server_filter()
2782 stid -= adap->tids.sftid_base; in cxgb4_create_server_filter()
2783 stid += adap->tids.nftids; in cxgb4_create_server_filter()
2787 f = &adap->tids.ftid_tab[stid]; in cxgb4_create_server_filter()
2796 clear_filter(adap, f); in cxgb4_create_server_filter()
2808 if (adap->params.tp.vlan_pri_map & PORT_F) { in cxgb4_create_server_filter()
2814 if (adap->params.tp.vlan_pri_map & PROTOCOL_F) { in cxgb4_create_server_filter()
2825 ret = set_filter_wr(adap, stid); in cxgb4_create_server_filter()
2827 clear_filter(adap, f); in cxgb4_create_server_filter()
2840 struct adapter *adap; in cxgb4_remove_server_filter() local
2842 adap = netdev2adap(dev); in cxgb4_remove_server_filter()
2845 stid -= adap->tids.sftid_base; in cxgb4_remove_server_filter()
2846 stid += adap->tids.nftids; in cxgb4_remove_server_filter()
2848 f = &adap->tids.ftid_tab[stid]; in cxgb4_remove_server_filter()
2852 ret = delete_filter(adap, stid); in cxgb4_remove_server_filter()
2992 struct adapter *adap = pi->adapter; in cxgb_netpoll() local
2994 if (adap->flags & USING_MSIX) { in cxgb_netpoll()
2996 struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset]; in cxgb_netpoll()
3001 t4_intr_handler(adap)(0, adap); in cxgb_netpoll()
3030 void t4_fatal_err(struct adapter *adap) in t4_fatal_err() argument
3032 t4_set_reg_field(adap, SGE_CONTROL_A, GLOBALENABLE_F, 0); in t4_fatal_err()
3033 t4_intr_disable(adap); in t4_fatal_err()
3034 dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n"); in t4_fatal_err()
3042 static u32 t4_read_pcie_cfg4(struct adapter *adap, int reg) in t4_read_pcie_cfg4() argument
3060 (FW_LDST_CMD_LC_F | FW_LDST_CMD_FN_V(adap->fn)); in t4_read_pcie_cfg4()
3062 ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd, sizeof(ldst_cmd), in t4_read_pcie_cfg4()
3071 t4_hw_pci_read_cfg4(adap, reg, &val); in t4_read_pcie_cfg4()
3076 static void setup_memwin(struct adapter *adap) in setup_memwin() argument
3080 if (is_t4(adap->params.chip)) { in setup_memwin()
3092 bar0 = t4_read_pcie_cfg4(adap, PCI_BASE_ADDRESS_0); in setup_memwin()
3094 adap->t4_bar0 = bar0; in setup_memwin()
3107 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 0), in setup_memwin()
3110 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 1), in setup_memwin()
3113 t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 2), in setup_memwin()
3116 t4_read_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 2)); in setup_memwin()
3119 static void setup_memwin_rdma(struct adapter *adap) in setup_memwin_rdma() argument
3121 if (adap->vres.ocq.size) { in setup_memwin_rdma()
3125 start = t4_read_pcie_cfg4(adap, PCI_BASE_ADDRESS_2); in setup_memwin_rdma()
3127 start += OCQ_WIN_OFFSET(adap->pdev, &adap->vres); in setup_memwin_rdma()
3128 sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10; in setup_memwin_rdma()
3129 t4_write_reg(adap, in setup_memwin_rdma()
3132 t4_write_reg(adap, in setup_memwin_rdma()
3134 adap->vres.ocq.start); in setup_memwin_rdma()
3135 t4_read_reg(adap, in setup_memwin_rdma()
3140 static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c) in adap_init1() argument
3150 ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), c); in adap_init1()
3161 dev_err(adap->pdev_dev, "virtualization ACLs not supported"); in adap_init1()
3166 ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), NULL); in adap_init1()
3170 ret = t4_config_glbl_rss(adap, adap->fn, in adap_init1()
3177 ret = t4_cfg_pfvf(adap, adap->fn, adap->fn, 0, adap->sge.egr_sz, 64, in adap_init1()
3183 t4_sge_init(adap); in adap_init1()
3186 t4_write_reg(adap, TP_SHIFT_CNT_A, 0x64f8849); in adap_init1()
3187 t4_write_reg(adap, ULP_RX_TDDP_PSZ_A, HPZ0_V(PAGE_SHIFT - 12)); in adap_init1()
3188 t4_write_reg(adap, TP_PIO_ADDR_A, TP_INGRESS_CONFIG_A); in adap_init1()
3189 v = t4_read_reg(adap, TP_PIO_DATA_A); in adap_init1()
3190 t4_write_reg(adap, TP_PIO_DATA_A, v & ~CSUM_HAS_PSEUDO_HDR_F); in adap_init1()
3193 adap->params.tp.tx_modq_map = 0xE4; in adap_init1()
3194 t4_write_reg(adap, TP_TX_MOD_QUEUE_REQ_MAP_A, in adap_init1()
3195 TX_MOD_QUEUE_REQ_MAP_V(adap->params.tp.tx_modq_map)); in adap_init1()
3199 t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, in adap_init1()
3201 t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, in adap_init1()
3203 t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, in adap_init1()
3207 if (is_offload(adap)) { in adap_init1()
3208 t4_write_reg(adap, TP_TX_MOD_QUEUE_WEIGHT0_A, in adap_init1()
3213 t4_write_reg(adap, TP_TX_MOD_CHANNEL_WEIGHT_A, in adap_init1()
3221 return t4_early_init(adap, adap->fn); in adap_init1()
3526 static int adap_init0(struct adapter *adap) in adap_init0() argument
3538 ret = t4_init_devlog_params(adap); in adap_init0()
3543 ret = t4_fw_hello(adap, adap->mbox, adap->mbox, MASTER_MAY, &state); in adap_init0()
3545 dev_err(adap->pdev_dev, "could not connect to FW, error %d\n", in adap_init0()
3549 if (ret == adap->mbox) in adap_init0()
3550 adap->flags |= MASTER_PF; in adap_init0()
3559 t4_get_fw_version(adap, &adap->params.fw_vers); in adap_init0()
3560 t4_get_tp_version(adap, &adap->params.tp_vers); in adap_init0()
3561 if ((adap->flags & MASTER_PF) && state != DEV_STATE_INIT) { in adap_init0()
3571 fw_info = find_fw_info(CHELSIO_CHIP_VERSION(adap->params.chip)); in adap_init0()
3573 dev_err(adap->pdev_dev, in adap_init0()
3575 CHELSIO_CHIP_VERSION(adap->params.chip)); in adap_init0()
3586 adap->pdev_dev); in adap_init0()
3588 dev_err(adap->pdev_dev, in adap_init0()
3597 ret = t4_prep_fw(adap, fw_info, fw_data, fw_size, card_fw, in adap_init0()
3615 ret = get_vpd_params(adap, &adap->params.vpd); in adap_init0()
3627 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1, &v, &port_vec); in adap_init0()
3631 adap->params.nports = hweight32(port_vec); in adap_init0()
3632 adap->params.portvec = port_vec; in adap_init0()
3638 dev_info(adap->pdev_dev, "Coming up as %s: "\ in adap_init0()
3640 adap->flags & MASTER_PF ? "MASTER" : "SLAVE"); in adap_init0()
3642 dev_info(adap->pdev_dev, "Coming up as MASTER: "\ in adap_init0()
3650 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1, in adap_init0()
3657 dev_err(adap->pdev_dev, "firmware doesn't support " in adap_init0()
3666 ret = adap_init0_config(adap, reset); in adap_init0()
3668 dev_err(adap->pdev_dev, "no Configuration File " in adap_init0()
3673 dev_err(adap->pdev_dev, "could not initialize " in adap_init0()
3683 ret = t4_sge_init(adap); in adap_init0()
3687 if (is_bypass_device(adap->pdev->device)) in adap_init0()
3688 adap->params.bypass = 1; in adap_init0()
3709 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, params, val); in adap_init0()
3712 adap->sge.egr_start = val[0]; in adap_init0()
3713 adap->l2t_start = val[1]; in adap_init0()
3714 adap->l2t_end = val[2]; in adap_init0()
3715 adap->tids.ftid_base = val[3]; in adap_init0()
3716 adap->tids.nftids = val[4] - val[3] + 1; in adap_init0()
3717 adap->sge.ingr_start = val[5]; in adap_init0()
3727 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val); in adap_init0()
3730 adap->sge.egr_sz = val[0] - adap->sge.egr_start + 1; in adap_init0()
3731 adap->sge.ingr_sz = val[1] - adap->sge.ingr_start + 1; in adap_init0()
3733 adap->sge.egr_map = kcalloc(adap->sge.egr_sz, in adap_init0()
3734 sizeof(*adap->sge.egr_map), GFP_KERNEL); in adap_init0()
3735 if (!adap->sge.egr_map) { in adap_init0()
3740 adap->sge.ingr_map = kcalloc(adap->sge.ingr_sz, in adap_init0()
3741 sizeof(*adap->sge.ingr_map), GFP_KERNEL); in adap_init0()
3742 if (!adap->sge.ingr_map) { in adap_init0()
3750 adap->sge.starving_fl = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz), in adap_init0()
3752 if (!adap->sge.starving_fl) { in adap_init0()
3757 adap->sge.txq_maperr = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz), in adap_init0()
3759 if (!adap->sge.txq_maperr) { in adap_init0()
3766 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val); in adap_init0()
3769 adap->clipt_start = val[0]; in adap_init0()
3770 adap->clipt_end = val[1]; in adap_init0()
3775 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val); in adap_init0()
3780 adap->flags |= FW_OFLD_CONN; in adap_init0()
3781 adap->tids.aftid_base = val[0]; in adap_init0()
3782 adap->tids.aftid_end = val[1]; in adap_init0()
3792 (void) t4_set_params(adap, adap->mbox, adap->fn, 0, 1, params, val); in adap_init0()
3800 if (is_t4(adap->params.chip)) { in adap_init0()
3801 adap->params.ulptx_memwrite_dsgl = false; in adap_init0()
3804 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, in adap_init0()
3806 adap->params.ulptx_memwrite_dsgl = (ret == 0 && val[0] != 0); in adap_init0()
3817 ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd), in adap_init0()
3830 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, in adap_init0()
3834 adap->tids.ntids = val[0]; in adap_init0()
3835 adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS); in adap_init0()
3836 adap->tids.stid_base = val[1]; in adap_init0()
3837 adap->tids.nstids = val[2] - val[1] + 1; in adap_init0()
3847 if (adap->flags & FW_OFLD_CONN && !is_bypass(adap)) { in adap_init0()
3848 adap->tids.sftid_base = adap->tids.ftid_base + in adap_init0()
3849 DIV_ROUND_UP(adap->tids.nftids, 3); in adap_init0()
3850 adap->tids.nsftids = adap->tids.nftids - in adap_init0()
3851 DIV_ROUND_UP(adap->tids.nftids, 3); in adap_init0()
3852 adap->tids.nftids = adap->tids.sftid_base - in adap_init0()
3853 adap->tids.ftid_base; in adap_init0()
3855 adap->vres.ddp.start = val[3]; in adap_init0()
3856 adap->vres.ddp.size = val[4] - val[3] + 1; in adap_init0()
3857 adap->params.ofldq_wr_cred = val[5]; in adap_init0()
3859 adap->params.offload = 1; in adap_init0()
3868 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, in adap_init0()
3872 adap->vres.stag.start = val[0]; in adap_init0()
3873 adap->vres.stag.size = val[1] - val[0] + 1; in adap_init0()
3874 adap->vres.rq.start = val[2]; in adap_init0()
3875 adap->vres.rq.size = val[3] - val[2] + 1; in adap_init0()
3876 adap->vres.pbl.start = val[4]; in adap_init0()
3877 adap->vres.pbl.size = val[5] - val[4] + 1; in adap_init0()
3885 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, params, in adap_init0()
3889 adap->vres.qp.start = val[0]; in adap_init0()
3890 adap->vres.qp.size = val[1] - val[0] + 1; in adap_init0()
3891 adap->vres.cq.start = val[2]; in adap_init0()
3892 adap->vres.cq.size = val[3] - val[2] + 1; in adap_init0()
3893 adap->vres.ocq.start = val[4]; in adap_init0()
3894 adap->vres.ocq.size = val[5] - val[4] + 1; in adap_init0()
3898 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, in adap_init0()
3901 adap->params.max_ordird_qp = 8; in adap_init0()
3902 adap->params.max_ird_adapter = 32 * adap->tids.ntids; in adap_init0()
3905 adap->params.max_ordird_qp = val[0]; in adap_init0()
3906 adap->params.max_ird_adapter = val[1]; in adap_init0()
3908 dev_info(adap->pdev_dev, in adap_init0()
3910 adap->params.max_ordird_qp, in adap_init0()
3911 adap->params.max_ird_adapter); in adap_init0()
3916 ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, in adap_init0()
3920 adap->vres.iscsi.start = val[0]; in adap_init0()
3921 adap->vres.iscsi.size = val[1] - val[0] + 1; in adap_init0()
3931 t4_read_mtu_tbl(adap, adap->params.mtus, NULL); in adap_init0()
3953 if (adap->params.mtus[i] == 1492) { in adap_init0()
3954 adap->params.mtus[i] = 1488; in adap_init0()
3958 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd, in adap_init0()
3959 adap->params.b_wnd); in adap_init0()
3961 t4_init_sge_params(adap); in adap_init0()
3962 t4_init_tp_params(adap); in adap_init0()
3963 adap->flags |= FW_OK; in adap_init0()
3972 kfree(adap->sge.egr_map); in adap_init0()
3973 kfree(adap->sge.ingr_map); in adap_init0()
3974 kfree(adap->sge.starving_fl); in adap_init0()
3975 kfree(adap->sge.txq_maperr); in adap_init0()
3977 t4_fw_bye(adap, adap->mbox); in adap_init0()
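
Inside adap_init0() above, when FW_OFLD_CONN is in effect the firmware filter region is split with DIV_ROUND_UP so nothing is lost to truncation: the first third of nftids (rounded up) stays as normal filter tids and the remainder becomes server filter tids. A worked sketch of that arithmetic (the region size is an example value):

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        unsigned int ftid_base = 1024, nftids = 496;   /* example region */
        unsigned int sftid_base, nsftids;

        /* Same split as adap_init0(): first third (rounded up) stays as
         * normal filters, the remainder becomes server filters. */
        sftid_base = ftid_base + DIV_ROUND_UP(nftids, 3);
        nsftids    = nftids - DIV_ROUND_UP(nftids, 3);
        nftids     = sftid_base - ftid_base;

        printf("nftids=%u sftid_base=%u nsftids=%u\n",
               nftids, sftid_base, nsftids);   /* 166, 1190, 330 */
        return 0;
    }
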
3987 struct adapter *adap = pci_get_drvdata(pdev); in eeh_err_detected() local
3989 if (!adap) in eeh_err_detected()
3993 adap->flags &= ~FW_OK; in eeh_err_detected()
3994 notify_ulds(adap, CXGB4_STATE_START_RECOVERY); in eeh_err_detected()
3995 spin_lock(&adap->stats_lock); in eeh_err_detected()
3996 for_each_port(adap, i) { in eeh_err_detected()
3997 struct net_device *dev = adap->port[i]; in eeh_err_detected()
4002 spin_unlock(&adap->stats_lock); in eeh_err_detected()
4003 disable_interrupts(adap); in eeh_err_detected()
4004 if (adap->flags & FULL_INIT_DONE) in eeh_err_detected()
4005 cxgb_down(adap); in eeh_err_detected()
4007 if ((adap->flags & DEV_ENABLED)) { in eeh_err_detected()
4009 adap->flags &= ~DEV_ENABLED; in eeh_err_detected()
4019 struct adapter *adap = pci_get_drvdata(pdev); in eeh_slot_reset() local
4021 if (!adap) { in eeh_slot_reset()
4027 if (!(adap->flags & DEV_ENABLED)) { in eeh_slot_reset()
4033 adap->flags |= DEV_ENABLED; in eeh_slot_reset()
4041 if (t4_wait_dev_ready(adap->regs) < 0) in eeh_slot_reset()
4043 if (t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, NULL) < 0) in eeh_slot_reset()
4045 adap->flags |= FW_OK; in eeh_slot_reset()
4046 if (adap_init1(adap, &c)) in eeh_slot_reset()
4049 for_each_port(adap, i) { in eeh_slot_reset()
4050 struct port_info *p = adap2pinfo(adap, i); in eeh_slot_reset()
4052 ret = t4_alloc_vi(adap, adap->fn, p->tx_chan, adap->fn, 0, 1, in eeh_slot_reset()
4060 t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd, in eeh_slot_reset()
4061 adap->params.b_wnd); in eeh_slot_reset()
4062 setup_memwin(adap); in eeh_slot_reset()
4063 if (cxgb_up(adap)) in eeh_slot_reset()
4071 struct adapter *adap = pci_get_drvdata(pdev); in eeh_resume() local
4073 if (!adap) in eeh_resume()
4077 for_each_port(adap, i) { in eeh_resume()
4078 struct net_device *dev = adap->port[i]; in eeh_resume()
4101 static inline void init_rspq(struct adapter *adap, struct sge_rspq *q, in init_rspq() argument
4105 q->adap = adap; in init_rspq()
4116 static void cfg_queues(struct adapter *adap) in cfg_queues() argument
4118 struct sge *s = &adap->sge; in cfg_queues()
4125 for_each_port(adap, i) in cfg_queues()
4126 n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg); in cfg_queues()
4132 if (adap->params.nports * 8 > MAX_ETH_QSETS) { in cfg_queues()
4133 dev_err(adap->pdev_dev, "MAX_ETH_QSETS=%d < %d!\n", in cfg_queues()
4134 MAX_ETH_QSETS, adap->params.nports * 8); in cfg_queues()
4138 for_each_port(adap, i) { in cfg_queues()
4139 struct port_info *pi = adap2pinfo(adap, i); in cfg_queues()
4151 q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g; in cfg_queues()
4155 for_each_port(adap, i) { in cfg_queues()
4156 struct port_info *pi = adap2pinfo(adap, i); in cfg_queues()
4167 if (is_offload(adap)) { in cfg_queues()
4176 s->ofldqsets = roundup(i, adap->params.nports); in cfg_queues()
4178 s->ofldqsets = adap->params.nports; in cfg_queues()
4180 s->rdmaqs = adap->params.nports; in cfg_queues()
4188 s->rdmaciqs = (s->rdmaciqs / adap->params.nports) * in cfg_queues()
4189 adap->params.nports; in cfg_queues()
4190 s->rdmaciqs = max_t(int, s->rdmaciqs, adap->params.nports); in cfg_queues()
4196 init_rspq(adap, &r->rspq, 5, 10, 1024, 64); in cfg_queues()
4212 init_rspq(adap, &r->rspq, 5, 1, 1024, 64); in cfg_queues()
4220 init_rspq(adap, &r->rspq, 5, 1, 511, 64); in cfg_queues()
4225 ciq_size = 64 + adap->vres.cq.size + adap->tids.nftids; in cfg_queues()
4227 CH_WARN(adap, "CIQ size too small for available IQs\n"); in cfg_queues()
4234 init_rspq(adap, &r->rspq, 5, 1, ciq_size, 64); in cfg_queues()
4238 init_rspq(adap, &s->fw_evtq, 0, 1, 1024, 64); in cfg_queues()
4239 init_rspq(adap, &s->intrq, 0, 1, 2 * MAX_INGQ, 64); in cfg_queues()
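
cfg_queues() above hands each 1G port one queue set and divides what is left of MAX_ETH_QSETS among the n10g 10G-capable ports. A small worked sketch of that division (the MAX_ETH_QSETS value and the per-port cap of 8 are illustrative assumptions, not taken from the listing):

    #include <stdio.h>

    #define MAX_ETH_QSETS 32   /* assumed limit, for illustration */

    int main(void)
    {
        unsigned int nports = 4, n10g = 2;   /* 2 of 4 ports are 10G */
        unsigned int q10g;

        /* 1G ports get one queue set each; the 10G ports split the rest. */
        q10g = (MAX_ETH_QSETS - (nports - n10g)) / n10g;
        if (q10g > 8)
            q10g = 8;            /* assumed per-port cap */

        printf("per 10G port: %u, per 1G port: 1, total: %u\n",
               q10g, q10g * n10g + (nports - n10g));   /* 8 each, 18 total */
        return 0;
    }
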
4246 static void reduce_ethqs(struct adapter *adap, int n) in reduce_ethqs() argument
4251 while (n < adap->sge.ethqsets) in reduce_ethqs()
4252 for_each_port(adap, i) { in reduce_ethqs()
4253 pi = adap2pinfo(adap, i); in reduce_ethqs()
4256 adap->sge.ethqsets--; in reduce_ethqs()
4257 if (adap->sge.ethqsets <= n) in reduce_ethqs()
4263 for_each_port(adap, i) { in reduce_ethqs()
4264 pi = adap2pinfo(adap, i); in reduce_ethqs()
4273 static int enable_msix(struct adapter *adap) in enable_msix() argument
4277 struct sge *s = &adap->sge; in enable_msix()
4278 unsigned int nchan = adap->params.nports; in enable_msix()
4290 if (is_offload(adap)) { in enable_msix()
4299 need = 8 * adap->params.nports + EXTRA_VECS + ofld_need; in enable_msix()
4301 need = adap->params.nports + EXTRA_VECS + ofld_need; in enable_msix()
4303 allocated = pci_enable_msix_range(adap->pdev, entries, need, want); in enable_msix()
4305 dev_info(adap->pdev_dev, "not enough MSI-X vectors left," in enable_msix()
4319 reduce_ethqs(adap, i); in enable_msix()
4321 if (is_offload(adap)) { in enable_msix()
4333 adap->msix_info[i].vec = entries[i].vector; in enable_msix()
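
enable_msix() above asks pci_enable_msix_range() for anything between need (the floor below which the driver cannot operate) and want, then trims queue counts via reduce_ethqs() to whatever was granted. A sketch of the shape of that want/need negotiation with a stubbed allocator (the real call is pci_enable_msix_range(); everything else here is invented):

    #include <stdio.h>

    /* Stub for pci_enable_msix_range(): grants some count in [min, max],
     * or a negative errno-style value on failure. */
    static int fake_enable_range(int min, int max)
    {
        int available = 10;                /* pretend the platform has 10 */
        if (available < min)
            return -1;
        return available < max ? available : max;
    }

    int main(void)
    {
        int extra = 2;                     /* non-data + FW event queue    */
        int nports = 4, ethq = 8, ofldq = 4;
        int want = extra + ethq + ofldq;   /* everything we would like     */
        int need = extra + nports + ofldq; /* bare minimum to function     */

        int got = fake_enable_range(need, want);
        if (got < 0)
            return 1;
        if (got < want) {
            /* Mirror reduce_ethqs(): shrink Ethernet queue sets until
             * they fit in the vectors actually granted. */
            ethq = got - extra - ofldq;
            printf("trimmed eth queues to %d\n", ethq);
        }
        printf("vectors: want %d need %d got %d\n", want, need, got);
        return 0;
    }
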
4341 static int init_rss(struct adapter *adap) in init_rss() argument
4345 for_each_port(adap, i) { in init_rss()
4346 struct port_info *pi = adap2pinfo(adap, i); in init_rss()
4363 const struct adapter *adap = pi->adapter; in print_port_info() local
4365 if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_2_5GB) in print_port_info()
4367 else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB) in print_port_info()
4369 else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_8_0GB) in print_port_info()
4385 adap->params.vpd.id, in print_port_info()
4386 CHELSIO_CHIP_RELEASE(adap->params.chip), buf, in print_port_info()
4387 is_offload(adap) ? "R" : "", adap->params.pci.width, spd, in print_port_info()
4388 (adap->flags & USING_MSIX) ? " MSI-X" : in print_port_info()
4389 (adap->flags & USING_MSI) ? " MSI" : ""); in print_port_info()
4391 adap->params.vpd.sn, adap->params.vpd.pn); in print_port_info()