Lines Matching refs:adap

199 void t3_os_link_fault(struct adapter *adap, int port_id, int state)  in t3_os_link_fault()  argument
201 struct net_device *dev = adap->port[port_id]; in t3_os_link_fault()
212 disable_tx_fifo_drain(adap, pi); in t3_os_link_fault()
215 t3_xgm_intr_disable(adap, pi->port_id); in t3_os_link_fault()
216 t3_read_reg(adap, A_XGM_INT_STATUS + in t3_os_link_fault()
218 t3_write_reg(adap, in t3_os_link_fault()
222 t3_set_reg_field(adap, in t3_os_link_fault()
226 t3_xgm_intr_enable(adap, pi->port_id); in t3_os_link_fault()
233 enable_tx_fifo_drain(adap, pi); in t3_os_link_fault()
315 void t3_os_phymod_changed(struct adapter *adap, int port_id) in t3_os_phymod_changed() argument
321 const struct net_device *dev = adap->port[port_id]; in t3_os_phymod_changed()
382 static void name_msix_vecs(struct adapter *adap) in name_msix_vecs() argument
384 int i, j, msi_idx = 1, n = sizeof(adap->msix_info[0].desc) - 1; in name_msix_vecs()
386 snprintf(adap->msix_info[0].desc, n, "%s", adap->name); in name_msix_vecs()
387 adap->msix_info[0].desc[n] = 0; in name_msix_vecs()
389 for_each_port(adap, j) { in name_msix_vecs()
390 struct net_device *d = adap->port[j]; in name_msix_vecs()
394 snprintf(adap->msix_info[msi_idx].desc, n, in name_msix_vecs()
396 adap->msix_info[msi_idx].desc[n] = 0; in name_msix_vecs()
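
A minimal sketch of the bounded naming done by name_msix_vecs() at lines 384-396 above: each vector description is built with snprintf() into a fixed buffer of size n = sizeof(desc) - 1 and then explicitly NUL-terminated at desc[n]. The struct and names below are illustrative stand-ins, not the driver's msix_info layout.

#include <stdio.h>

struct demo_msix_info {
	char desc[22];			/* plays the role of adap->msix_info[i].desc */
};

static void demo_name_vec(struct demo_msix_info *mi, const char *ifname, int qidx)
{
	size_t n = sizeof(mi->desc) - 1;	/* leave room for the terminator */

	snprintf(mi->desc, n, "%s-%d", ifname, qidx);
	mi->desc[n] = 0;			/* guarantee termination */
}

int main(void)
{
	struct demo_msix_info mi;

	demo_name_vec(&mi, "eth0", 3);
	printf("%s\n", mi.desc);
	return 0;
}
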
401 static int request_msix_data_irqs(struct adapter *adap) in request_msix_data_irqs() argument
405 for_each_port(adap, i) { in request_msix_data_irqs()
406 int nqsets = adap2pinfo(adap, i)->nqsets; in request_msix_data_irqs()
409 err = request_irq(adap->msix_info[qidx + 1].vec, in request_msix_data_irqs()
410 t3_intr_handler(adap, in request_msix_data_irqs()
411 adap->sge.qs[qidx]. in request_msix_data_irqs()
413 adap->msix_info[qidx + 1].desc, in request_msix_data_irqs()
414 &adap->sge.qs[qidx]); in request_msix_data_irqs()
417 free_irq(adap->msix_info[qidx + 1].vec, in request_msix_data_irqs()
418 &adap->sge.qs[qidx]); in request_msix_data_irqs()
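
A minimal sketch of the error-unwind pattern in request_msix_data_irqs() at lines 405-418 above: one IRQ is requested per queue set, and if any request fails, every vector obtained so far is released with free_irq() before the error is returned. The demo_vec and qsets types are hypothetical stand-ins for the driver's msix_info and sge.qs arrays.

#include <linux/interrupt.h>

struct demo_vec {
	unsigned int irq;		/* plays the role of msix_info[i + 1].vec */
	char desc[24];			/* plays the role of msix_info[i + 1].desc */
};

static int demo_request_data_irqs(struct demo_vec *vecs, void **qsets,
				  int nqsets, irq_handler_t handler)
{
	int i, err;

	for (i = 0; i < nqsets; i++) {
		err = request_irq(vecs[i].irq, handler, 0, vecs[i].desc,
				  qsets[i]);
		if (err)
			goto unwind;
	}
	return 0;

unwind:
	/* release every IRQ requested before the failing one */
	while (--i >= 0)
		free_irq(vecs[i].irq, qsets[i]);
	return err;
}
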
443 static int await_mgmt_replies(struct adapter *adap, unsigned long init_cnt, in await_mgmt_replies() argument
448 while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) { in await_mgmt_replies()
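
A minimal sketch of the bounded wait in await_mgmt_replies() at line 448: poll until the qset 0 response queue's offload_pkts counter has advanced by n past init_cnt, sleeping between checks and giving up after a few attempts. The retry budget and sleep interval here are assumptions; read_offload_pkts() is a hypothetical accessor standing in for adap->sge.qs[0].rspq.offload_pkts.

#include <linux/delay.h>
#include <linux/errno.h>

static int demo_await_replies(unsigned long (*read_offload_pkts)(void *ctx),
			      void *ctx, unsigned long init_cnt,
			      unsigned long n)
{
	int attempts = 5;			/* assumed retry budget */

	while (read_offload_pkts(ctx) < init_cnt + n) {
		if (!--attempts)
			return -ETIMEDOUT;
		msleep(10);			/* assumed poll interval */
	}
	return 0;
}
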
456 static int init_tp_parity(struct adapter *adap) in init_tp_parity() argument
461 unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts; in init_tp_parity()
463 t3_tp_set_offload_mode(adap, 1); in init_tp_parity()
470 skb = adap->nofail_skb; in init_tp_parity()
480 t3_mgmt_tx(adap, skb); in init_tp_parity()
481 if (skb == adap->nofail_skb) { in init_tp_parity()
482 await_mgmt_replies(adap, cnt, i + 1); in init_tp_parity()
483 adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL); in init_tp_parity()
484 if (!adap->nofail_skb) in init_tp_parity()
494 skb = adap->nofail_skb; in init_tp_parity()
503 t3_mgmt_tx(adap, skb); in init_tp_parity()
504 if (skb == adap->nofail_skb) { in init_tp_parity()
505 await_mgmt_replies(adap, cnt, 16 + i + 1); in init_tp_parity()
506 adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL); in init_tp_parity()
507 if (!adap->nofail_skb) in init_tp_parity()
517 skb = adap->nofail_skb; in init_tp_parity()
526 t3_mgmt_tx(adap, skb); in init_tp_parity()
527 if (skb == adap->nofail_skb) { in init_tp_parity()
528 await_mgmt_replies(adap, cnt, 16 + 2048 + i + 1); in init_tp_parity()
529 adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL); in init_tp_parity()
530 if (!adap->nofail_skb) in init_tp_parity()
537 skb = adap->nofail_skb; in init_tp_parity()
546 t3_mgmt_tx(adap, skb); in init_tp_parity()
548 i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1); in init_tp_parity()
549 if (skb == adap->nofail_skb) { in init_tp_parity()
550 i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1); in init_tp_parity()
551 adap->nofail_skb = alloc_skb(sizeof(*greq), GFP_KERNEL); in init_tp_parity()
554 t3_tp_set_offload_mode(adap, 0); in init_tp_parity()
558 t3_tp_set_offload_mode(adap, 0); in init_tp_parity()
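
A minimal sketch of the "nofail_skb" fallback that init_tp_parity() repeats for each batch of management packets (lines 470-551 above): try a fresh alloc_skb(), fall back to a pre-allocated emergency skb if that fails, and when the emergency skb gets consumed, wait for the replies already in flight and replenish it before continuing. send_one() and wait_for_replies() are hypothetical stand-ins for t3_mgmt_tx() and await_mgmt_replies().

#include <linux/skbuff.h>
#include <linux/errno.h>

static int demo_send_with_fallback(struct sk_buff **nofail_skb,
				   unsigned int len,
				   void (*send_one)(struct sk_buff *),
				   void (*wait_for_replies)(void))
{
	struct sk_buff *skb = alloc_skb(len, GFP_KERNEL);

	if (!skb)
		skb = *nofail_skb;		/* emergency reserve */
	if (!skb)
		return -ENOMEM;

	send_one(skb);

	if (skb == *nofail_skb) {
		/* the reserve was used up: drain, then refill it */
		wait_for_replies();
		*nofail_skb = alloc_skb(len, GFP_KERNEL);
		if (!*nofail_skb)
			return -ENOMEM;
	}
	return 0;
}
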
573 static void setup_rss(struct adapter *adap) in setup_rss() argument
576 unsigned int nq0 = adap2pinfo(adap, 0)->nqsets; in setup_rss()
577 unsigned int nq1 = adap->port[1] ? adap2pinfo(adap, 1)->nqsets : 1; in setup_rss()
590 t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN | in setup_rss()
595 static void ring_dbs(struct adapter *adap) in ring_dbs() argument
600 struct sge_qset *qs = &adap->sge.qs[i]; in ring_dbs()
602 if (qs->adap) in ring_dbs()
604 t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX | V_EGRCNTX(qs->txq[j].cntxt_id)); in ring_dbs()
608 static void init_napi(struct adapter *adap) in init_napi() argument
613 struct sge_qset *qs = &adap->sge.qs[i]; in init_napi()
615 if (qs->adap) in init_napi()
625 adap->flags |= NAPI_INIT; in init_napi()
633 static void quiesce_rx(struct adapter *adap) in quiesce_rx() argument
638 if (adap->sge.qs[i].adap) in quiesce_rx()
639 napi_disable(&adap->sge.qs[i].napi); in quiesce_rx()
642 static void enable_all_napi(struct adapter *adap) in enable_all_napi() argument
646 if (adap->sge.qs[i].adap) in enable_all_napi()
647 napi_enable(&adap->sge.qs[i].napi); in enable_all_napi()
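
A minimal sketch of the guard shared by quiesce_rx() and enable_all_napi() at lines 638-647: qs->adap is only set once a queue set has been initialised, so it doubles as an "is this qset live?" test when the full qs[] array is walked. struct demo_qset is a stand-in for the driver's sge_qset.

#include <linux/netdevice.h>

struct demo_qset {
	void *adap;			/* non-NULL once the qset is set up */
	struct napi_struct napi;
};

static void demo_quiesce_rx(struct demo_qset *qs, int nqsets)
{
	int i;

	for (i = 0; i < nqsets; i++)
		if (qs[i].adap)
			napi_disable(&qs[i].napi);
}

static void demo_enable_all_napi(struct demo_qset *qs, int nqsets)
{
	int i;

	for (i = 0; i < nqsets; i++)
		if (qs[i].adap)
			napi_enable(&qs[i].napi);
}
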
658 static int setup_sge_qsets(struct adapter *adap) in setup_sge_qsets() argument
663 if (adap->params.rev > 0 && !(adap->flags & USING_MSI)) in setup_sge_qsets()
666 for_each_port(adap, i) { in setup_sge_qsets()
667 struct net_device *dev = adap->port[i]; in setup_sge_qsets()
670 pi->qs = &adap->sge.qs[pi->first_qset]; in setup_sge_qsets()
672 err = t3_sge_alloc_qset(adap, qset_idx, 1, in setup_sge_qsets()
673 (adap->flags & USING_MSIX) ? qset_idx + 1 : in setup_sge_qsets()
675 &adap->params.sge.qset[qset_idx], ntxq, dev, in setup_sge_qsets()
678 t3_free_sge_resources(adap); in setup_sge_qsets()
727 struct adapter *adap = pi->adapter; \
739 struct adapter *adap = pi->adapter; in set_nfilters() local
740 int min_tids = is_offload(adap) ? MC5_MIN_TIDS : 0; in set_nfilters()
742 if (adap->flags & FULL_INIT_DONE) in set_nfilters()
744 if (val && adap->params.rev == 0) in set_nfilters()
746 if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers - in set_nfilters()
749 adap->params.mc5.nfilters = val; in set_nfilters()
762 struct adapter *adap = pi->adapter; in set_nservers() local
764 if (adap->flags & FULL_INIT_DONE) in set_nservers()
766 if (val > t3_mc5_size(&adap->mc5) - adap->params.mc5.nfilters - in set_nservers()
769 adap->params.mc5.nservers = val; in set_nservers()
787 CXGB3_ATTR_R(cam_size, t3_mc5_size(&adap->mc5));
788 CXGB3_ATTR_RW(nfilters, adap->params.mc5.nfilters, store_nfilters);
789 CXGB3_ATTR_RW(nservers, adap->params.mc5.nservers, store_nservers);
804 struct adapter *adap = pi->adapter; in tm_attr_show() local
810 t3_write_reg(adap, A_TP_TM_PIO_ADDR, addr); in tm_attr_show()
811 v = t3_read_reg(adap, A_TP_TM_PIO_DATA); in tm_attr_show()
819 v = (adap->params.vpd.cclk * 1000) / cpt; in tm_attr_show()
830 struct adapter *adap = pi->adapter; in tm_attr_store() local
843 ret = t3_config_sched(adap, val, sched); in tm_attr_store()
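
A minimal sketch of the indirect register access tm_attr_show() performs at lines 810-811: the index of the wanted word is written into a PIO address register, and the value is then read back through the paired data register. The offsets and names below are hypothetical; the driver uses A_TP_TM_PIO_ADDR and A_TP_TM_PIO_DATA through its t3_write_reg()/t3_read_reg() helpers.

#include <linux/io.h>

#define DEMO_PIO_ADDR	0x100		/* stand-in for the address register offset */
#define DEMO_PIO_DATA	0x104		/* stand-in for the data register offset */

static u32 demo_indirect_read(void __iomem *regs, u32 index)
{
	writel(index, regs + DEMO_PIO_ADDR);	/* select the word */
	return readl(regs + DEMO_PIO_DATA);	/* read its value */
}
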
939 static int send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo, in send_pktsched_cmd() argument
948 skb = adap->nofail_skb; in send_pktsched_cmd()
960 ret = t3_mgmt_tx(adap, skb); in send_pktsched_cmd()
961 if (skb == adap->nofail_skb) { in send_pktsched_cmd()
962 adap->nofail_skb = alloc_skb(sizeof(struct cpl_set_tcb_field), in send_pktsched_cmd()
964 if (!adap->nofail_skb) in send_pktsched_cmd()
971 static int bind_qsets(struct adapter *adap) in bind_qsets() argument
975 for_each_port(adap, i) { in bind_qsets()
976 const struct port_info *pi = adap2pinfo(adap, i); in bind_qsets()
979 int ret = send_pktsched_cmd(adap, 1, in bind_qsets()
1072 static int upgrade_fw(struct adapter *adap) in upgrade_fw() argument
1076 struct device *dev = &adap->pdev->dev; in upgrade_fw()
1084 ret = t3_load_fw(adap, fw->data, fw->size); in upgrade_fw()
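
A minimal sketch of the firmware upgrade flow upgrade_fw() follows at lines 1076-1084: request the image from the firmware loader by name, pass its data and size to the device-specific loader, then release the image regardless of the outcome. demo_load() and the file name are hypothetical stand-ins for t3_load_fw() and the driver's real firmware name.

#include <linux/firmware.h>
#include <linux/device.h>

static int demo_upgrade_fw(struct device *dev,
			   int (*demo_load)(const u8 *data, size_t size))
{
	const struct firmware *fw;
	int ret;

	ret = request_firmware(&fw, "demo_fw.bin", dev);
	if (ret < 0)
		return ret;

	ret = demo_load(fw->data, fw->size);
	release_firmware(fw);
	return ret;
}
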
1113 static int update_tpsram(struct adapter *adap) in update_tpsram() argument
1117 struct device *dev = &adap->pdev->dev; in update_tpsram()
1121 rev = t3rev2char(adap); in update_tpsram()
1134 ret = t3_check_tpsram(adap, tpsram->data, tpsram->size); in update_tpsram()
1138 ret = t3_set_proto_sram(adap, tpsram->data); in update_tpsram()
1165 static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p) in t3_synchronize_rx() argument
1170 struct sge_rspq *q = &adap->sge.qs[i].rspq; in t3_synchronize_rx()
1209 static int cxgb_up(struct adapter *adap) in cxgb_up() argument
1213 if (!(adap->flags & FULL_INIT_DONE)) { in cxgb_up()
1214 err = t3_check_fw_version(adap); in cxgb_up()
1216 err = upgrade_fw(adap); in cxgb_up()
1217 CH_WARN(adap, "FW upgrade to %d.%d.%d %s\n", in cxgb_up()
1222 err = t3_check_tpsram_version(adap); in cxgb_up()
1224 err = update_tpsram(adap); in cxgb_up()
1225 CH_WARN(adap, "TP upgrade to %d.%d.%d %s\n", in cxgb_up()
1235 t3_intr_clear(adap); in cxgb_up()
1237 err = t3_init_hw(adap, 0); in cxgb_up()
1241 t3_set_reg_field(adap, A_TP_PARA_REG5, 0, F_RXDDPOFFINIT); in cxgb_up()
1242 t3_write_reg(adap, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12)); in cxgb_up()
1244 err = setup_sge_qsets(adap); in cxgb_up()
1248 for_each_port(adap, i) in cxgb_up()
1249 cxgb_vlan_mode(adap->port[i], adap->port[i]->features); in cxgb_up()
1251 setup_rss(adap); in cxgb_up()
1252 if (!(adap->flags & NAPI_INIT)) in cxgb_up()
1253 init_napi(adap); in cxgb_up()
1255 t3_start_sge_timers(adap); in cxgb_up()
1256 adap->flags |= FULL_INIT_DONE; in cxgb_up()
1259 t3_intr_clear(adap); in cxgb_up()
1261 if (adap->flags & USING_MSIX) { in cxgb_up()
1262 name_msix_vecs(adap); in cxgb_up()
1263 err = request_irq(adap->msix_info[0].vec, in cxgb_up()
1265 adap->msix_info[0].desc, adap); in cxgb_up()
1269 err = request_msix_data_irqs(adap); in cxgb_up()
1271 free_irq(adap->msix_info[0].vec, adap); in cxgb_up()
1274 } else if ((err = request_irq(adap->pdev->irq, in cxgb_up()
1275 t3_intr_handler(adap, in cxgb_up()
1276 adap->sge.qs[0].rspq. in cxgb_up()
1278 (adap->flags & USING_MSI) ? in cxgb_up()
1280 adap->name, adap))) in cxgb_up()
1283 enable_all_napi(adap); in cxgb_up()
1284 t3_sge_start(adap); in cxgb_up()
1285 t3_intr_enable(adap); in cxgb_up()
1287 if (adap->params.rev >= T3_REV_C && !(adap->flags & TP_PARITY_INIT) && in cxgb_up()
1288 is_offload(adap) && init_tp_parity(adap) == 0) in cxgb_up()
1289 adap->flags |= TP_PARITY_INIT; in cxgb_up()
1291 if (adap->flags & TP_PARITY_INIT) { in cxgb_up()
1292 t3_write_reg(adap, A_TP_INT_CAUSE, in cxgb_up()
1294 t3_write_reg(adap, A_TP_INT_ENABLE, 0x7fbfffff); in cxgb_up()
1297 if (!(adap->flags & QUEUES_BOUND)) { in cxgb_up()
1298 int ret = bind_qsets(adap); in cxgb_up()
1301 CH_ERR(adap, "failed to bind qsets, err %d\n", ret); in cxgb_up()
1302 t3_intr_disable(adap); in cxgb_up()
1303 free_irq_resources(adap); in cxgb_up()
1307 adap->flags |= QUEUES_BOUND; in cxgb_up()
1313 CH_ERR(adap, "request_irq failed, err %d\n", err); in cxgb_up()
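
A minimal sketch of the non-MSI-X interrupt request at lines 1274-1280 of cxgb_up(): when MSI-X is not in use, a single handler is attached to pdev->irq, and IRQF_SHARED is only needed for legacy INTx because an MSI vector is never shared with another device. The handler and name passed in are stand-ins.

#include <linux/interrupt.h>
#include <linux/pci.h>

static int demo_request_single_irq(struct pci_dev *pdev, bool using_msi,
				   irq_handler_t handler, void *dev_id)
{
	return request_irq(pdev->irq, handler,
			   using_msi ? 0 : IRQF_SHARED,
			   "demo-cxgb3", dev_id);
}
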
1334 static void schedule_chk_task(struct adapter *adap) in schedule_chk_task() argument
1338 timeo = adap->params.linkpoll_period ? in schedule_chk_task()
1339 (HZ * adap->params.linkpoll_period) / 10 : in schedule_chk_task()
1340 adap->params.stats_update_period * HZ; in schedule_chk_task()
1342 queue_delayed_work(cxgb3_wq, &adap->adap_check_task, timeo); in schedule_chk_task()
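
A minimal sketch of the period selection in schedule_chk_task() at lines 1338-1342: linkpoll_period is stored in tenths of a second, so it is scaled by HZ / 10; when link polling is disabled, the slower statistics interval, in whole seconds, is used instead. The wrapper name and parameters are illustrative; cxgb3_wq and adap_check_task are the driver's own.

#include <linux/workqueue.h>
#include <linux/jiffies.h>

static void demo_schedule_check(struct workqueue_struct *wq,
				struct delayed_work *task,
				unsigned int linkpoll_period,	/* tenths of a second */
				unsigned int stats_period)	/* whole seconds */
{
	unsigned long timeo = linkpoll_period ?
			      (HZ * linkpoll_period) / 10 :
			      stats_period * HZ;

	queue_delayed_work(wq, task, timeo);
}
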
3056 static void set_nqsets(struct adapter *adap) in set_nqsets() argument
3060 int hwports = adap->params.nports; in set_nqsets()
3061 int nqsets = adap->msix_nvectors - 1; in set_nqsets()
3063 if (adap->params.rev > 0 && adap->flags & USING_MSIX) { in set_nqsets()
3075 for_each_port(adap, i) { in set_nqsets()
3076 struct port_info *pi = adap2pinfo(adap, i); in set_nqsets()
3082 dev_info(&adap->pdev->dev, in set_nqsets()
3087 static int cxgb_enable_msix(struct adapter *adap) in cxgb_enable_msix() argument
3097 vectors = pci_enable_msix_range(adap->pdev, entries, in cxgb_enable_msix()
3098 adap->params.nports + 1, vectors); in cxgb_enable_msix()
3103 adap->msix_info[i].vec = entries[i].vector; in cxgb_enable_msix()
3104 adap->msix_nvectors = vectors; in cxgb_enable_msix()
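
A minimal sketch of the vector allocation in cxgb_enable_msix() at lines 3097-3104: request up to one MSI-X vector per queue set plus one for asynchronous events, accept as few as nports + 1, and record the vector numbers the PCI core granted. struct msix_entry and pci_enable_msix_range() are the kernel API the listing shows; the wrapper and its parameters are stand-ins.

#include <linux/pci.h>

static int demo_enable_msix(struct pci_dev *pdev, struct msix_entry *entries,
			    int want, int need, unsigned int *vecs_out)
{
	int i, vectors;

	for (i = 0; i < want; i++)
		entries[i].entry = i;

	/* returns the number of vectors granted, or a negative errno */
	vectors = pci_enable_msix_range(pdev, entries, need, want);
	if (vectors < 0)
		return vectors;

	for (i = 0; i < vectors; i++)
		vecs_out[i] = entries[i].vector;

	return vectors;
}
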
3109 static void print_port_info(struct adapter *adap, const struct adapter_info *ai) in print_port_info() argument
3118 if (is_pcie(adap)) in print_port_info()
3120 pci_variant[adap->params.pci.variant], in print_port_info()
3121 adap->params.pci.width); in print_port_info()
3124 pci_variant[adap->params.pci.variant], in print_port_info()
3125 adap->params.pci.speed, adap->params.pci.width); in print_port_info()
3127 for_each_port(adap, i) { in print_port_info()
3128 struct net_device *dev = adap->port[i]; in print_port_info()
3131 if (!test_bit(i, &adap->registered_device_map)) in print_port_info()
3135 is_offload(adap) ? "R" : "", adap->params.rev, buf, in print_port_info()
3136 (adap->flags & USING_MSIX) ? " MSI-X" : in print_port_info()
3137 (adap->flags & USING_MSI) ? " MSI" : ""); in print_port_info()
3138 if (adap->name == dev->name && adap->params.vpd.mclk) in print_port_info()
3140 adap->name, t3_mc7_size(&adap->cm) >> 20, in print_port_info()
3141 t3_mc7_size(&adap->pmtx) >> 20, in print_port_info()
3142 t3_mc7_size(&adap->pmrx) >> 20, in print_port_info()
3143 adap->params.vpd.sn); in print_port_info()