Lines matching refs: dd — every line below references the per-device state pointer (struct qib_devdata *dd), listed with its source line number and enclosing function.
102 void qib_set_ctxtcnt(struct qib_devdata *dd) in qib_set_ctxtcnt() argument
105 dd->cfgctxts = dd->first_user_ctxt + num_online_cpus(); in qib_set_ctxtcnt()
106 if (dd->cfgctxts > dd->ctxtcnt) in qib_set_ctxtcnt()
107 dd->cfgctxts = dd->ctxtcnt; in qib_set_ctxtcnt()
108 } else if (qib_cfgctxts < dd->num_pports) in qib_set_ctxtcnt()
109 dd->cfgctxts = dd->ctxtcnt; in qib_set_ctxtcnt()
110 else if (qib_cfgctxts <= dd->ctxtcnt) in qib_set_ctxtcnt()
111 dd->cfgctxts = qib_cfgctxts; in qib_set_ctxtcnt()
113 dd->cfgctxts = dd->ctxtcnt; in qib_set_ctxtcnt()
114 dd->freectxts = (dd->first_user_ctxt > dd->cfgctxts) ? 0 : in qib_set_ctxtcnt()
115 dd->cfgctxts - dd->first_user_ctxt; in qib_set_ctxtcnt()
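Read together, the qib_set_ctxtcnt() fragments above pick the number of configured contexts: with the qib_cfgctxts module parameter unset, size cfgctxts to the kernel contexts plus one per online CPU, clamped to the chip's ctxtcnt; otherwise honor the parameter within the chip's limits. A minimal sketch of the whole routine, with the non-matching lines (the if/else skeleton) filled in as assumptions:

void qib_set_ctxtcnt(struct qib_devdata *dd)
{
        if (!qib_cfgctxts) {            /* assumed: module parameter left at 0 */
                dd->cfgctxts = dd->first_user_ctxt + num_online_cpus();
                if (dd->cfgctxts > dd->ctxtcnt)
                        dd->cfgctxts = dd->ctxtcnt;
        } else if (qib_cfgctxts < dd->num_pports)
                dd->cfgctxts = dd->ctxtcnt;
        else if (qib_cfgctxts <= dd->ctxtcnt)
                dd->cfgctxts = qib_cfgctxts;
        else                            /* assumed: parameter larger than the chip supports */
                dd->cfgctxts = dd->ctxtcnt;
        /* contexts left over for user (PSM) processes */
        dd->freectxts = (dd->first_user_ctxt > dd->cfgctxts) ? 0 :
                        dd->cfgctxts - dd->first_user_ctxt;
}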
121 int qib_create_ctxts(struct qib_devdata *dd) in qib_create_ctxts() argument
124 int local_node_id = pcibus_to_node(dd->pcidev->bus); in qib_create_ctxts()
128 dd->assigned_node_id = local_node_id; in qib_create_ctxts()
134 dd->rcd = kcalloc(dd->ctxtcnt, sizeof(*dd->rcd), GFP_KERNEL); in qib_create_ctxts()
135 if (!dd->rcd) { in qib_create_ctxts()
136 qib_dev_err(dd, in qib_create_ctxts()
142 for (i = 0; i < dd->first_user_ctxt; ++i) { in qib_create_ctxts()
146 if (dd->skip_kctxt_mask & (1 << i)) in qib_create_ctxts()
149 ppd = dd->pport + (i % dd->num_pports); in qib_create_ctxts()
151 rcd = qib_create_ctxtdata(ppd, i, dd->assigned_node_id); in qib_create_ctxts()
153 qib_dev_err(dd, in qib_create_ctxts()
155 kfree(dd->rcd); in qib_create_ctxts()
156 dd->rcd = NULL; in qib_create_ctxts()
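The qib_create_ctxts() lines above allocate the receive-context pointer array and create one kernel context per port, round-robin, skipping any context named in skip_kctxt_mask. A sketch of how those pieces connect; the NUMA fallback, error strings, and the trailing per-context setup are assumptions based on the visible structure:

int qib_create_ctxts(struct qib_devdata *dd)
{
        unsigned i;
        int local_node_id = pcibus_to_node(dd->pcidev->bus);

        if (local_node_id < 0)          /* assumed fallback when the bus has no node */
                local_node_id = numa_node_id();
        dd->assigned_node_id = local_node_id;

        /* allocate the full ctxtcnt array so teardown can walk every slot */
        dd->rcd = kcalloc(dd->ctxtcnt, sizeof(*dd->rcd), GFP_KERNEL);
        if (!dd->rcd) {
                qib_dev_err(dd, "Unable to allocate ctxtdata array, failing\n");
                return -ENOMEM;
        }

        /* create the kernel contexts (everything below first_user_ctxt) */
        for (i = 0; i < dd->first_user_ctxt; ++i) {
                struct qib_pportdata *ppd;
                struct qib_ctxtdata *rcd;

                if (dd->skip_kctxt_mask & (1 << i))
                        continue;

                ppd = dd->pport + (i % dd->num_pports);

                rcd = qib_create_ctxtdata(ppd, i, dd->assigned_node_id);
                if (!rcd) {
                        qib_dev_err(dd,
                                "Unable to allocate ctxtdata for Kernel ctxt, failing\n");
                        kfree(dd->rcd);
                        dd->rcd = NULL;
                        return -ENOMEM;
                }
                rcd->pkeys[0] = QIB_DEFAULT_P_KEY;      /* assumed per-ctxt setup */
                rcd->seq_cnt = 1;
        }
        return 0;
}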
171 struct qib_devdata *dd = ppd->dd; in qib_create_ctxtdata() local
179 rcd->dd = dd; in qib_create_ctxtdata()
182 dd->rcd[ctxt] = rcd; in qib_create_ctxtdata()
184 if (ctxt < dd->first_user_ctxt) { /* N/A for PSM contexts */ in qib_create_ctxtdata()
189 qib_dev_err(dd, in qib_create_ctxtdata()
195 dd->f_init_ctxt(rcd); in qib_create_ctxtdata()
210 rcd->rcvegrbuf_size / dd->rcvegrbufsize; in qib_create_ctxtdata()
224 int qib_init_pportdata(struct qib_pportdata *ppd, struct qib_devdata *dd, in qib_init_pportdata() argument
229 ppd->dd = dd; in qib_init_pportdata()
261 qib_dev_err(dd, in qib_init_pportdata()
270 qib_dev_err(dd, in qib_init_pportdata()
279 qib_dev_err(dd, in qib_init_pportdata()
288 qib_dev_err(dd, in qib_init_pportdata()
312 qib_dev_err(dd, in qib_init_pportdata()
317 qib_dev_err(dd, "Congestion Control Agent disabled for port %d\n", in qib_init_pportdata()
322 static int init_pioavailregs(struct qib_devdata *dd) in init_pioavailregs() argument
327 dd->pioavailregs_dma = dma_alloc_coherent( in init_pioavailregs()
328 &dd->pcidev->dev, PAGE_SIZE, &dd->pioavailregs_phys, in init_pioavailregs()
330 if (!dd->pioavailregs_dma) { in init_pioavailregs()
331 qib_dev_err(dd, in init_pioavailregs()
342 ((char *) dd->pioavailregs_dma + in init_pioavailregs()
344 dd->pioavregs * sizeof(u64)) & ~L1_CACHE_BYTES)); in init_pioavailregs()
346 dd->devstatusp = status_page; in init_pioavailregs()
348 for (pidx = 0; pidx < dd->num_pports; ++pidx) { in init_pioavailregs()
349 dd->pport[pidx].statusp = status_page; in init_pioavailregs()
357 dd->freezemsg = (char *) status_page; in init_pioavailregs()
358 *dd->freezemsg = 0; in init_pioavailregs()
360 ret = (char *) status_page - (char *) dd->pioavailregs_dma; in init_pioavailregs()
361 dd->freezelen = PAGE_SIZE - ret; in init_pioavailregs()
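The init_pioavailregs() lines above carve one DMA-coherent page into the chip-updated PIO-availability registers, a device status word, per-port status words, and a freeze-message buffer that takes whatever space remains. A sketch of the layout logic; the cache-alignment arithmetic on the elided line and the status-word zeroing are assumptions:

static int init_pioavailregs(struct qib_devdata *dd)
{
        int ret, pidx;
        u64 *status_page;

        dd->pioavailregs_dma = dma_alloc_coherent(
                &dd->pcidev->dev, PAGE_SIZE, &dd->pioavailregs_phys,
                GFP_KERNEL);
        if (!dd->pioavailregs_dma) {
                qib_dev_err(dd,
                        "failed to allocate PIOavail reg area in memory\n");
                return -ENOMEM;
        }

        /* status words start after the pioavail registers, cache aligned */
        status_page = (u64 *)
                ((char *) dd->pioavailregs_dma +
                 ((2 * L1_CACHE_BYTES +                 /* assumed alignment term */
                   dd->pioavregs * sizeof(u64)) & ~L1_CACHE_BYTES));
        /* device status comes first, then one status word per port */
        dd->devstatusp = status_page;
        *status_page++ = 0;
        for (pidx = 0; pidx < dd->num_pports; ++pidx) {
                dd->pport[pidx].statusp = status_page;
                *status_page++ = 0;
        }

        /* freeze/error message buffer is "whatever is left" of the page */
        dd->freezemsg = (char *) status_page;
        *dd->freezemsg = 0;
        ret = (char *) status_page - (char *) dd->pioavailregs_dma;
        dd->freezelen = PAGE_SIZE - ret;

        return 0;
}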
380 static void init_shadow_tids(struct qib_devdata *dd) in init_shadow_tids() argument
385 pages = vzalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(struct page *)); in init_shadow_tids()
387 qib_dev_err(dd, in init_shadow_tids()
392 addrs = vzalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(dma_addr_t)); in init_shadow_tids()
394 qib_dev_err(dd, in init_shadow_tids()
399 dd->pageshadow = pages; in init_shadow_tids()
400 dd->physshadow = addrs; in init_shadow_tids()
406 dd->pageshadow = NULL; in init_shadow_tids()
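init_shadow_tids() above sets up the two shadow arrays used for expected-receive (TID) mappings: one page pointer and one DMA handle per TID per configured context. A sketch of the full routine; the error messages and the cleanup labels are assumptions:

static void init_shadow_tids(struct qib_devdata *dd)
{
        struct page **pages;
        dma_addr_t *addrs;

        pages = vzalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(struct page *));
        if (!pages) {
                qib_dev_err(dd,
                        "failed to allocate shadow page * array, no expected sends!\n");
                goto bail;
        }

        addrs = vzalloc(dd->cfgctxts * dd->rcvtidcnt * sizeof(dma_addr_t));
        if (!addrs) {
                qib_dev_err(dd,
                        "failed to allocate shadow dma handle array, no expected sends!\n");
                goto bail_free;
        }

        dd->pageshadow = pages;
        dd->physshadow = addrs;
        return;

bail_free:
        vfree(pages);
bail:
        dd->pageshadow = NULL;  /* leaves expected receives disabled */
}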
413 static int loadtime_init(struct qib_devdata *dd) in loadtime_init() argument
417 if (((dd->revision >> QLOGIC_IB_R_SOFTWARE_SHIFT) & in loadtime_init()
419 qib_dev_err(dd, in loadtime_init()
422 (int)(dd->revision >> in loadtime_init()
425 (unsigned long long) dd->revision); in loadtime_init()
430 if (dd->revision & QLOGIC_IB_R_EMULATOR_MASK) in loadtime_init()
431 qib_devinfo(dd->pcidev, "%s", dd->boardversion); in loadtime_init()
433 spin_lock_init(&dd->pioavail_lock); in loadtime_init()
434 spin_lock_init(&dd->sendctrl_lock); in loadtime_init()
435 spin_lock_init(&dd->uctxt_lock); in loadtime_init()
436 spin_lock_init(&dd->qib_diag_trans_lock); in loadtime_init()
437 spin_lock_init(&dd->eep_st_lock); in loadtime_init()
438 mutex_init(&dd->eep_lock); in loadtime_init()
443 ret = init_pioavailregs(dd); in loadtime_init()
444 init_shadow_tids(dd); in loadtime_init()
446 qib_get_eeprom_info(dd); in loadtime_init()
449 init_timer(&dd->intrchk_timer); in loadtime_init()
450 dd->intrchk_timer.function = verify_interrupt; in loadtime_init()
451 dd->intrchk_timer.data = (unsigned long) dd; in loadtime_init()
453 ret = qib_cq_init(dd); in loadtime_init()
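loadtime_init() above is the one-time, load-time half of device init: verify the chip's software-interface version, initialize the per-device locks, allocate the pioavail/status page and TID shadow arrays, read the EEPROM, and prepare (but not arm) the interrupt-verification timer. A condensed sketch; the version-mismatch message, the -ENOSYS error code, and the qib_mini_init early-out are assumptions:

static int loadtime_init(struct qib_devdata *dd)
{
        int ret = 0;

        if (((dd->revision >> QLOGIC_IB_R_SOFTWARE_SHIFT) &
             QLOGIC_IB_R_SOFTWARE_MASK) != QIB_CHIP_SWVERSION) {
                qib_dev_err(dd,
                        "Driver only handles version %d, chip swversion is %d (%llx), failing\n",
                        QIB_CHIP_SWVERSION,
                        (int)(dd->revision >> QLOGIC_IB_R_SOFTWARE_SHIFT) &
                                QLOGIC_IB_R_SOFTWARE_MASK,
                        (unsigned long long) dd->revision);
                ret = -ENOSYS;          /* assumed error code */
                goto done;
        }

        if (dd->revision & QLOGIC_IB_R_EMULATOR_MASK)
                qib_devinfo(dd->pcidev, "%s", dd->boardversion);

        spin_lock_init(&dd->pioavail_lock);
        spin_lock_init(&dd->sendctrl_lock);
        spin_lock_init(&dd->uctxt_lock);
        spin_lock_init(&dd->qib_diag_trans_lock);
        spin_lock_init(&dd->eep_st_lock);
        mutex_init(&dd->eep_lock);

        if (qib_mini_init)              /* assumed: diag-only init skips the rest */
                goto done;

        ret = init_pioavailregs(dd);
        init_shadow_tids(dd);

        qib_get_eeprom_info(dd);

        /* set up, but do not start, the interrupt-verification timer */
        init_timer(&dd->intrchk_timer);
        dd->intrchk_timer.function = verify_interrupt;
        dd->intrchk_timer.data = (unsigned long) dd;

        ret = qib_cq_init(dd);
done:
        return ret;
}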
466 static int init_after_reset(struct qib_devdata *dd) in init_after_reset() argument
475 for (i = 0; i < dd->num_pports; ++i) { in init_after_reset()
480 dd->f_rcvctrl(dd->pport + i, QIB_RCVCTRL_CTXT_DIS | in init_after_reset()
484 dd->f_sendctrl(dd->pport + i, QIB_SENDCTRL_SEND_DIS | in init_after_reset()
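init_after_reset() above quiesces the chip before re-initialization following a reset: for every port it disables contexts, interrupt-available updates, and tail updates on the receive side, and disables send plus pioavail updates on the send side. A sketch; the companion flag names beyond those visible are assumptions:

static int init_after_reset(struct qib_devdata *dd)
{
        int i;

        for (i = 0; i < dd->num_pports; ++i) {
                /* ctxt == -1 means "all contexts"; only safe for disabling */
                dd->f_rcvctrl(dd->pport + i, QIB_RCVCTRL_CTXT_DIS |
                              QIB_RCVCTRL_INTRAVAIL_DIS |      /* assumed flag */
                              QIB_RCVCTRL_TAILUPD_DIS, -1);    /* assumed flag */
                dd->f_sendctrl(dd->pport + i, QIB_SENDCTRL_SEND_DIS |
                               QIB_SENDCTRL_AVAIL_DIS);        /* assumed flag */
        }
        return 0;
}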
491 static void enable_chip(struct qib_devdata *dd) in enable_chip() argument
499 for (i = 0; i < dd->num_pports; ++i) in enable_chip()
500 dd->f_sendctrl(dd->pport + i, QIB_SENDCTRL_SEND_ENB | in enable_chip()
507 rcvmask |= (dd->flags & QIB_NODMA_RTAIL) ? in enable_chip()
509 for (i = 0; dd->rcd && i < dd->first_user_ctxt; ++i) { in enable_chip()
510 struct qib_ctxtdata *rcd = dd->rcd[i]; in enable_chip()
513 dd->f_rcvctrl(rcd->ppd, rcvmask, i); in enable_chip()
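enable_chip() above is the mirror image: re-enable PIO send and pioavail updates on every port, then enable receive (and receive interrupts) for each kernel context, choosing DMA'd versus non-DMA'd tail updates from the QIB_NODMA_RTAIL flag. A sketch with the enable-flag names treated as assumptions:

static void enable_chip(struct qib_devdata *dd)
{
        u64 rcvmask;
        int i;

        /* enable PIO send and pioavail register updates to memory */
        for (i = 0; i < dd->num_pports; ++i)
                dd->f_sendctrl(dd->pport + i, QIB_SENDCTRL_SEND_ENB |
                               QIB_SENDCTRL_AVAIL_ENB);        /* assumed flag */

        /* enable kernel contexts' receive and receive interrupt */
        rcvmask = QIB_RCVCTRL_CTXT_ENB | QIB_RCVCTRL_INTRAVAIL_ENB; /* assumed flags */
        rcvmask |= (dd->flags & QIB_NODMA_RTAIL) ?
                   QIB_RCVCTRL_TAILUPD_DIS : QIB_RCVCTRL_TAILUPD_ENB;
        for (i = 0; dd->rcd && i < dd->first_user_ctxt; ++i) {
                struct qib_ctxtdata *rcd = dd->rcd[i];

                if (rcd)
                        dd->f_rcvctrl(rcd->ppd, rcvmask, i);
        }
}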
519 struct qib_devdata *dd = (struct qib_devdata *) opaque; in verify_interrupt() local
522 if (!dd) in verify_interrupt()
529 int_counter = qib_int_counter(dd) - dd->z_int_counter; in verify_interrupt()
531 if (!dd->f_intr_fallback(dd)) in verify_interrupt()
532 dev_err(&dd->pcidev->dev, in verify_interrupt()
535 mod_timer(&dd->intrchk_timer, jiffies + HZ/2); in verify_interrupt()
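verify_interrupt() above is the timer callback armed by qib_init(): if no interrupts have arrived since the baseline in z_int_counter, it asks the chip-specific f_intr_fallback() to switch interrupt mode and, if that helps, re-arms itself for another half-second check. A sketch; the zero-count condition and the error text are assumptions:

static void verify_interrupt(unsigned long opaque)
{
        struct qib_devdata *dd = (struct qib_devdata *) opaque;
        u64 int_counter;

        if (!dd)
                return;                 /* device is being torn down */

        /* interrupts seen since the baseline taken before arming the timer */
        int_counter = qib_int_counter(dd) - dd->z_int_counter;
        if (int_counter == 0) {         /* assumed condition */
                if (!dd->f_intr_fallback(dd))
                        dev_err(&dd->pcidev->dev,
                                "No interrupts detected, not usable.\n"); /* approximate text */
                else                    /* re-arm to see if the fallback works */
                        mod_timer(&dd->intrchk_timer, jiffies + HZ/2);
        }
}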
539 static void init_piobuf_state(struct qib_devdata *dd) in init_piobuf_state() argument
552 dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_ALL); in init_piobuf_state()
553 for (pidx = 0; pidx < dd->num_pports; ++pidx) in init_piobuf_state()
554 dd->f_sendctrl(dd->pport + pidx, QIB_SENDCTRL_FLUSH); in init_piobuf_state()
562 uctxts = dd->cfgctxts - dd->first_user_ctxt; in init_piobuf_state()
563 dd->ctxts_extrabuf = dd->pbufsctxt ? in init_piobuf_state()
564 dd->lastctxt_piobuf - (dd->pbufsctxt * uctxts) : 0; in init_piobuf_state()
575 for (i = 0; i < dd->pioavregs; i++) { in init_piobuf_state()
578 tmp = dd->pioavailregs_dma[i]; in init_piobuf_state()
584 dd->pioavailshadow[i] = le64_to_cpu(tmp); in init_piobuf_state()
586 while (i < ARRAY_SIZE(dd->pioavailshadow)) in init_piobuf_state()
587 dd->pioavailshadow[i++] = 0; /* for debugging sanity */ in init_piobuf_state()
590 qib_chg_pioavailkernel(dd, 0, dd->piobcnt2k + dd->piobcnt4k, in init_piobuf_state()
592 dd->f_initvl15_bufs(dd); in init_piobuf_state()
599 static int qib_create_workqueues(struct qib_devdata *dd) in qib_create_workqueues() argument
604 for (pidx = 0; pidx < dd->num_pports; ++pidx) { in qib_create_workqueues()
605 ppd = dd->pport + pidx; in qib_create_workqueues()
610 dd->unit, pidx); in qib_create_workqueues()
621 for (pidx = 0; pidx < dd->num_pports; ++pidx) { in qib_create_workqueues()
622 ppd = dd->pport + pidx; in qib_create_workqueues()
652 int qib_init(struct qib_devdata *dd, int reinit) in qib_init() argument
662 for (pidx = 0; pidx < dd->num_pports; ++pidx) { in qib_init()
663 ppd = dd->pport + pidx; in qib_init()
672 ret = init_after_reset(dd); in qib_init()
674 ret = loadtime_init(dd); in qib_init()
682 ret = dd->f_late_initreg(dd); in qib_init()
687 for (i = 0; dd->rcd && i < dd->first_user_ctxt; ++i) { in qib_init()
694 rcd = dd->rcd[i]; in qib_init()
698 lastfail = qib_create_rcvhdrq(dd, rcd); in qib_init()
702 qib_dev_err(dd, in qib_init()
708 for (pidx = 0; pidx < dd->num_pports; ++pidx) { in qib_init()
713 ppd = dd->pport + pidx; in qib_init()
721 dd->piosize4k : dd->piosize2k, in qib_init()
722 dd->rcvegrbufsize + in qib_init()
723 (dd->rcvhdrentsize << 2)); in qib_init()
735 lastfail = dd->f_bringup_serdes(ppd); in qib_init()
737 qib_devinfo(dd->pcidev, in qib_init()
755 enable_chip(dd); in qib_init()
757 init_piobuf_state(dd); in qib_init()
762 for (pidx = 0; pidx < dd->num_pports; ++pidx) { in qib_init()
763 ppd = dd->pport + pidx; in qib_init()
772 if (dd->flags & QIB_HAS_SEND_DMA) in qib_init()
781 dd->f_set_intr_state(dd, 1); in qib_init()
787 mod_timer(&dd->intrchk_timer, jiffies + HZ/2); in qib_init()
789 mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER); in qib_init()
802 int __attribute__((weak)) qib_enable_wc(struct qib_devdata *dd) in qib_enable_wc() argument
807 void __attribute__((weak)) qib_disable_wc(struct qib_devdata *dd) in qib_disable_wc() argument
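The two weak symbols above are architecture-neutral fallbacks for write-combining setup; arch-specific files (x86-64, ppc64) override them with real MTRR/PAT handling. A sketch of the stubs, with the -EOPNOTSUPP return treated as an assumption:

int __attribute__((weak)) qib_enable_wc(struct qib_devdata *dd)
{
        return -EOPNOTSUPP;     /* assumed: no WC support without an arch override */
}

void __attribute__((weak)) qib_disable_wc(struct qib_devdata *dd)
{
}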
818 struct qib_devdata *dd; in qib_lookup() local
822 dd = __qib_lookup(unit); in qib_lookup()
825 return dd; in qib_lookup()
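qib_lookup() above maps a unit number back to its qib_devdata under the driver's device-list lock. A sketch; the lock name is an assumption:

struct qib_devdata *qib_lookup(int unit)
{
        struct qib_devdata *dd;
        unsigned long flags;

        spin_lock_irqsave(&qib_devs_lock, flags);       /* assumed lock name */
        dd = __qib_lookup(unit);
        spin_unlock_irqrestore(&qib_devs_lock, flags);

        return dd;
}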
832 static void qib_stop_timers(struct qib_devdata *dd) in qib_stop_timers() argument
837 if (dd->stats_timer.data) { in qib_stop_timers()
838 del_timer_sync(&dd->stats_timer); in qib_stop_timers()
839 dd->stats_timer.data = 0; in qib_stop_timers()
841 if (dd->intrchk_timer.data) { in qib_stop_timers()
842 del_timer_sync(&dd->intrchk_timer); in qib_stop_timers()
843 dd->intrchk_timer.data = 0; in qib_stop_timers()
845 for (pidx = 0; pidx < dd->num_pports; ++pidx) { in qib_stop_timers()
846 ppd = dd->pport + pidx; in qib_stop_timers()
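qib_stop_timers() above synchronously stops the per-device timers (statistics and interrupt-check), using the .data field as an "armed" marker, then walks the ports to stop their timers as well. A sketch; the specific per-port timers named below are assumptions:

static void qib_stop_timers(struct qib_devdata *dd)
{
        struct qib_pportdata *ppd;
        int pidx;

        if (dd->stats_timer.data) {
                del_timer_sync(&dd->stats_timer);
                dd->stats_timer.data = 0;
        }
        if (dd->intrchk_timer.data) {
                del_timer_sync(&dd->intrchk_timer);
                dd->intrchk_timer.data = 0;
        }
        for (pidx = 0; pidx < dd->num_pports; ++pidx) {
                ppd = dd->pport + pidx;
                if (ppd->hol_timer.data)                        /* assumed timer */
                        del_timer_sync(&ppd->hol_timer);
                if (ppd->led_override_timer.data) {             /* assumed timer */
                        del_timer_sync(&ppd->led_override_timer);
                        atomic_set(&ppd->led_override_timer_active, 0);
                }
                if (ppd->symerr_clear_timer.data)               /* assumed timer */
                        del_timer_sync(&ppd->symerr_clear_timer);
        }
}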
867 static void qib_shutdown_device(struct qib_devdata *dd) in qib_shutdown_device() argument
872 for (pidx = 0; pidx < dd->num_pports; ++pidx) { in qib_shutdown_device()
873 ppd = dd->pport + pidx; in qib_shutdown_device()
882 dd->flags &= ~QIB_INITTED; in qib_shutdown_device()
885 dd->f_set_intr_state(dd, 0); in qib_shutdown_device()
887 for (pidx = 0; pidx < dd->num_pports; ++pidx) { in qib_shutdown_device()
888 ppd = dd->pport + pidx; in qib_shutdown_device()
889 dd->f_rcvctrl(ppd, QIB_RCVCTRL_TAILUPD_DIS | in qib_shutdown_device()
897 dd->f_sendctrl(ppd, QIB_SENDCTRL_CLEAR); in qib_shutdown_device()
906 for (pidx = 0; pidx < dd->num_pports; ++pidx) { in qib_shutdown_device()
907 ppd = dd->pport + pidx; in qib_shutdown_device()
908 dd->f_setextled(ppd, 0); /* make sure LEDs are off */ in qib_shutdown_device()
910 if (dd->flags & QIB_HAS_SEND_DMA) in qib_shutdown_device()
913 dd->f_sendctrl(ppd, QIB_SENDCTRL_AVAIL_DIS | in qib_shutdown_device()
919 dd->f_quiet_serdes(ppd); in qib_shutdown_device()
941 void qib_free_ctxtdata(struct qib_devdata *dd, struct qib_ctxtdata *rcd) in qib_free_ctxtdata() argument
947 dma_free_coherent(&dd->pcidev->dev, rcd->rcvhdrq_size, in qib_free_ctxtdata()
951 dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE, in qib_free_ctxtdata()
964 dma_free_coherent(&dd->pcidev->dev, size, in qib_free_ctxtdata()
999 static void qib_verify_pioperf(struct qib_devdata *dd) in qib_verify_pioperf() argument
1006 piobuf = dd->f_getsendbuf(dd->pport, 0ULL, &pbnum); in qib_verify_pioperf()
1008 qib_devinfo(dd->pcidev, in qib_verify_pioperf()
1021 qib_devinfo(dd->pcidev, in qib_verify_pioperf()
1035 dd->f_set_armlaunch(dd, 0); in qib_verify_pioperf()
1056 qib_dev_err(dd, in qib_verify_pioperf()
1066 dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(pbnum)); in qib_verify_pioperf()
1067 qib_sendbuf_done(dd, pbnum); in qib_verify_pioperf()
1068 dd->f_set_armlaunch(dd, 1); in qib_verify_pioperf()
1071 void qib_free_devdata(struct qib_devdata *dd) in qib_free_devdata() argument
1076 idr_remove(&qib_unit_table, dd->unit); in qib_free_devdata()
1077 list_del(&dd->list); in qib_free_devdata()
1081 qib_dbg_ibdev_exit(&dd->verbs_dev); in qib_free_devdata()
1083 free_percpu(dd->int_counter); in qib_free_devdata()
1084 ib_dealloc_device(&dd->verbs_dev.ibdev); in qib_free_devdata()
1087 u64 qib_int_counter(struct qib_devdata *dd) in qib_int_counter() argument
1093 int_counter += *per_cpu_ptr(dd->int_counter, cpu); in qib_int_counter()
1100 struct qib_devdata *dd; in qib_sps_ints() local
1104 list_for_each_entry(dd, &qib_dev_list, list) { in qib_sps_ints()
1105 sps_ints += qib_int_counter(dd); in qib_sps_ints()
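qib_int_counter() and qib_sps_ints() above report interrupt counts: the first sums a device's per-CPU counters, the second sums that over every device on the driver's list. A sketch; the CPU iterator and the list lock name are assumptions:

u64 qib_int_counter(struct qib_devdata *dd)
{
        int cpu;
        u64 int_counter = 0;

        for_each_possible_cpu(cpu)      /* assumed iterator */
                int_counter += *per_cpu_ptr(dd->int_counter, cpu);
        return int_counter;
}

u64 qib_sps_ints(void)
{
        unsigned long flags;
        struct qib_devdata *dd;
        u64 sps_ints = 0;

        spin_lock_irqsave(&qib_devs_lock, flags);       /* assumed lock name */
        list_for_each_entry(dd, &qib_dev_list, list) {
                sps_ints += qib_int_counter(dd);
        }
        spin_unlock_irqrestore(&qib_devs_lock, flags);

        return sps_ints;
}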
1122 struct qib_devdata *dd; in qib_alloc_devdata() local
1125 dd = (struct qib_devdata *) ib_alloc_device(sizeof(*dd) + extra); in qib_alloc_devdata()
1126 if (!dd) in qib_alloc_devdata()
1129 INIT_LIST_HEAD(&dd->list); in qib_alloc_devdata()
1134 ret = idr_alloc(&qib_unit_table, dd, 0, 0, GFP_NOWAIT); in qib_alloc_devdata()
1136 dd->unit = ret; in qib_alloc_devdata()
1137 list_add(&dd->list, &qib_dev_list); in qib_alloc_devdata()
1148 dd->int_counter = alloc_percpu(u64); in qib_alloc_devdata()
1149 if (!dd->int_counter) { in qib_alloc_devdata()
1168 qib_dbg_ibdev_init(&dd->verbs_dev); in qib_alloc_devdata()
1170 return dd; in qib_alloc_devdata()
1172 if (!list_empty(&dd->list)) in qib_alloc_devdata()
1173 list_del_init(&dd->list); in qib_alloc_devdata()
1174 ib_dealloc_device(&dd->verbs_dev.ibdev); in qib_alloc_devdata()
1183 void qib_disable_after_error(struct qib_devdata *dd) in qib_disable_after_error() argument
1185 if (dd->flags & QIB_INITTED) { in qib_disable_after_error()
1188 dd->flags &= ~QIB_INITTED; in qib_disable_after_error()
1189 if (dd->pport) in qib_disable_after_error()
1190 for (pidx = 0; pidx < dd->num_pports; ++pidx) { in qib_disable_after_error()
1193 ppd = dd->pport + pidx; in qib_disable_after_error()
1194 if (dd->flags & QIB_PRESENT) { in qib_disable_after_error()
1197 dd->f_setextled(ppd, 0); in qib_disable_after_error()
1208 if (dd->devstatusp) in qib_disable_after_error()
1209 *dd->devstatusp |= QIB_STATUS_HWERROR; in qib_disable_after_error()
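qib_disable_after_error() above takes a unit out of service after a hardware error: clear QIB_INITTED, push each port's link toward a disabled state and turn its LEDs off, then latch QIB_STATUS_HWERROR into the user-visible status word so applications see the unit as unusable until reset. A sketch; the link-state call and the per-port status bit are assumptions:

void qib_disable_after_error(struct qib_devdata *dd)
{
        if (dd->flags & QIB_INITTED) {
                u32 pidx;

                dd->flags &= ~QIB_INITTED;
                if (dd->pport)
                        for (pidx = 0; pidx < dd->num_pports; ++pidx) {
                                struct qib_pportdata *ppd;

                                ppd = dd->pport + pidx;
                                if (dd->flags & QIB_PRESENT) {
                                        qib_set_linkstate(ppd,
                                                QIB_IB_LINKDOWN_DISABLE); /* assumed call */
                                        dd->f_setextled(ppd, 0);
                                }
                                *ppd->statusp &= ~QIB_STATUS_IB_READY; /* assumed bit */
                        }
        }

        /* mark the unit as errored and unusable until reset */
        if (dd->devstatusp)
                *dd->devstatusp |= QIB_STATUS_HWERROR;
}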
1246 struct qib_devdata *dd = dev_get_drvdata(device); in qib_notify_dca_device() local
1249 return dd->f_notify_dca(dd, event); in qib_notify_dca_device()
1345 static void cleanup_device_data(struct qib_devdata *dd) in cleanup_device_data() argument
1353 for (pidx = 0; pidx < dd->num_pports; ++pidx) { in cleanup_device_data()
1354 if (dd->pport[pidx].statusp) in cleanup_device_data()
1355 *dd->pport[pidx].statusp &= ~QIB_STATUS_CHIP_PRESENT; in cleanup_device_data()
1357 spin_lock(&dd->pport[pidx].cc_shadow_lock); in cleanup_device_data()
1359 kfree(dd->pport[pidx].congestion_entries); in cleanup_device_data()
1360 dd->pport[pidx].congestion_entries = NULL; in cleanup_device_data()
1361 kfree(dd->pport[pidx].ccti_entries); in cleanup_device_data()
1362 dd->pport[pidx].ccti_entries = NULL; in cleanup_device_data()
1363 kfree(dd->pport[pidx].ccti_entries_shadow); in cleanup_device_data()
1364 dd->pport[pidx].ccti_entries_shadow = NULL; in cleanup_device_data()
1365 kfree(dd->pport[pidx].congestion_entries_shadow); in cleanup_device_data()
1366 dd->pport[pidx].congestion_entries_shadow = NULL; in cleanup_device_data()
1368 spin_unlock(&dd->pport[pidx].cc_shadow_lock); in cleanup_device_data()
1371 qib_disable_wc(dd); in cleanup_device_data()
1373 if (dd->pioavailregs_dma) { in cleanup_device_data()
1374 dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE, in cleanup_device_data()
1375 (void *) dd->pioavailregs_dma, in cleanup_device_data()
1376 dd->pioavailregs_phys); in cleanup_device_data()
1377 dd->pioavailregs_dma = NULL; in cleanup_device_data()
1380 if (dd->pageshadow) { in cleanup_device_data()
1381 struct page **tmpp = dd->pageshadow; in cleanup_device_data()
1382 dma_addr_t *tmpd = dd->physshadow; in cleanup_device_data()
1385 for (ctxt = 0; ctxt < dd->cfgctxts; ctxt++) { in cleanup_device_data()
1386 int ctxt_tidbase = ctxt * dd->rcvtidcnt; in cleanup_device_data()
1387 int maxtid = ctxt_tidbase + dd->rcvtidcnt; in cleanup_device_data()
1392 pci_unmap_page(dd->pcidev, tmpd[i], in cleanup_device_data()
1399 dd->pageshadow = NULL; in cleanup_device_data()
1401 dd->physshadow = NULL; in cleanup_device_data()
1412 spin_lock_irqsave(&dd->uctxt_lock, flags); in cleanup_device_data()
1413 tmp = dd->rcd; in cleanup_device_data()
1414 dd->rcd = NULL; in cleanup_device_data()
1415 spin_unlock_irqrestore(&dd->uctxt_lock, flags); in cleanup_device_data()
1416 for (ctxt = 0; tmp && ctxt < dd->ctxtcnt; ctxt++) { in cleanup_device_data()
1420 qib_free_ctxtdata(dd, rcd); in cleanup_device_data()
1423 kfree(dd->boardname); in cleanup_device_data()
1424 qib_cq_exit(dd); in cleanup_device_data()
1431 static void qib_postinit_cleanup(struct qib_devdata *dd) in qib_postinit_cleanup() argument
1440 if (dd->f_cleanup) in qib_postinit_cleanup()
1441 dd->f_cleanup(dd); in qib_postinit_cleanup()
1443 qib_pcie_ddcleanup(dd); in qib_postinit_cleanup()
1445 cleanup_device_data(dd); in qib_postinit_cleanup()
1447 qib_free_devdata(dd); in qib_postinit_cleanup()
1453 struct qib_devdata *dd = NULL; in qib_init_one() local
1466 dd = qib_init_iba6120_funcs(pdev, ent); in qib_init_one()
1471 dd = ERR_PTR(-ENODEV); in qib_init_one()
1476 dd = qib_init_iba7220_funcs(pdev, ent); in qib_init_one()
1480 dd = qib_init_iba7322_funcs(pdev, ent); in qib_init_one()
1490 if (IS_ERR(dd)) in qib_init_one()
1491 ret = PTR_ERR(dd); in qib_init_one()
1495 ret = qib_create_workqueues(dd); in qib_init_one()
1500 initfail = qib_init(dd, 0); in qib_init_one()
1502 ret = qib_register_ib_device(dd); in qib_init_one()
1511 dd->flags |= QIB_INITTED; in qib_init_one()
1513 j = qib_device_create(dd); in qib_init_one()
1515 qib_dev_err(dd, "Failed to create /dev devices: %d\n", -j); in qib_init_one()
1516 j = qibfs_add(dd); in qib_init_one()
1518 qib_dev_err(dd, "Failed filesystem setup for counters: %d\n", in qib_init_one()
1522 qib_stop_timers(dd); in qib_init_one()
1524 for (pidx = 0; pidx < dd->num_pports; ++pidx) in qib_init_one()
1525 dd->f_quiet_serdes(dd->pport + pidx); in qib_init_one()
1529 (void) qibfs_remove(dd); in qib_init_one()
1530 qib_device_remove(dd); in qib_init_one()
1533 qib_unregister_ib_device(dd); in qib_init_one()
1534 qib_postinit_cleanup(dd); in qib_init_one()
1540 ret = qib_enable_wc(dd); in qib_init_one()
1542 qib_dev_err(dd, in qib_init_one()
1548 qib_verify_pioperf(dd); in qib_init_one()
1555 struct qib_devdata *dd = pci_get_drvdata(pdev); in qib_remove_one() local
1559 qib_unregister_ib_device(dd); in qib_remove_one()
1566 qib_shutdown_device(dd); in qib_remove_one()
1568 qib_stop_timers(dd); in qib_remove_one()
1573 ret = qibfs_remove(dd); in qib_remove_one()
1575 qib_dev_err(dd, "Failed counters filesystem cleanup: %d\n", in qib_remove_one()
1578 qib_device_remove(dd); in qib_remove_one()
1580 qib_postinit_cleanup(dd); in qib_remove_one()
1592 int qib_create_rcvhdrq(struct qib_devdata *dd, struct qib_ctxtdata *rcd) in qib_create_rcvhdrq() argument
1601 amt = ALIGN(dd->rcvhdrcnt * dd->rcvhdrentsize * in qib_create_rcvhdrq()
1603 gfp_flags = (rcd->ctxt >= dd->first_user_ctxt) ? in qib_create_rcvhdrq()
1606 old_node_id = dev_to_node(&dd->pcidev->dev); in qib_create_rcvhdrq()
1607 set_dev_node(&dd->pcidev->dev, rcd->node_id); in qib_create_rcvhdrq()
1609 &dd->pcidev->dev, amt, &rcd->rcvhdrq_phys, in qib_create_rcvhdrq()
1611 set_dev_node(&dd->pcidev->dev, old_node_id); in qib_create_rcvhdrq()
1614 qib_dev_err(dd, in qib_create_rcvhdrq()
1620 if (rcd->ctxt >= dd->first_user_ctxt) { in qib_create_rcvhdrq()
1626 if (!(dd->flags & QIB_NODMA_RTAIL)) { in qib_create_rcvhdrq()
1627 set_dev_node(&dd->pcidev->dev, rcd->node_id); in qib_create_rcvhdrq()
1629 &dd->pcidev->dev, PAGE_SIZE, &phys_hdrqtail, in qib_create_rcvhdrq()
1631 set_dev_node(&dd->pcidev->dev, old_node_id); in qib_create_rcvhdrq()
1647 qib_dev_err(dd, in qib_create_rcvhdrq()
1653 dma_free_coherent(&dd->pcidev->dev, amt, rcd->rcvhdrq, in qib_create_rcvhdrq()
1671 struct qib_devdata *dd = rcd->dd; in qib_setup_eagerbufs() local
1687 egrsize = dd->rcvegrbufsize; in qib_setup_eagerbufs()
1710 old_node_id = dev_to_node(&dd->pcidev->dev); in qib_setup_eagerbufs()
1711 set_dev_node(&dd->pcidev->dev, rcd->node_id); in qib_setup_eagerbufs()
1713 dma_alloc_coherent(&dd->pcidev->dev, size, in qib_setup_eagerbufs()
1716 set_dev_node(&dd->pcidev->dev, old_node_id); in qib_setup_eagerbufs()
1731 dd->f_put_tid(dd, e + egroff + in qib_setup_eagerbufs()
1734 dd->kregbase + in qib_setup_eagerbufs()
1735 dd->rcvegrbase), in qib_setup_eagerbufs()
1746 dma_free_coherent(&dd->pcidev->dev, size, in qib_setup_eagerbufs()
1763 int init_chip_wc_pat(struct qib_devdata *dd, u32 vl15buflen) in init_chip_wc_pat() argument
1769 u64 qib_pio2koffset = dd->piobufbase & 0xffffffff; in init_chip_wc_pat()
1770 u64 qib_pio4koffset = dd->piobufbase >> 32; in init_chip_wc_pat()
1771 u64 qib_pio2klen = dd->piobcnt2k * dd->palign; in init_chip_wc_pat()
1772 u64 qib_pio4klen = dd->piobcnt4k * dd->align4k; in init_chip_wc_pat()
1773 u64 qib_physaddr = dd->physaddr; in init_chip_wc_pat()
1782 iounmap(dd->kregbase); in init_chip_wc_pat()
1783 dd->kregbase = NULL; in init_chip_wc_pat()
1794 if (dd->piobcnt4k == 0) { in init_chip_wc_pat()
1806 if (dd->uregbase > qib_kreglen) in init_chip_wc_pat()
1807 qib_userlen = dd->ureg_align * dd->cfgctxts; in init_chip_wc_pat()
1819 qib_userbase = ioremap_nocache(qib_physaddr + dd->uregbase, in init_chip_wc_pat()
1825 dd->kregbase = qib_kregbase; in init_chip_wc_pat()
1826 dd->kregend = (u64 __iomem *) in init_chip_wc_pat()
1828 dd->piobase = qib_piobase; in init_chip_wc_pat()
1829 dd->pio2kbase = (void __iomem *) in init_chip_wc_pat()
1830 (((char __iomem *) dd->piobase) + in init_chip_wc_pat()
1832 if (dd->piobcnt4k) in init_chip_wc_pat()
1833 dd->pio4kbase = (void __iomem *) in init_chip_wc_pat()
1834 (((char __iomem *) dd->piobase) + in init_chip_wc_pat()
1838 dd->userbase = qib_userbase; in init_chip_wc_pat()