Lines matching refs: dd
(Cross-reference listing: every line in this file that references the qib_devdata pointer dd, grouped by the function each line appears in.)
104 struct qib_devdata *dd = rcd->dd; in qib_get_base_info() local
135 ret = dd->f_get_base_info(rcd, kinfo); in qib_get_base_info()
139 kinfo->spi_rcvhdr_cnt = dd->rcvhdrcnt; in qib_get_base_info()
140 kinfo->spi_rcvhdrent_size = dd->rcvhdrentsize; in qib_get_base_info()
142 kinfo->spi_rcv_egrbufsize = dd->rcvegrbufsize; in qib_get_base_info()
151 kinfo->spi_tidcnt = dd->rcvtidcnt / subctxt_cnt; in qib_get_base_info()
153 kinfo->spi_tidcnt += dd->rcvtidcnt % subctxt_cnt; in qib_get_base_info()
158 kinfo->spi_nctxts = dd->cfgctxts; in qib_get_base_info()
160 kinfo->spi_unit = dd->unit; in qib_get_base_info()
186 kinfo->spi_rhf_offset = dd->rhf_offset; in qib_get_base_info()
188 kinfo->spi_pioavailaddr = (u64) dd->pioavailregs_phys; in qib_get_base_info()
192 (char *) dd->pioavailregs_dma; in qib_get_base_info()
193 kinfo->spi_uregbase = (u64) dd->uregbase + dd->ureg_align * rcd->ctxt; in qib_get_base_info()
203 dd->palign * in qib_get_base_info()
210 dd->palign * kinfo->spi_piocnt * slave; in qib_get_base_info()
231 kinfo->spi_pioindex = (kinfo->spi_piobufbase - dd->pio2k_bufbase) / in qib_get_base_info()
232 dd->palign; in qib_get_base_info()
233 kinfo->spi_pioalign = dd->palign; in qib_get_base_info()
240 kinfo->spi_piosize = dd->piosize2k - 2 * sizeof(u32); in qib_get_base_info()
246 kinfo->spi_hw_version = dd->revision; in qib_get_base_info()
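The spi_tidcnt and PIO lines above encode how qib_get_base_info() divides a shared context between subcontexts: each subcontext gets an equal share of the TIDs, the master keeps the division remainder, and each slave's PIO window sits spi_piocnt buffers (times the palign buffer stride) past the base. A minimal user-space sketch of that arithmetic, with hypothetical counts:

	/* Sketch of the subcontext split seen in qib_get_base_info()
	 * (lines 151-153 and 203-210 above); all values hypothetical. */
	#include <stdio.h>

	int main(void)
	{
		unsigned rcvtidcnt = 512;	/* dd->rcvtidcnt */
		unsigned palign = 2048;		/* dd->palign, PIO buffer stride */
		unsigned piocnt = 32;		/* per-subcontext PIO buffer count */
		unsigned subctxt_cnt = 3;

		/* each subcontext gets an equal share of the TIDs ... */
		unsigned tidcnt = rcvtidcnt / subctxt_cnt;
		/* ... and the master (subcontext 0) also keeps the remainder */
		unsigned master_tidcnt = tidcnt + rcvtidcnt % subctxt_cnt;

		printf("master TIDs: %u, slave TIDs: %u\n", master_tidcnt, tidcnt);

		/* slave N's PIO window starts N windows past the base */
		for (unsigned slave = 1; slave < subctxt_cnt; slave++)
			printf("slave %u PIO offset: %u bytes\n",
			       slave, palign * piocnt * slave);
		return 0;
	}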
290 struct qib_devdata *dd = rcd->dd; in qib_tid_update() local
298 if (!dd->pageshadow) { in qib_tid_update()
308 ctxttid = rcd->ctxt * dd->rcvtidcnt; in qib_tid_update()
310 tidcnt = dd->rcvtidcnt; in qib_tid_update()
314 tidcnt = (dd->rcvtidcnt / rcd->subctxt_cnt) + in qib_tid_update()
315 (dd->rcvtidcnt % rcd->subctxt_cnt); in qib_tid_update()
316 tidoff = dd->rcvtidcnt - tidcnt; in qib_tid_update()
320 tidcnt = dd->rcvtidcnt / rcd->subctxt_cnt; in qib_tid_update()
327 qib_devinfo(dd->pcidev, in qib_tid_update()
333 tidlist = (u16 *) &pagep[dd->rcvtidcnt]; in qib_tid_update()
340 tidbase = (u64 __iomem *) (((char __iomem *) dd->kregbase) + in qib_tid_update()
341 dd->rcvtidbase + in qib_tid_update()
361 dd->pcidev, in qib_tid_update()
370 if (!dd->pageshadow[ctxttid + tid]) in qib_tid_update()
385 dd->pageshadow[ctxttid + tid] = pagep[i]; in qib_tid_update()
386 dd->physshadow[ctxttid + tid] = in qib_tid_update()
387 qib_map_page(dd->pcidev, pagep[i], 0, PAGE_SIZE, in qib_tid_update()
393 physaddr = dd->physshadow[ctxttid + tid]; in qib_tid_update()
395 dd->f_put_tid(dd, &tidbase[tid], in qib_tid_update()
417 if (dd->pageshadow[ctxttid + tid]) { in qib_tid_update()
420 phys = dd->physshadow[ctxttid + tid]; in qib_tid_update()
421 dd->physshadow[ctxttid + tid] = dd->tidinvalid; in qib_tid_update()
425 dd->f_put_tid(dd, &tidbase[tid], in qib_tid_update()
427 dd->tidinvalid); in qib_tid_update()
428 pci_unmap_page(dd->pcidev, phys, PAGE_SIZE, in qib_tid_update()
430 dd->pageshadow[ctxttid + tid] = NULL; in qib_tid_update()
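qib_tid_update() repeats the same split when pinning pages: the context's TIDs start at ctxt * rcvtidcnt in the chip's TID table, the master subcontext takes the quotient plus the remainder (placed at the end of the context's range), and the slaves take the plain quotient. A sketch of the index arithmetic shown on lines 308-320, with hypothetical values:

	/* Sketch of the per-subcontext TID slice computed in
	 * qib_tid_update() (lines 308-320 above); counts hypothetical. */
	#include <stdio.h>

	int main(void)
	{
		unsigned rcvtidcnt = 512;	/* dd->rcvtidcnt */
		unsigned subctxt_cnt = 3;	/* rcd->subctxt_cnt */
		unsigned ctxt = 4;		/* rcd->ctxt */

		/* each context's TIDs start at a fixed stride in the table */
		unsigned ctxttid = ctxt * rcvtidcnt;

		/* master: quotient plus remainder, at the end of the range */
		unsigned tidcnt = rcvtidcnt / subctxt_cnt + rcvtidcnt % subctxt_cnt;
		unsigned tidoff = rcvtidcnt - tidcnt;

		printf("ctxttid=%u master slice: off=%u cnt=%u\n",
		       ctxttid, tidoff, tidcnt);
		printf("slave slice size: %u\n", rcvtidcnt / subctxt_cnt);
		return 0;
	}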
484 struct qib_devdata *dd = rcd->dd; in qib_tid_free() local
488 if (!dd->pageshadow) { in qib_tid_free()
499 ctxttid = rcd->ctxt * dd->rcvtidcnt; in qib_tid_free()
501 tidcnt = dd->rcvtidcnt; in qib_tid_free()
503 tidcnt = (dd->rcvtidcnt / rcd->subctxt_cnt) + in qib_tid_free()
504 (dd->rcvtidcnt % rcd->subctxt_cnt); in qib_tid_free()
505 ctxttid += dd->rcvtidcnt - tidcnt; in qib_tid_free()
507 tidcnt = dd->rcvtidcnt / rcd->subctxt_cnt; in qib_tid_free()
510 tidbase = (u64 __iomem *) ((char __iomem *)(dd->kregbase) + in qib_tid_free()
511 dd->rcvtidbase + in qib_tid_free()
530 if (dd->pageshadow[ctxttid + tid]) { in qib_tid_free()
534 p = dd->pageshadow[ctxttid + tid]; in qib_tid_free()
535 dd->pageshadow[ctxttid + tid] = NULL; in qib_tid_free()
536 phys = dd->physshadow[ctxttid + tid]; in qib_tid_free()
537 dd->physshadow[ctxttid + tid] = dd->tidinvalid; in qib_tid_free()
541 dd->f_put_tid(dd, &tidbase[tid], in qib_tid_free()
542 RCVHQ_RCV_TYPE_EXPECTED, dd->tidinvalid); in qib_tid_free()
543 pci_unmap_page(dd->pcidev, phys, PAGE_SIZE, in qib_tid_free()
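Both qib_tid_update()'s error path and qib_tid_free() tear a TID down in the same order: clear the shadow bookkeeping, point the chip's TID entry at tidinvalid via f_put_tid, and only then pci_unmap_page() the DMA mapping, so the hardware can never DMA through a mapping that is being destroyed. A user-space sketch of that ordering, with stubs standing in for the hardware calls:

	/* Sketch of the teardown order in qib_tid_free() (lines 530-543):
	 * clear the shadow entries, retarget the chip's TID at tidinvalid,
	 * and only then unmap the DMA page. Stubs model the hardware. */
	#include <stdio.h>
	#include <stddef.h>

	typedef unsigned long dma_addr_t;
	#define TIDINVALID 0xdeadUL		/* hypothetical dd->tidinvalid */

	static void put_tid(size_t tid, dma_addr_t pa)	/* stub: dd->f_put_tid */
	{
		printf("TID %zu -> %#lx\n", tid, pa);
	}

	static void unmap_page(dma_addr_t pa)		/* stub: pci_unmap_page */
	{
		printf("unmap %#lx\n", pa);
	}

	int main(void)
	{
		void *pageshadow[4] = { (void *)1, NULL, (void *)1, NULL };
		dma_addr_t physshadow[4] = { 0x1000, 0, 0x3000, 0 };

		for (size_t tid = 0; tid < 4; tid++) {
			if (!pageshadow[tid])
				continue;
			dma_addr_t phys = physshadow[tid];
			physshadow[tid] = TIDINVALID;
			pageshadow[tid] = NULL;
			put_tid(tid, TIDINVALID); /* chip stops using the page */
			unmap_page(phys);	  /* now safe to tear down DMA */
		}
		return 0;
	}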
645 (void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PKEYS, 0); in qib_set_part_key()
669 struct qib_devdata *dd = rcd->dd; in qib_manage_rcvq() local
689 dd->f_rcvctrl(rcd->ppd, rcvctrl_op, rcd->ctxt); in qib_manage_rcvq()
696 struct qib_devdata *dd) in qib_clean_part_key() argument
725 (void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PKEYS, 0); in qib_clean_part_key()
732 struct qib_devdata *dd = rcd->dd; in qib_mmap_mem() local
737 qib_devinfo(dd->pcidev, in qib_mmap_mem()
750 qib_devinfo(dd->pcidev, in qib_mmap_mem()
764 qib_devinfo(dd->pcidev, in qib_mmap_mem()
771 static int mmap_ureg(struct vm_area_struct *vma, struct qib_devdata *dd, in mmap_ureg() argument
783 sz = dd->flags & QIB_HAS_HDRSUPP ? 2 * PAGE_SIZE : PAGE_SIZE; in mmap_ureg()
785 qib_devinfo(dd->pcidev, in mmap_ureg()
790 phys = dd->physaddr + ureg; in mmap_ureg()
803 struct qib_devdata *dd, in mmap_piobufs() argument
816 if ((vma->vm_end - vma->vm_start) > (piocnt * dd->palign)) { in mmap_piobufs()
817 qib_devinfo(dd->pcidev, in mmap_piobufs()
824 phys = dd->physaddr + piobufs; in mmap_piobufs()
841 if (!dd->wc_cookie) in mmap_piobufs()
854 struct qib_devdata *dd = rcd->dd; in mmap_rcvegrbufs() local
863 qib_devinfo(dd->pcidev, in mmap_rcvegrbufs()
872 qib_devinfo(dd->pcidev, in mmap_rcvegrbufs()
920 struct qib_devdata *dd = rcd->dd; in mmap_kvaddr() local
961 qib_devinfo(dd->pcidev, in mmap_kvaddr()
1002 struct qib_devdata *dd; in qib_mmapf() local
1012 dd = rcd->dd; in qib_mmapf()
1046 ureg = dd->uregbase + dd->ureg_align * rcd->ctxt; in qib_mmapf()
1056 dd->palign * (rcd->piocnt - piocnt); in qib_mmapf()
1062 piobufs = rcd->piobufs + dd->palign * piocnt * slave; in qib_mmapf()
1066 ret = mmap_ureg(vma, dd, ureg); in qib_mmapf()
1068 ret = mmap_piobufs(vma, dd, rcd, piobufs, piocnt); in qib_mmapf()
1069 else if (pgaddr == dd->pioavailregs_phys) in qib_mmapf()
1072 (void *) dd->pioavailregs_dma, 0, in qib_mmapf()
1097 qib_devinfo(dd->pcidev, in qib_mmapf()
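qib_mmapf() treats the mmap offset (pgaddr) as a selector: it recomputes the per-context addresses (user registers at uregbase + ureg_align * ctxt, the subcontext's PIO window, the pioavail status page) and dispatches on whichever one the caller asked for. A sketch of that dispatch, with hypothetical addresses:

	/* Sketch of the dispatch in qib_mmapf() (lines 1046-1072): the
	 * mmap offset is matched against the per-context addresses to
	 * decide which region to map. All addresses hypothetical. */
	#include <stdio.h>

	int main(void)
	{
		unsigned long uregbase = 0x100000, ureg_align = 0x1000;
		unsigned long pioavailregs_phys = 0x200000;
		unsigned long piobufs = 0x300000;
		unsigned ctxt = 2;

		unsigned long ureg = uregbase + ureg_align * ctxt;
		unsigned long pgaddr = ureg;	/* offset from user mmap() */

		if (pgaddr == ureg)
			printf("map user registers at %#lx\n", ureg);
		else if (pgaddr == piobufs)
			printf("map PIO buffers\n");
		else if (pgaddr == pioavailregs_phys)
			printf("map pioavail status page\n");
		else
			printf("unknown offset\n");
		return 0;
	}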
1109 struct qib_devdata *dd = rcd->dd; in qib_poll_urgent() local
1114 spin_lock_irq(&dd->uctxt_lock); in qib_poll_urgent()
1122 spin_unlock_irq(&dd->uctxt_lock); in qib_poll_urgent()
1131 struct qib_devdata *dd = rcd->dd; in qib_poll_next() local
1136 spin_lock_irq(&dd->uctxt_lock); in qib_poll_next()
1137 if (dd->f_hdrqempty(rcd)) { in qib_poll_next()
1139 dd->f_rcvctrl(rcd->ppd, QIB_RCVCTRL_INTRAVAIL_ENB, rcd->ctxt); in qib_poll_next()
1143 spin_unlock_irq(&dd->uctxt_lock); in qib_poll_next()
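In qib_poll_next(), the empty-queue test and the interrupt re-arm both happen under dd->uctxt_lock, so a packet arriving between the check and the enable cannot be missed. A user-space sketch of the pattern (a pthread mutex stands in for the spinlock, flags for the hardware):

	/* Sketch of the poll pattern in qib_poll_next() (lines 1136-1143):
	 * "queue empty?" and the interrupt re-arm share one lock. */
	#include <stdio.h>
	#include <pthread.h>

	static pthread_mutex_t uctxt_lock = PTHREAD_MUTEX_INITIALIZER;
	static int hdrq_empty = 1;
	static int intr_enabled;

	int main(void)
	{
		unsigned mask = 0;

		pthread_mutex_lock(&uctxt_lock);
		if (hdrq_empty)
			intr_enabled = 1;	/* QIB_RCVCTRL_INTRAVAIL_ENB */
		else
			mask = 1;		/* data ready: report POLLIN */
		pthread_mutex_unlock(&uctxt_lock);

		printf("mask=%u intr=%d\n", mask, intr_enabled);
		return 0;
	}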
1166 static void assign_ctxt_affinity(struct file *fp, struct qib_devdata *dd) in assign_ctxt_affinity() argument
1170 const struct cpumask *local_mask = cpumask_of_pcibus(dd->pcidev->bus); in assign_ctxt_affinity()
1200 qib_dev_err(dd, in assign_ctxt_affinity()
1239 static int init_subctxts(struct qib_devdata *dd, in init_subctxts() argument
1258 qib_devinfo(dd->pcidev, in init_subctxts()
1276 size = ALIGN(dd->rcvhdrcnt * dd->rcvhdrentsize * in init_subctxts()
1312 struct qib_devdata *dd = ppd->dd; in setup_ctxt() local
1318 assign_ctxt_affinity(fp, dd); in setup_ctxt()
1322 numa_node_id()) : dd->assigned_node_id; in setup_ctxt()
1331 ptmp = kmalloc(dd->rcvtidcnt * sizeof(u16) + in setup_ctxt()
1332 dd->rcvtidcnt * sizeof(struct page **), in setup_ctxt()
1336 qib_dev_err(dd, in setup_ctxt()
1342 ret = init_subctxts(dd, rcd, uinfo); in setup_ctxt()
1347 init_waitqueue_head(&dd->rcd[ctxt]->wait); in setup_ctxt()
1351 dd->freectxts--; in setup_ctxt()
1359 dd->rcd[ctxt] = NULL; in setup_ctxt()
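The kmalloc in setup_ctxt() sizes one buffer for two arrays, and qib_tid_update() (line 333 above) carves it up: rcvtidcnt page pointers first, then rcvtidcnt 16-bit TID indices. A sketch of that layout:

	/* Sketch of the single allocation sized in setup_ctxt() (lines
	 * 1331-1332) and split in qib_tid_update() (line 333): page
	 * pointers followed by 16-bit TID indices, one malloc. */
	#include <stdio.h>
	#include <stdlib.h>
	#include <stdint.h>

	struct page;	/* opaque stand-in for the kernel's struct page */

	int main(void)
	{
		size_t rcvtidcnt = 512;

		/* one buffer, two views */
		void *ptmp = malloc(rcvtidcnt * sizeof(uint16_t) +
				    rcvtidcnt * sizeof(struct page **));
		if (!ptmp)
			return 1;

		struct page **pagep = ptmp;
		uint16_t *tidlist = (uint16_t *)&pagep[rcvtidcnt];

		printf("pagep=%p tidlist=%p\n", (void *)pagep, (void *)tidlist);
		free(ptmp);
		return 0;
	}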
1368 struct qib_devdata *dd = ppd->dd; in usable() local
1370 return dd && (dd->flags & QIB_PRESENT) && dd->kregbase && ppd->lid && in usable()
1378 static int choose_port_ctxt(struct file *fp, struct qib_devdata *dd, u32 port, in choose_port_ctxt() argument
1385 if (!usable(dd->pport + port - 1)) { in choose_port_ctxt()
1389 ppd = dd->pport + port - 1; in choose_port_ctxt()
1391 for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts && dd->rcd[ctxt]; in choose_port_ctxt()
1394 if (ctxt == dd->cfgctxts) { in choose_port_ctxt()
1399 u32 pidx = ctxt % dd->num_pports; in choose_port_ctxt()
1401 if (usable(dd->pport + pidx)) in choose_port_ctxt()
1402 ppd = dd->pport + pidx; in choose_port_ctxt()
1404 for (pidx = 0; pidx < dd->num_pports && !ppd; in choose_port_ctxt()
1406 if (usable(dd->pport + pidx)) in choose_port_ctxt()
1407 ppd = dd->pport + pidx; in choose_port_ctxt()
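choose_port_ctxt() scans for the first free context slot between first_user_ctxt and cfgctxts, then spreads contexts across ports round-robin (ctxt % num_pports), falling back to any usable port. A sketch reconstructing that selection from the fragments above, with hypothetical state:

	/* Sketch of choose_port_ctxt() (lines 1391-1407): first free
	 * context slot, then round-robin port choice with fallback. */
	#include <stdio.h>

	#define CFGCTXTS   8
	#define NUM_PPORTS 2

	static int rcd_used[CFGCTXTS] = { 1, 1, 1, 0, 0, 0, 0, 0 };
	static int port_usable[NUM_PPORTS] = { 1, 1 };

	int main(void)
	{
		int first_user_ctxt = 2, ctxt, pidx = -1;

		for (ctxt = first_user_ctxt;
		     ctxt < CFGCTXTS && rcd_used[ctxt]; ctxt++)
			;
		if (ctxt == CFGCTXTS) {
			printf("no free context\n");
			return 1;
		}

		/* preferred port: round-robin by context number */
		if (port_usable[ctxt % NUM_PPORTS])
			pidx = ctxt % NUM_PPORTS;
		else	/* fall back to the first usable port */
			for (int p = 0; p < NUM_PPORTS && pidx < 0; p++)
				if (port_usable[p])
					pidx = p;

		printf("ctxt %d on port %d\n", ctxt, pidx);
		return 0;
	}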
1418 struct qib_devdata *dd = qib_lookup(unit); in find_free_ctxt() local
1421 if (!dd || (uinfo->spu_port && uinfo->spu_port > dd->num_pports)) in find_free_ctxt()
1424 ret = choose_port_ctxt(fp, dd, uinfo->spu_port, uinfo); in find_free_ctxt()
1451 struct qib_devdata *dd = qib_lookup(ndev); in get_a_ctxt() local
1454 if (!dd) in get_a_ctxt()
1456 if (port && port <= dd->num_pports && in get_a_ctxt()
1457 usable(dd->pport + port - 1)) in get_a_ctxt()
1460 for (i = 0; i < dd->num_pports; i++) in get_a_ctxt()
1461 if (usable(dd->pport + i)) in get_a_ctxt()
1465 for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts; in get_a_ctxt()
1467 if (dd->rcd[ctxt]) in get_a_ctxt()
1472 udd = dd; in get_a_ctxt()
1482 struct qib_devdata *dd = qib_lookup(ndev); in get_a_ctxt() local
1484 if (dd) { in get_a_ctxt()
1485 ret = choose_port_ctxt(fp, dd, port, uinfo); in get_a_ctxt()
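get_a_ctxt() walks every unit, prefers one with the requested port usable, and otherwise counts busy user contexts so the least-loaded unit wins (udd tracks the current best). Not all of the tie-breaking is visible above; this sketch reconstructs the apparent intent with made-up counts:

	/* Sketch of the unit scan in get_a_ctxt() (lines 1451-1485):
	 * count busy user contexts per unit, keep the least-loaded
	 * usable one. Counts are hypothetical. */
	#include <stdio.h>

	#define NDEV 3

	int main(void)
	{
		int busy[NDEV] = { 6, 2, 4 };	/* busy user ctxts per unit */
		int usable[NDEV] = { 1, 1, 0 };
		int best = -1, best_busy = 1 << 30;

		for (int ndev = 0; ndev < NDEV; ndev++) {
			if (!usable[ndev])
				continue;
			if (busy[ndev] < best_busy) { /* udd = dd in driver */
				best_busy = busy[ndev];
				best = ndev;
			}
		}
		printf("chose unit %d\n", best);
		return 0;
	}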
1508 struct qib_devdata *dd = qib_lookup(ndev); in find_shared_ctxt() local
1511 if (!(dd && (dd->flags & QIB_PRESENT) && dd->kregbase)) in find_shared_ctxt()
1513 for (i = dd->first_user_ctxt; i < dd->cfgctxts; i++) { in find_shared_ctxt()
1514 struct qib_ctxtdata *rcd = dd->rcd[i]; in find_shared_ctxt()
1568 struct qib_devdata *dd = qib_lookup(ndev); in find_hca() local
1570 if (dd) { in find_hca()
1571 if (pcibus_to_node(dd->pcidev->bus) < 0) { in find_hca()
1576 pcibus_to_node(dd->pcidev->bus)) { in find_hca()
1590 struct qib_devdata *dd = rcd->dd; in do_qib_user_sdma_queue_create() local
1592 if (dd->flags & QIB_HAS_SEND_DMA) { in do_qib_user_sdma_queue_create()
1594 fd->pq = qib_user_sdma_queue_create(&dd->pcidev->dev, in do_qib_user_sdma_queue_create()
1595 dd->unit, in do_qib_user_sdma_queue_create()
1640 assign_ctxt_affinity(fp, (ctxt_fp(fp))->dd); in qib_assign_ctxt()
1679 struct qib_devdata *dd; in qib_do_user_init() local
1689 dd = rcd->dd; in qib_do_user_init()
1692 uctxt = rcd->ctxt - dd->first_user_ctxt; in qib_do_user_init()
1693 if (uctxt < dd->ctxts_extrabuf) { in qib_do_user_init()
1694 rcd->piocnt = dd->pbufsctxt + 1; in qib_do_user_init()
1697 rcd->piocnt = dd->pbufsctxt; in qib_do_user_init()
1699 dd->ctxts_extrabuf; in qib_do_user_init()
1708 if ((rcd->pio_base + rcd->piocnt) > dd->piobcnt2k) { in qib_do_user_init()
1709 if (rcd->pio_base >= dd->piobcnt2k) { in qib_do_user_init()
1710 qib_dev_err(dd, in qib_do_user_init()
1712 dd->unit, rcd->ctxt); in qib_do_user_init()
1716 rcd->piocnt = dd->piobcnt2k - rcd->pio_base; in qib_do_user_init()
1717 qib_dev_err(dd, "Ctxt%u: would use 4KB bufs, using %u\n", in qib_do_user_init()
1721 rcd->piobufs = dd->pio2k_bufbase + rcd->pio_base * dd->palign; in qib_do_user_init()
1722 qib_chg_pioavailkernel(dd, rcd->pio_base, rcd->piocnt, in qib_do_user_init()
1734 dd->f_sendctrl(dd->pport, QIB_SENDCTRL_AVAIL_BLIP); in qib_do_user_init()
1742 ret = qib_create_rcvhdrq(dd, rcd); in qib_do_user_init()
1768 dd->f_rcvctrl(rcd->ppd, QIB_RCVCTRL_CTXT_ENB | QIB_RCVCTRL_TIDFLOW_ENB, in qib_do_user_init()
1779 qib_chg_pioavailkernel(dd, rcd->pio_base, rcd->piocnt, in qib_do_user_init()
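qib_do_user_init() carves the 2 KB PIO buffers across user contexts: the first ctxts_extrabuf contexts get pbufsctxt + 1 buffers each, the rest get pbufsctxt, and a context's window starts pio_base * palign bytes past pio2k_bufbase. The pio_base expressions here are reconstructed from the fragments shown (lines 1692-1721); values are hypothetical:

	/* Sketch of the PIO-buffer carve-up in qib_do_user_init():
	 * contexts below ctxts_extrabuf get one extra buffer each. */
	#include <stdio.h>

	int main(void)
	{
		unsigned pbufsctxt = 10;	/* dd->pbufsctxt */
		unsigned ctxts_extrabuf = 3;	/* ctxts with one extra buf */
		unsigned palign = 2048;		/* dd->palign */
		unsigned long pio2k_bufbase = 0x40000;

		for (unsigned uctxt = 0; uctxt < 5; uctxt++) {
			unsigned piocnt, pio_base;

			if (uctxt < ctxts_extrabuf) {
				piocnt = pbufsctxt + 1;
				pio_base = piocnt * uctxt;
			} else {
				piocnt = pbufsctxt;
				pio_base = piocnt * uctxt + ctxts_extrabuf;
			}
			printf("uctxt %u: %u bufs at %#lx\n", uctxt, piocnt,
			       pio2k_bufbase + (unsigned long)pio_base * palign);
		}
		return 0;
	}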
1794 struct qib_devdata *dd = rcd->dd; in unlock_expected_tids() local
1795 int ctxt_tidbase = rcd->ctxt * dd->rcvtidcnt; in unlock_expected_tids()
1796 int i, cnt = 0, maxtid = ctxt_tidbase + dd->rcvtidcnt; in unlock_expected_tids()
1799 struct page *p = dd->pageshadow[i]; in unlock_expected_tids()
1805 phys = dd->physshadow[i]; in unlock_expected_tids()
1806 dd->physshadow[i] = dd->tidinvalid; in unlock_expected_tids()
1807 dd->pageshadow[i] = NULL; in unlock_expected_tids()
1808 pci_unmap_page(dd->pcidev, phys, PAGE_SIZE, in unlock_expected_tids()
1820 struct qib_devdata *dd; in qib_close() local
1835 dd = rcd->dd; in qib_close()
1862 spin_lock_irqsave(&dd->uctxt_lock, flags); in qib_close()
1864 dd->rcd[ctxt] = NULL; in qib_close()
1867 spin_unlock_irqrestore(&dd->uctxt_lock, flags); in qib_close()
1879 if (dd->kregbase) { in qib_close()
1881 dd->f_rcvctrl(rcd->ppd, QIB_RCVCTRL_CTXT_DIS | in qib_close()
1885 qib_clean_part_key(rcd, dd); in qib_close()
1886 qib_disarm_piobufs(dd, rcd->pio_base, rcd->piocnt); in qib_close()
1887 qib_chg_pioavailkernel(dd, rcd->pio_base, in qib_close()
1890 dd->f_clear_tids(dd, rcd); in qib_close()
1892 if (dd->pageshadow) in qib_close()
1895 dd->freectxts++; in qib_close()
1899 qib_free_ctxtdata(dd, rcd); /* after releasing the mutex */ in qib_close()
1917 info.unit = rcd->dd->unit; in qib_ctxt_info()
1922 info.num_ctxts = rcd->dd->cfgctxts - rcd->dd->first_user_ctxt; in qib_ctxt_info()
2011 spin_lock_irqsave(&ppd->dd->uctxt_lock, flags); in qib_set_uevent_bits()
2012 for (ctxt = ppd->dd->first_user_ctxt; ctxt < ppd->dd->cfgctxts; in qib_set_uevent_bits()
2014 rcd = ppd->dd->rcd[ctxt]; in qib_set_uevent_bits()
2030 spin_unlock_irqrestore(&ppd->dd->uctxt_lock, flags); in qib_set_uevent_bits()
2225 qib_force_pio_avail_update(rcd->dd); in qib_write()
2233 rcd->dd->f_set_armlaunch(rcd->dd, cmd.cmd.armlaunch_ctrl); in qib_write()
2372 static void qib_user_remove(struct qib_devdata *dd) in qib_user_remove() argument
2377 qib_cdev_cleanup(&dd->user_cdev, &dd->user_device); in qib_user_remove()
2380 static int qib_user_add(struct qib_devdata *dd) in qib_user_add() argument
2392 snprintf(name, sizeof(name), "ipath%d", dd->unit); in qib_user_add()
2393 ret = qib_cdev_init(dd->unit + 1, name, &qib_file_ops, in qib_user_add()
2394 &dd->user_cdev, &dd->user_device); in qib_user_add()
2396 qib_user_remove(dd); in qib_user_add()
2404 int qib_device_create(struct qib_devdata *dd) in qib_device_create() argument
2408 r = qib_user_add(dd); in qib_device_create()
2409 ret = qib_diag_add(dd); in qib_device_create()
2419 void qib_device_remove(struct qib_devdata *dd) in qib_device_remove() argument
2421 qib_user_remove(dd); in qib_device_remove()
2422 qib_diag_remove(dd); in qib_device_remove()