pq 68 arch/powerpc/kvm/book3s_xive.c u64 pq;
pq 98 arch/powerpc/kvm/book3s_xive.c pq = __raw_readq((void __iomem *)(vcpu->arch.xive_esc_vaddr +
pq 123 arch/powerpc/kvm/book3s_xive.c if (!(pq & XIVE_ESB_VAL_P))
pq 2070 arch/powerpc/kvm/book3s_xive.c u64 pq = xive_vm_esb_load(xd, XIVE_ESB_GET);
pq 2073 arch/powerpc/kvm/book3s_xive.c (pq & XIVE_ESB_VAL_P) ? 'P' : 'p',
pq 2074 arch/powerpc/kvm/book3s_xive.c (pq & XIVE_ESB_VAL_Q) ? 'Q' : 'q',
pq 2075 arch/powerpc/kvm/book3s_xive.c xc->esc_virq[i], pq, xd->eoi_page);
pq 854 arch/powerpc/sysdev/xive/common.c u8 pq;
pq 871 arch/powerpc/sysdev/xive/common.c pq = xive_esb_read(xd, XIVE_ESB_SET_PQ_10);
pq 873 arch/powerpc/sysdev/xive/common.c xd->saved_p = !!(pq & XIVE_ESB_VAL_P);
pq 973 arch/powerpc/sysdev/xive/common.c u8 pq;
pq 977 arch/powerpc/sysdev/xive/common.c pq = xive_esb_read(xd, XIVE_ESB_GET);
pq 986 arch/powerpc/sysdev/xive/common.c *state = (pq != XIVE_ESB_INVALID) && !xd->stale_p &&
pq 987 arch/powerpc/sysdev/xive/common.c (xd->saved_p || !!(pq & XIVE_ESB_VAL_P));
pq 300 crypto/async_tx/async_pq.c dma_addr_t pq[2];
pq 319 crypto/async_tx/async_pq.c pq[0] = 0;
pq 322 crypto/async_tx/async_pq.c pq[0] = dma_map_page(dev, P(blocks, disks),
pq 325 crypto/async_tx/async_pq.c unmap->addr[j++] = pq[0];
pq 329 crypto/async_tx/async_pq.c pq[1] = 0;
pq 332 crypto/async_tx/async_pq.c pq[1] = dma_map_page(dev, Q(blocks, disks),
pq 335 crypto/async_tx/async_pq.c unmap->addr[j++] = pq[1];
pq 342 crypto/async_tx/async_pq.c tx = device->device_prep_dma_pq_val(chan, pq,
pq 34 crypto/async_tx/async_raid6_recov.c dma_addr_t pq[2];
pq 47 crypto/async_tx/async_raid6_recov.c pq[1] = unmap->addr[2];
pq 50 crypto/async_tx/async_raid6_recov.c tx = dma->device_prep_dma_pq(chan, pq, unmap->addr, 2, coef,
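
The crypto/async_tx entries above all drive the generic dmaengine P+Q API, where the two-slot pq[] array names the destinations: pq[0] is the P (XOR parity) buffer and pq[1] the Q (Reed-Solomon syndrome) buffer, with DMA_PREP_PQ_DISABLE_P/Q masking either result. A minimal caller sketch follows; sketch_prep_q_only() is a hypothetical helper, only device_prep_dma_pq() and the flags are the real API.

#include <linux/dmaengine.h>

/*
 * Hedged sketch: compute only Q. Even a disabled slot needs a valid DMA
 * address, the same trick the ioat call sites below use ("specify valid
 * address for disabled result").
 */
static struct dma_async_tx_descriptor *
sketch_prep_q_only(struct dma_chan *chan, dma_addr_t q_dest,
		   dma_addr_t *src, unsigned int src_cnt,
		   const unsigned char *scf, size_t len)
{
	dma_addr_t pq[2];

	pq[0] = q_dest;		/* placeholder for the disabled P result */
	pq[1] = q_dest;		/* real Q destination */
	return chan->device->device_prep_dma_pq(chan, pq, src, src_cnt,
						scf, len,
						DMA_PREP_PQ_DISABLE_P);
}
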
pq 619 drivers/atm/firestream.c static struct FS_QENTRY pq[60];
pq 640 drivers/atm/firestream.c pq[qp].cmd = cmd;
pq 641 drivers/atm/firestream.c pq[qp].p0 = p1;
pq 642 drivers/atm/firestream.c pq[qp].p1 = p2;
pq 643 drivers/atm/firestream.c pq[qp].p2 = p3;
pq 1940 drivers/atm/firestream.c i, pq[qp].cmd, pq[qp].p0, pq[qp].p1, pq[qp].p2);
pq 410 drivers/dma/fsl_raid.c struct fsl_re_pq_cdb *pq;
pq 472 drivers/dma/fsl_raid.c pq = desc->cdb_addr;
pq 473 drivers/dma/fsl_raid.c pq->cdb32 = cdb;
pq 475 drivers/dma/fsl_raid.c p = pq->gfm_q1;
pq 496 drivers/dma/ioat/dma.c struct ioat_pq_descriptor *pq = desc->pq;
pq 498 drivers/dma/ioat/dma.c if (src_cnt_to_sw(pq->ctl_f.src_cnt) > 3)
pq 551 drivers/dma/ioat/dma.c struct ioat_pq_descriptor *pq = desc->pq;
pq 554 drivers/dma/ioat/dma.c if (!pq->dwbes_f.wbes)
pq 559 drivers/dma/ioat/dma.c if (pq->dwbes_f.p_val_err)
pq 562 drivers/dma/ioat/dma.c if (pq->dwbes_f.q_val_err)
pq 178 drivers/dma/ioat/dma.h struct ioat_pq_descriptor *pq;
pq 374 drivers/dma/ioat/dma.h ioat_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
pq 56 drivers/dma/ioat/prep.c struct ioat_pq_descriptor *pq = (struct ioat_pq_descriptor *) descs[0];
pq 60 drivers/dma/ioat/prep.c pq->coef[idx] = coef;
pq 66 drivers/dma/ioat/prep.c struct ioat_pq_descriptor *pq = (struct ioat_pq_descriptor *)desc[0];
pq 74 drivers/dma/ioat/prep.c pq->coef[idx] = coef;
pq 280 drivers/dma/ioat/prep.c struct ioat_pq_descriptor *pq = desc->pq;
pq 282 drivers/dma/ioat/prep.c struct ioat_raw_descriptor *descs[] = { (void *) pq, (void *) pq_ex };
pq 283 drivers/dma/ioat/prep.c int src_cnt = src_cnt_to_sw(pq->ctl_f.src_cnt);
pq 290 drivers/dma/ioat/prep.c (unsigned long long) (pq_ex ? pq_ex->next : pq->next),
pq 291 drivers/dma/ioat/prep.c desc->txd.flags, pq->size, pq->ctl, pq->ctl_f.op,
pq 292 drivers/dma/ioat/prep.c pq->ctl_f.int_en, pq->ctl_f.compl_write,
pq 293 drivers/dma/ioat/prep.c pq->ctl_f.p_disable ? "" : "p", pq->ctl_f.q_disable ? "" : "q",
pq 294 drivers/dma/ioat/prep.c pq->ctl_f.src_cnt);
pq 297 drivers/dma/ioat/prep.c (unsigned long long) pq_get_src(descs, i), pq->coef[i]);
pq 298 drivers/dma/ioat/prep.c dev_dbg(dev, "\tP: %#llx\n", pq->p_addr);
pq 299 drivers/dma/ioat/prep.c dev_dbg(dev, "\tQ: %#llx\n", pq->q_addr);
pq 300 drivers/dma/ioat/prep.c dev_dbg(dev, "\tNEXT: %#llx\n", pq->next);
pq 307 drivers/dma/ioat/prep.c struct ioat_pq_descriptor *pq = desc->pq;
pq 308 drivers/dma/ioat/prep.c struct ioat_raw_descriptor *descs[] = { (void *)pq,
pq 309 drivers/dma/ioat/prep.c (void *)pq,
pq 310 drivers/dma/ioat/prep.c (void *)pq };
pq 311 drivers/dma/ioat/prep.c int src_cnt = src16_cnt_to_sw(pq->ctl_f.src_cnt);
pq 323 drivers/dma/ioat/prep.c (unsigned long long) pq->next,
pq 324 drivers/dma/ioat/prep.c desc->txd.flags, pq->size, pq->ctl,
pq 325 drivers/dma/ioat/prep.c pq->ctl_f.op, pq->ctl_f.int_en,
pq 326 drivers/dma/ioat/prep.c pq->ctl_f.compl_write,
pq 327 drivers/dma/ioat/prep.c pq->ctl_f.p_disable ? "" : "p", pq->ctl_f.q_disable ? "" : "q",
pq 328 drivers/dma/ioat/prep.c pq->ctl_f.src_cnt);
pq 332 drivers/dma/ioat/prep.c pq->coef[i]);
pq 334 drivers/dma/ioat/prep.c dev_dbg(dev, "\tP: %#llx\n", pq->p_addr);
pq 335 drivers/dma/ioat/prep.c dev_dbg(dev, "\tQ: %#llx\n", pq->q_addr);
pq 350 drivers/dma/ioat/prep.c struct ioat_pq_descriptor *pq;
pq 393 drivers/dma/ioat/prep.c pq = desc->pq;
pq 402 drivers/dma/ioat/prep.c descs[0] = (struct ioat_raw_descriptor *) pq;
pq 416 drivers/dma/ioat/prep.c pq->size = xfer_size;
pq 417 drivers/dma/ioat/prep.c pq->p_addr = dst[0] + offset;
pq 418 drivers/dma/ioat/prep.c pq->q_addr = dst[1] + offset;
pq 419 drivers/dma/ioat/prep.c pq->ctl = 0;
pq 420 drivers/dma/ioat/prep.c pq->ctl_f.op = op;
pq 423 drivers/dma/ioat/prep.c pq->ctl_f.wb_en = result ? 1 : 0;
pq 424 drivers/dma/ioat/prep.c pq->ctl_f.src_cnt = src_cnt_to_hw(s);
pq 425 drivers/dma/ioat/prep.c pq->ctl_f.p_disable = !!(flags & DMA_PREP_PQ_DISABLE_P);
pq 426 drivers/dma/ioat/prep.c pq->ctl_f.q_disable = !!(flags & DMA_PREP_PQ_DISABLE_Q);
pq 437 drivers/dma/ioat/prep.c pq->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
pq 441 drivers/dma/ioat/prep.c pq->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
pq 442 drivers/dma/ioat/prep.c pq->ctl_f.compl_write = 1;
pq 472 drivers/dma/ioat/prep.c struct ioat_pq_descriptor *pq;
pq 501 drivers/dma/ioat/prep.c pq = desc->pq;
pq 503 drivers/dma/ioat/prep.c descs[0] = (struct ioat_raw_descriptor *) pq;
pq 512 drivers/dma/ioat/prep.c pq->sed_addr = desc->sed->dma;
pq 530 drivers/dma/ioat/prep.c pq->size = xfer_size;
pq 531 drivers/dma/ioat/prep.c pq->p_addr = dst[0] + offset;
pq 532 drivers/dma/ioat/prep.c pq->q_addr = dst[1] + offset;
pq 533 drivers/dma/ioat/prep.c pq->ctl = 0;
pq 534 drivers/dma/ioat/prep.c pq->ctl_f.op = op;
pq 535 drivers/dma/ioat/prep.c pq->ctl_f.src_cnt = src16_cnt_to_hw(s);
pq 538 drivers/dma/ioat/prep.c pq->ctl_f.wb_en = result ? 1 : 0;
pq 539 drivers/dma/ioat/prep.c pq->ctl_f.p_disable = !!(flags & DMA_PREP_PQ_DISABLE_P);
pq 540 drivers/dma/ioat/prep.c pq->ctl_f.q_disable = !!(flags & DMA_PREP_PQ_DISABLE_Q);
pq 551 drivers/dma/ioat/prep.c pq->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
pq 554 drivers/dma/ioat/prep.c pq->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
pq 555 drivers/dma/ioat/prep.c pq->ctl_f.compl_write = 1;
pq 619 drivers/dma/ioat/prep.c ioat_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
pq 630 drivers/dma/ioat/prep.c pq[0] = pq[1];
pq 632 drivers/dma/ioat/prep.c pq[1] = pq[0];
pq 640 drivers/dma/ioat/prep.c __ioat_prep_pq16_lock(chan, pqres, pq, src, src_cnt, scf, len,
pq 642 drivers/dma/ioat/prep.c __ioat_prep_pq_lock(chan, pqres, pq, src, src_cnt, scf, len,
pq 651 drivers/dma/ioat/prep.c dma_addr_t pq[2];
pq 661 drivers/dma/ioat/prep.c pq[0] = dst;
pq 663 drivers/dma/ioat/prep.c pq[1] = dst; /* specify valid address for disabled result */
pq 666 drivers/dma/ioat/prep.c __ioat_prep_pq16_lock(chan, NULL, pq, src, src_cnt, scf, len,
pq 668 drivers/dma/ioat/prep.c __ioat_prep_pq_lock(chan, NULL, pq, src, src_cnt, scf, len,
pq 678 drivers/dma/ioat/prep.c dma_addr_t pq[2];
pq 693 drivers/dma/ioat/prep.c pq[0] = src[0];
pq 695 drivers/dma/ioat/prep.c pq[1] = pq[0]; /* specify valid address for disabled result */
pq 698 drivers/dma/ioat/prep.c __ioat_prep_pq16_lock(chan, result, pq, &src[1], src_cnt - 1,
pq 700 drivers/dma/ioat/prep.c __ioat_prep_pq_lock(chan, result, pq, &src[1], src_cnt - 1,
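
The ioat entries above show the same pattern twice (8-source and 16-source descriptors): each generic DMA_PREP_* flag is latched into a bitfield of the hardware PQ descriptor. A hedged sketch of that mapping, assuming a simplified stand-in for the driver-private struct ioat_pq_descriptor (the real layout lives in drivers/dma/ioat/hw.h and differs in field order and width):

#include <linux/dmaengine.h>
#include <linux/types.h>

/* Illustrative stand-in; only the fields the call sites above touch. */
struct sketch_pq_descriptor {
	u32 size;
	union {
		u32 ctl;
		struct {
			unsigned int op:8;
			unsigned int src_cnt:7;
			unsigned int p_disable:1;
			unsigned int q_disable:1;
			unsigned int int_en:1;
			unsigned int compl_write:1;
			unsigned int fence:1;
			unsigned int wb_en:1;
			unsigned int rsvd:11;
		} ctl_f;
	};
	u64 p_addr;
	u64 q_addr;
};

static void sketch_fill_pq_ctl(struct sketch_pq_descriptor *pq,
			       unsigned long flags, bool want_result)
{
	pq->ctl = 0;				/* start from a clean control word */
	pq->ctl_f.wb_en = want_result ? 1 : 0;	/* write validation status back */
	pq->ctl_f.p_disable = !!(flags & DMA_PREP_PQ_DISABLE_P);
	pq->ctl_f.q_disable = !!(flags & DMA_PREP_PQ_DISABLE_Q);
	pq->ctl_f.fence = !!(flags & DMA_PREP_FENCE);	/* order vs. next op */
	pq->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
	pq->ctl_f.compl_write = 1;		/* completion writeback */
}
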
pq 671 drivers/dma/iop-adma.c iop_adma_prep_dma_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
pq 1083 drivers/dma/iop-adma.c struct page *pq[IOP_ADMA_NUM_SRC_TEST+2+2];
pq 1085 drivers/dma/iop-adma.c struct page **pq_hw = &pq[IOP_ADMA_NUM_SRC_TEST+2];
pq 1101 drivers/dma/iop-adma.c for (i = 0; i < ARRAY_SIZE(pq); i++) {
pq 1102 drivers/dma/iop-adma.c pq[i] = alloc_page(GFP_KERNEL);
pq 1103 drivers/dma/iop-adma.c if (!pq[i]) {
pq 1105 drivers/dma/iop-adma.c __free_page(pq[i]);
pq 1112 drivers/dma/iop-adma.c pq_sw[i] = page_address(pq[i]);
pq 1115 drivers/dma/iop-adma.c pq_sw[i] = page_address(pq[i]);
pq 1116 drivers/dma/iop-adma.c pq_sw[i+1] = page_address(pq[i+1]);
pq 1136 drivers/dma/iop-adma.c pq_src[i] = dma_map_page(dev, pq[i], 0, PAGE_SIZE,
pq 1173 drivers/dma/iop-adma.c pq_src[i] = dma_map_page(dev, pq[i], 0, PAGE_SIZE,
pq 1205 drivers/dma/iop-adma.c pq_src[i] = dma_map_page(dev, pq[i], 0, PAGE_SIZE,
pq 1235 drivers/dma/iop-adma.c i = ARRAY_SIZE(pq);
pq 1237 drivers/dma/iop-adma.c __free_page(pq[i]);
pq 2586 drivers/dma/ppc4xx/adma.c struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
pq 2600 drivers/dma/ppc4xx/adma.c pdest = pq[0];
pq 2605 drivers/dma/ppc4xx/adma.c qdest = pq[1];
pq 2769 drivers/dma/ppc4xx/adma.c dma_addr_t pq[2];
pq 2772 drivers/dma/ppc4xx/adma.c pq[0] = src[0];
pq 2773 drivers/dma/ppc4xx/adma.c pq[1] = 0;
pq 2776 drivers/dma/ppc4xx/adma.c tx = ppc440spe_adma_prep_dma_pqzero_sum(chan, pq, &src[1],
pq 81 drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c retval = kfd_gtt_sa_allocate(dev, queue_size, &kq->pq);
pq 87 drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c kq->pq_kernel_addr = kq->pq->cpu_ptr;
pq 88 drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c kq->pq_gpu_addr = kq->pq->gpu_addr;
pq 178 drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c kfd_gtt_sa_free(dev, kq->pq);
pq 204 drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c kfd_gtt_sa_free(kq->dev, kq->pq);
pq 88 drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h struct kfd_mem_obj *pq;
pq 305 drivers/infiniband/hw/hfi1/file_ops.c struct hfi1_user_sdma_pkt_q *pq;
pq 312 drivers/infiniband/hw/hfi1/file_ops.c pq = srcu_dereference(fd->pq, &fd->pq_srcu);
pq 313 drivers/infiniband/hw/hfi1/file_ops.c if (!cq || !pq) {
pq 325 drivers/infiniband/hw/hfi1/file_ops.c if (atomic_read(&pq->n_reqs) == pq->n_max_reqs) {
pq 1447 drivers/infiniband/hw/hfi1/hfi.h struct hfi1_user_sdma_pkt_q __rcu *pq;
pq 81 drivers/infiniband/hw/hfi1/user_sdma.c static inline void pq_update(struct hfi1_user_sdma_pkt_q *pq);
pq 94 drivers/infiniband/hw/hfi1/user_sdma.c static inline void set_comp_state(struct hfi1_user_sdma_pkt_q *pq,
pq 131 drivers/infiniband/hw/hfi1/user_sdma.c struct hfi1_user_sdma_pkt_q *pq =
pq 142 drivers/infiniband/hw/hfi1/user_sdma.c xchg(&pq->state, SDMA_PKT_Q_DEFERRED);
pq 143 drivers/infiniband/hw/hfi1/user_sdma.c if (list_empty(&pq->busy.list)) {
pq 144 drivers/infiniband/hw/hfi1/user_sdma.c pq->busy.lock = &sde->waitlock;
pq 145 drivers/infiniband/hw/hfi1/user_sdma.c iowait_get_priority(&pq->busy);
pq 146 drivers/infiniband/hw/hfi1/user_sdma.c iowait_queue(pkts_sent, &pq->busy, &sde->dmawait);
pq 157 drivers/infiniband/hw/hfi1/user_sdma.c struct hfi1_user_sdma_pkt_q *pq =
pq 159 drivers/infiniband/hw/hfi1/user_sdma.c pq->busy.lock = NULL;
pq 160 drivers/infiniband/hw/hfi1/user_sdma.c xchg(&pq->state, SDMA_PKT_Q_ACTIVE);
pq 171 drivers/infiniband/hw/hfi1/user_sdma.c struct hfi1_user_sdma_pkt_q *pq;
pq 181 drivers/infiniband/hw/hfi1/user_sdma.c pq = kzalloc(sizeof(*pq), GFP_KERNEL);
pq 182 drivers/infiniband/hw/hfi1/user_sdma.c if (!pq)
pq 184 drivers/infiniband/hw/hfi1/user_sdma.c pq->dd = dd;
pq 185 drivers/infiniband/hw/hfi1/user_sdma.c pq->ctxt = uctxt->ctxt;
pq 186 drivers/infiniband/hw/hfi1/user_sdma.c pq->subctxt = fd->subctxt;
pq 187 drivers/infiniband/hw/hfi1/user_sdma.c pq->n_max_reqs = hfi1_sdma_comp_ring_size;
pq 188 drivers/infiniband/hw/hfi1/user_sdma.c atomic_set(&pq->n_reqs, 0);
pq 189 drivers/infiniband/hw/hfi1/user_sdma.c init_waitqueue_head(&pq->wait);
pq 190 drivers/infiniband/hw/hfi1/user_sdma.c atomic_set(&pq->n_locked, 0);
pq 191 drivers/infiniband/hw/hfi1/user_sdma.c pq->mm = fd->mm;
pq 193 drivers/infiniband/hw/hfi1/user_sdma.c iowait_init(&pq->busy, 0, NULL, NULL, defer_packet_queue,
pq 195 drivers/infiniband/hw/hfi1/user_sdma.c pq->reqidx = 0;
pq 197 drivers/infiniband/hw/hfi1/user_sdma.c pq->reqs = kcalloc(hfi1_sdma_comp_ring_size,
pq 198 drivers/infiniband/hw/hfi1/user_sdma.c sizeof(*pq->reqs),
pq 200 drivers/infiniband/hw/hfi1/user_sdma.c if (!pq->reqs)
pq 203 drivers/infiniband/hw/hfi1/user_sdma.c pq->req_in_use = kcalloc(BITS_TO_LONGS(hfi1_sdma_comp_ring_size),
pq 204 drivers/infiniband/hw/hfi1/user_sdma.c sizeof(*pq->req_in_use),
pq 206 drivers/infiniband/hw/hfi1/user_sdma.c if (!pq->req_in_use)
pq 211 drivers/infiniband/hw/hfi1/user_sdma.c pq->txreq_cache = kmem_cache_create(buf,
pq 216 drivers/infiniband/hw/hfi1/user_sdma.c if (!pq->txreq_cache) {
pq 233 drivers/infiniband/hw/hfi1/user_sdma.c ret = hfi1_mmu_rb_register(pq, pq->mm, &sdma_rb_ops, dd->pport->hfi1_wq,
pq 234 drivers/infiniband/hw/hfi1/user_sdma.c &pq->handler);
pq 240 drivers/infiniband/hw/hfi1/user_sdma.c rcu_assign_pointer(fd->pq, pq);
pq 250 drivers/infiniband/hw/hfi1/user_sdma.c kmem_cache_destroy(pq->txreq_cache);
pq 252 drivers/infiniband/hw/hfi1/user_sdma.c kfree(pq->req_in_use);
pq 254 drivers/infiniband/hw/hfi1/user_sdma.c kfree(pq->reqs);
pq 256 drivers/infiniband/hw/hfi1/user_sdma.c kfree(pq);
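
The hfi1 allocation entries just above (user_sdma.c 181-256) trace the standard allocate-then-unwind shape: each allocation gets a goto label, and failure paths free in exact reverse order. A condensed sketch, with all names illustrative rather than the hfi1 originals:

#include <linux/slab.h>
#include <linux/bitops.h>

struct sketch_pkt_q {
	void *reqs;
	unsigned long *req_in_use;
	struct kmem_cache *txreq_cache;
};

static struct sketch_pkt_q *sketch_pkt_q_alloc(unsigned int ring_size,
					       size_t req_size)
{
	struct sketch_pkt_q *pq;

	pq = kzalloc(sizeof(*pq), GFP_KERNEL);
	if (!pq)
		return NULL;
	pq->reqs = kcalloc(ring_size, req_size, GFP_KERNEL);
	if (!pq->reqs)
		goto free_pq;
	pq->req_in_use = kcalloc(BITS_TO_LONGS(ring_size),
				 sizeof(*pq->req_in_use), GFP_KERNEL);
	if (!pq->req_in_use)
		goto free_reqs;
	pq->txreq_cache = kmem_cache_create("sketch_txreq", 256, 0, 0, NULL);
	if (!pq->txreq_cache)
		goto free_in_use;
	return pq;

free_in_use:	/* unwind in reverse order of allocation */
	kfree(pq->req_in_use);
free_reqs:
	kfree(pq->reqs);
free_pq:
	kfree(pq);
	return NULL;
}
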
pq 261 drivers/infiniband/hw/hfi1/user_sdma.c static void flush_pq_iowait(struct hfi1_user_sdma_pkt_q *pq)
pq 264 drivers/infiniband/hw/hfi1/user_sdma.c seqlock_t *lock = pq->busy.lock;
pq 269 drivers/infiniband/hw/hfi1/user_sdma.c if (!list_empty(&pq->busy.list)) {
pq 270 drivers/infiniband/hw/hfi1/user_sdma.c list_del_init(&pq->busy.list);
pq 271 drivers/infiniband/hw/hfi1/user_sdma.c pq->busy.lock = NULL;
pq 279 drivers/infiniband/hw/hfi1/user_sdma.c struct hfi1_user_sdma_pkt_q *pq;
pq 284 drivers/infiniband/hw/hfi1/user_sdma.c pq = srcu_dereference_check(fd->pq, &fd->pq_srcu,
pq 286 drivers/infiniband/hw/hfi1/user_sdma.c if (pq) {
pq 287 drivers/infiniband/hw/hfi1/user_sdma.c rcu_assign_pointer(fd->pq, NULL);
pq 291 drivers/infiniband/hw/hfi1/user_sdma.c if (pq->handler)
pq 292 drivers/infiniband/hw/hfi1/user_sdma.c hfi1_mmu_rb_unregister(pq->handler);
pq 293 drivers/infiniband/hw/hfi1/user_sdma.c iowait_sdma_drain(&pq->busy);
pq 296 drivers/infiniband/hw/hfi1/user_sdma.c pq->wait,
pq 297 drivers/infiniband/hw/hfi1/user_sdma.c !atomic_read(&pq->n_reqs));
pq 298 drivers/infiniband/hw/hfi1/user_sdma.c kfree(pq->reqs);
pq 299 drivers/infiniband/hw/hfi1/user_sdma.c kfree(pq->req_in_use);
pq 300 drivers/infiniband/hw/hfi1/user_sdma.c kmem_cache_destroy(pq->txreq_cache);
pq 301 drivers/infiniband/hw/hfi1/user_sdma.c flush_pq_iowait(pq);
pq 302 drivers/infiniband/hw/hfi1/user_sdma.c kfree(pq);
pq 348 drivers/infiniband/hw/hfi1/user_sdma.c struct hfi1_user_sdma_pkt_q *pq =
pq 349 drivers/infiniband/hw/hfi1/user_sdma.c srcu_dereference(fd->pq, &fd->pq_srcu);
pq 351 drivers/infiniband/hw/hfi1/user_sdma.c struct hfi1_devdata *dd = pq->dd;
pq 406 drivers/infiniband/hw/hfi1/user_sdma.c if (test_and_set_bit(info.comp_idx, pq->req_in_use)) {
pq 417 drivers/infiniband/hw/hfi1/user_sdma.c req = pq->reqs + info.comp_idx;
pq 420 drivers/infiniband/hw/hfi1/user_sdma.c req->pq = pq;
pq 435 drivers/infiniband/hw/hfi1/user_sdma.c atomic_inc(&pq->n_reqs);
pq 590 drivers/infiniband/hw/hfi1/user_sdma.c set_comp_state(pq, cq, info.comp_idx, QUEUED, 0);
pq 591 drivers/infiniband/hw/hfi1/user_sdma.c pq->state = SDMA_PKT_Q_ACTIVE;
pq 605 drivers/infiniband/hw/hfi1/user_sdma.c pq->busy.wait_dma,
pq 606 drivers/infiniband/hw/hfi1/user_sdma.c pq->state == SDMA_PKT_Q_ACTIVE,
pq 609 drivers/infiniband/hw/hfi1/user_sdma.c flush_pq_iowait(pq);
pq 622 drivers/infiniband/hw/hfi1/user_sdma.c wait_event(pq->busy.wait_dma,
pq 625 drivers/infiniband/hw/hfi1/user_sdma.c pq_update(pq);
pq 626 drivers/infiniband/hw/hfi1/user_sdma.c set_comp_state(pq, cq, info.comp_idx, ERROR, ret);
pq 679 drivers/infiniband/hw/hfi1/user_sdma.c trace_hfi1_sdma_user_compute_length(req->pq->dd,
pq 680 drivers/infiniband/hw/hfi1/user_sdma.c req->pq->ctxt,
pq 681 drivers/infiniband/hw/hfi1/user_sdma.c req->pq->subctxt,
pq 707 drivers/infiniband/hw/hfi1/user_sdma.c struct hfi1_user_sdma_pkt_q *pq = req->pq;
pq 730 drivers/infiniband/hw/hfi1/user_sdma.c ret = sdma_txadd_kvaddr(pq->dd, &tx->txreq, &tx->hdr, sizeof(tx->hdr));
pq 732 drivers/infiniband/hw/hfi1/user_sdma.c sdma_txclean(pq->dd, &tx->txreq);
pq 747 drivers/infiniband/hw/hfi1/user_sdma.c struct hfi1_user_sdma_pkt_q *pq = req->pq;
pq 756 drivers/infiniband/hw/hfi1/user_sdma.c ret = sdma_txadd_page(pq->dd, &tx->txreq, iovec->pages[pageidx],
pq 784 drivers/infiniband/hw/hfi1/user_sdma.c struct hfi1_user_sdma_pkt_q *pq = NULL;
pq 787 drivers/infiniband/hw/hfi1/user_sdma.c if (!req->pq)
pq 790 drivers/infiniband/hw/hfi1/user_sdma.c pq = req->pq;
pq 820 drivers/infiniband/hw/hfi1/user_sdma.c tx = kmem_cache_alloc(pq->txreq_cache, GFP_KERNEL);
pq 935 drivers/infiniband/hw/hfi1/user_sdma.c iowait_get_ib_work(&pq->busy),
pq 951 drivers/infiniband/hw/hfi1/user_sdma.c sdma_txclean(pq->dd, &tx->txreq);
pq 953 drivers/infiniband/hw/hfi1/user_sdma.c kmem_cache_free(pq->txreq_cache, tx);
pq 957 drivers/infiniband/hw/hfi1/user_sdma.c static u32 sdma_cache_evict(struct hfi1_user_sdma_pkt_q *pq, u32 npages)
pq 963 drivers/infiniband/hw/hfi1/user_sdma.c hfi1_mmu_rb_evict(pq->handler, &evict_data);
pq 974 drivers/infiniband/hw/hfi1/user_sdma.c struct hfi1_user_sdma_pkt_q *pq = req->pq;
pq 983 drivers/infiniband/hw/hfi1/user_sdma.c if (!hfi1_can_pin_pages(pq->dd, pq->mm,
pq 984 drivers/infiniband/hw/hfi1/user_sdma.c atomic_read(&pq->n_locked), npages)) {
pq 985 drivers/infiniband/hw/hfi1/user_sdma.c cleared = sdma_cache_evict(pq, npages);
pq 989 drivers/infiniband/hw/hfi1/user_sdma.c pinned = hfi1_acquire_user_pages(pq->mm,
pq 998 drivers/infiniband/hw/hfi1/user_sdma.c unpin_vector_pages(pq->mm, pages, node->npages, pinned);
pq 1004 drivers/infiniband/hw/hfi1/user_sdma.c atomic_add(pinned, &pq->n_locked);
pq 1011 drivers/infiniband/hw/hfi1/user_sdma.c unpin_vector_pages(node->pq->mm, node->pages, 0, node->npages);
pq 1012 drivers/infiniband/hw/hfi1/user_sdma.c atomic_sub(node->npages, &node->pq->n_locked);
pq 1020 drivers/infiniband/hw/hfi1/user_sdma.c struct hfi1_user_sdma_pkt_q *pq = req->pq;
pq 1027 drivers/infiniband/hw/hfi1/user_sdma.c hfi1_mmu_rb_remove_unless_exact(pq->handler,
pq 1048 drivers/infiniband/hw/hfi1/user_sdma.c node->pq = pq;
pq 1067 drivers/infiniband/hw/hfi1/user_sdma.c ret = hfi1_mmu_rb_insert(req->pq->handler, &node->rb);
pq 1159 drivers/infiniband/hw/hfi1/user_sdma.c struct hfi1_user_sdma_pkt_q *pq = req->pq;
pq 1256 drivers/infiniband/hw/hfi1/user_sdma.c pq->dd, pq->ctxt, pq->subctxt, req->info.comp_idx,
pq 1265 drivers/infiniband/hw/hfi1/user_sdma.c trace_hfi1_sdma_user_header(pq->dd, pq->ctxt, pq->subctxt,
pq 1267 drivers/infiniband/hw/hfi1/user_sdma.c return sdma_txadd_kvaddr(pq->dd, &tx->txreq, hdr, sizeof(*hdr));
pq 1276 drivers/infiniband/hw/hfi1/user_sdma.c struct hfi1_user_sdma_pkt_q *pq = req->pq;
pq 1375 drivers/infiniband/hw/hfi1/user_sdma.c trace_hfi1_sdma_user_header_ahg(pq->dd, pq->ctxt, pq->subctxt,
pq 1402 drivers/infiniband/hw/hfi1/user_sdma.c struct hfi1_user_sdma_pkt_q *pq;
pq 1410 drivers/infiniband/hw/hfi1/user_sdma.c pq = req->pq;
pq 1421 drivers/infiniband/hw/hfi1/user_sdma.c kmem_cache_free(pq->txreq_cache, tx);
pq 1428 drivers/infiniband/hw/hfi1/user_sdma.c set_comp_state(pq, cq, req->info.comp_idx, state, status);
pq 1429 drivers/infiniband/hw/hfi1/user_sdma.c pq_update(pq);
pq 1432 drivers/infiniband/hw/hfi1/user_sdma.c static inline void pq_update(struct hfi1_user_sdma_pkt_q *pq)
pq 1434 drivers/infiniband/hw/hfi1/user_sdma.c if (atomic_dec_and_test(&pq->n_reqs))
pq 1435 drivers/infiniband/hw/hfi1/user_sdma.c wake_up(&pq->wait);
pq 1449 drivers/infiniband/hw/hfi1/user_sdma.c sdma_txclean(req->pq->dd, t);
pq 1450 drivers/infiniband/hw/hfi1/user_sdma.c kmem_cache_free(req->pq->txreq_cache, tx);
pq 1463 drivers/infiniband/hw/hfi1/user_sdma.c hfi1_mmu_rb_remove(req->pq->handler,
pq 1470 drivers/infiniband/hw/hfi1/user_sdma.c clear_bit(req->info.comp_idx, req->pq->req_in_use);
pq 1473 drivers/infiniband/hw/hfi1/user_sdma.c static inline void set_comp_state(struct hfi1_user_sdma_pkt_q *pq,
pq 1482 drivers/infiniband/hw/hfi1/user_sdma.c trace_hfi1_sdma_user_completion(pq->dd, pq->ctxt, pq->subctxt,
pq 116 drivers/infiniband/hw/hfi1/user_sdma.h hfi1_cdbg(SDMA, "[%u:%u:%u:%u] " fmt, (req)->pq->dd->unit, \
pq 117 drivers/infiniband/hw/hfi1/user_sdma.h (req)->pq->ctxt, (req)->pq->subctxt, (req)->info.comp_idx, \
pq 146 drivers/infiniband/hw/hfi1/user_sdma.h struct hfi1_user_sdma_pkt_q *pq;
pq 178 drivers/infiniband/hw/hfi1/user_sdma.h struct hfi1_user_sdma_pkt_q *pq ____cacheline_aligned_in_smp;
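
Across the hfi1 entries above, pq->n_reqs is the in-flight request count: submission increments it (user_sdma.c 435), pq_update() decrements and wakes on the last completion (1432-1435), and teardown blocks until it drains (296-297). A minimal sketch of that handshake, with illustrative names:

#include <linux/atomic.h>
#include <linux/wait.h>

struct sketch_pq {
	atomic_t n_reqs;
	wait_queue_head_t wait;
};

static void sketch_submit(struct sketch_pq *pq)
{
	atomic_inc(&pq->n_reqs);		/* before handing to hardware */
}

static void sketch_complete(struct sketch_pq *pq)
{
	if (atomic_dec_and_test(&pq->n_reqs))	/* last one out */
		wake_up(&pq->wait);
}

static void sketch_drain(struct sketch_pq *pq)
{
	/* matches the teardown above: block until every request retires */
	wait_event(pq->wait, !atomic_read(&pq->n_reqs));
}
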
pq 1105 drivers/infiniband/hw/qib/qib.h struct qib_user_sdma_queue *pq;
pq 1181 drivers/infiniband/hw/qib/qib.h (((struct qib_filedata *)(fp)->private_data)->pq)
pq 1570 drivers/infiniband/hw/qib/qib_file_ops.c fd->pq = qib_user_sdma_queue_create(&dd->pcidev->dev,
pq 1574 drivers/infiniband/hw/qib/qib_file_ops.c if (!fd->pq)
pq 1814 drivers/infiniband/hw/qib/qib_file_ops.c if (fd->pq) {
pq 1815 drivers/infiniband/hw/qib/qib_file_ops.c qib_user_sdma_queue_drain(rcd->ppd, fd->pq);
pq 1816 drivers/infiniband/hw/qib/qib_file_ops.c qib_user_sdma_queue_destroy(fd->pq);
pq 1909 drivers/infiniband/hw/qib/qib_file_ops.c static int qib_sdma_get_inflight(struct qib_user_sdma_queue *pq,
pq 1912 drivers/infiniband/hw/qib/qib_file_ops.c const u32 val = qib_user_sdma_inflight_counter(pq);
pq 1921 drivers/infiniband/hw/qib/qib_file_ops.c struct qib_user_sdma_queue *pq,
pq 1927 drivers/infiniband/hw/qib/qib_file_ops.c if (!pq)
pq 1930 drivers/infiniband/hw/qib/qib_file_ops.c err = qib_user_sdma_make_progress(ppd, pq);
pq 1934 drivers/infiniband/hw/qib/qib_file_ops.c val = qib_user_sdma_complete_counter(pq);
pq 2246 drivers/infiniband/hw/qib/qib_file_ops.c struct qib_user_sdma_queue *pq = fp->pq;
pq 2248 drivers/infiniband/hw/qib/qib_file_ops.c if (!iter_is_iovec(from) || !from->nr_segs || !pq)
pq 2251 drivers/infiniband/hw/qib/qib_file_ops.c return qib_user_sdma_writev(rcd, pq, from->iov, from->nr_segs);
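
The qib entries above expose a two-counter scheme: pq->counter advances as packets are queued and pq->sent_counter as they retire, read through the exported qib_user_sdma_inflight_counter()/qib_user_sdma_complete_counter() accessors. A hedged sketch of a poller that drives progress until completion catches a target; sketch_wait_complete() is hypothetical (the real code sleeps under its own retry policy rather than busy-polling), and the (s32) compare keeps the test wrap-safe:

static int sketch_wait_complete(struct qib_pportdata *ppd,
				struct qib_user_sdma_queue *pq, u32 target)
{
	for (;;) {
		u32 done;
		int err = qib_user_sdma_make_progress(ppd, pq);

		if (err < 0)
			return err;
		done = qib_user_sdma_complete_counter(pq);
		if ((s32)(done - target) >= 0)	/* wrap-safe "done >= target" */
			return 0;
		cpu_relax();
	}
}
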
pq 81 drivers/infiniband/hw/qib/qib_user_sdma.c struct qib_user_sdma_queue *pq; /* which pq this pkt belongs to */
pq 185 drivers/infiniband/hw/qib/qib_user_sdma.c struct qib_user_sdma_queue *pq =
pq 189 drivers/infiniband/hw/qib/qib_user_sdma.c if (!pq)
pq 192 drivers/infiniband/hw/qib/qib_user_sdma.c pq->counter = 0;
pq 193 drivers/infiniband/hw/qib/qib_user_sdma.c pq->sent_counter = 0;
pq 194 drivers/infiniband/hw/qib/qib_user_sdma.c pq->num_pending = 0;
pq 195 drivers/infiniband/hw/qib/qib_user_sdma.c pq->num_sending = 0;
pq 196 drivers/infiniband/hw/qib/qib_user_sdma.c pq->added = 0;
pq 197 drivers/infiniband/hw/qib/qib_user_sdma.c pq->sdma_rb_node = NULL;
pq 199 drivers/infiniband/hw/qib/qib_user_sdma.c INIT_LIST_HEAD(&pq->sent);
pq 200 drivers/infiniband/hw/qib/qib_user_sdma.c spin_lock_init(&pq->sent_lock);
pq 201 drivers/infiniband/hw/qib/qib_user_sdma.c mutex_init(&pq->lock);
pq 203 drivers/infiniband/hw/qib/qib_user_sdma.c snprintf(pq->pkt_slab_name, sizeof(pq->pkt_slab_name),
pq 205 drivers/infiniband/hw/qib/qib_user_sdma.c pq->pkt_slab = kmem_cache_create(pq->pkt_slab_name,
pq 209 drivers/infiniband/hw/qib/qib_user_sdma.c if (!pq->pkt_slab)
pq 212 drivers/infiniband/hw/qib/qib_user_sdma.c snprintf(pq->header_cache_name, sizeof(pq->header_cache_name),
pq 214 drivers/infiniband/hw/qib/qib_user_sdma.c pq->header_cache = dma_pool_create(pq->header_cache_name,
pq 218 drivers/infiniband/hw/qib/qib_user_sdma.c if (!pq->header_cache)
pq 221 drivers/infiniband/hw/qib/qib_user_sdma.c pq->dma_pages_root = RB_ROOT;
pq 238 drivers/infiniband/hw/qib/qib_user_sdma.c pq->sdma_rb_node = sdma_rb_node;
pq 243 drivers/infiniband/hw/qib/qib_user_sdma.c dma_pool_destroy(pq->header_cache);
pq 245 drivers/infiniband/hw/qib/qib_user_sdma.c kmem_cache_destroy(pq->pkt_slab);
pq 247 drivers/infiniband/hw/qib/qib_user_sdma.c kfree(pq);
pq 248 drivers/infiniband/hw/qib/qib_user_sdma.c pq = NULL;
pq 251 drivers/infiniband/hw/qib/qib_user_sdma.c return pq;
pq 273 drivers/infiniband/hw/qib/qib_user_sdma.c static void *qib_user_sdma_alloc_header(struct qib_user_sdma_queue *pq,
pq 279 drivers/infiniband/hw/qib/qib_user_sdma.c hdr = dma_pool_alloc(pq->header_cache, GFP_KERNEL,
pq 296 drivers/infiniband/hw/qib/qib_user_sdma.c struct qib_user_sdma_queue *pq,
pq 433 drivers/infiniband/hw/qib/qib_user_sdma.c pbcvaddr = qib_user_sdma_alloc_header(pq, pbclen, &pbcdaddr);
pq 558 drivers/infiniband/hw/qib/qib_user_sdma.c struct qib_user_sdma_queue *pq,
pq 591 drivers/infiniband/hw/qib/qib_user_sdma.c ret = qib_user_sdma_page_to_frags(dd, pq, pkt,
pq 616 drivers/infiniband/hw/qib/qib_user_sdma.c struct qib_user_sdma_queue *pq,
pq 648 drivers/infiniband/hw/qib/qib_user_sdma.c dma_pool_free(pq->header_cache,
pq 659 drivers/infiniband/hw/qib/qib_user_sdma.c struct qib_user_sdma_queue *pq,
pq 687 drivers/infiniband/hw/qib/qib_user_sdma.c ret = qib_user_sdma_page_to_frags(dd, pq, pkt,
pq 716 drivers/infiniband/hw/qib/qib_user_sdma.c struct qib_user_sdma_queue *pq,
pq 728 drivers/infiniband/hw/qib/qib_user_sdma.c ret = qib_user_sdma_pin_pages(dd, pq, pkt, addr,
pq 739 drivers/infiniband/hw/qib/qib_user_sdma.c qib_user_sdma_free_pkt_frag(&dd->pcidev->dev, pq, pkt, idx);
pq 758 drivers/infiniband/hw/qib/qib_user_sdma.c struct qib_user_sdma_queue *pq,
pq 767 drivers/infiniband/hw/qib/qib_user_sdma.c ret = qib_user_sdma_coalesce(dd, pq, pkt, iov, niov);
pq 769 drivers/infiniband/hw/qib/qib_user_sdma.c ret = qib_user_sdma_pin_pkt(dd, pq, pkt, iov, niov);
pq 776 drivers/infiniband/hw/qib/qib_user_sdma.c struct qib_user_sdma_queue *pq,
pq 785 drivers/infiniband/hw/qib/qib_user_sdma.c qib_user_sdma_free_pkt_frag(dev, pq, pkt, i);
pq 790 drivers/infiniband/hw/qib/qib_user_sdma.c kmem_cache_free(pq->pkt_slab, pkt);
pq 804 drivers/infiniband/hw/qib/qib_user_sdma.c struct qib_user_sdma_queue *pq,
pq 818 drivers/infiniband/hw/qib/qib_user_sdma.c u32 counter = pq->counter;
pq 841 drivers/infiniband/hw/qib/qib_user_sdma.c pbc = qib_user_sdma_alloc_header(pq, len, &dma_addr);
pq 959 drivers/infiniband/hw/qib/qib_user_sdma.c pkt = kmem_cache_alloc(pq->pkt_slab, GFP_KERNEL);
pq 984 drivers/infiniband/hw/qib/qib_user_sdma.c ret = qib_user_sdma_init_payload(dd, pq, pkt,
pq 1013 drivers/infiniband/hw/qib/qib_user_sdma.c pkt->pq = pq;
pq 1028 drivers/infiniband/hw/qib/qib_user_sdma.c kmem_cache_free(pq->pkt_slab, pkt);
pq 1031 drivers/infiniband/hw/qib/qib_user_sdma.c dma_pool_free(pq->header_cache, pbc, dma_addr);
pq 1035 drivers/infiniband/hw/qib/qib_user_sdma.c qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, list);
pq 1040 drivers/infiniband/hw/qib/qib_user_sdma.c static void qib_user_sdma_set_complete_counter(struct qib_user_sdma_queue *pq,
pq 1043 drivers/infiniband/hw/qib/qib_user_sdma.c pq->sent_counter = c;
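
Each qib queue above owns two caches: a kmem_cache slab for packet structs and a dma_pool (header_cache) for small DMA-coherent packet headers, allocated with dma_pool_alloc() and returned with dma_pool_free(). A minimal sketch of that dma_pool lifecycle; the name, sizes, and demo function are illustrative:

#include <linux/dmapool.h>

static int sketch_header_cache_demo(struct device *dev)
{
	struct dma_pool *pool;
	dma_addr_t dma;
	void *hdr;

	pool = dma_pool_create("sketch-hdrs", dev, 64, 64, 0);
	if (!pool)
		return -ENOMEM;

	hdr = dma_pool_alloc(pool, GFP_KERNEL, &dma);	/* coherent, 64B aligned */
	if (hdr)
		dma_pool_free(pool, hdr, dma);

	dma_pool_destroy(pool);
	return hdr ? 0 : -ENOMEM;
}
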
pq 1048 drivers/infiniband/hw/qib/qib_user_sdma.c struct qib_user_sdma_queue *pq)
pq 1057 drivers/infiniband/hw/qib/qib_user_sdma.c if (!pq->num_sending)
pq 1067 drivers/infiniband/hw/qib/qib_user_sdma.c spin_lock_irqsave(&pq->sent_lock, flags);
pq 1068 drivers/infiniband/hw/qib/qib_user_sdma.c list_for_each_entry_safe(pkt, pkt_prev, &pq->sent, list) {
pq 1078 drivers/infiniband/hw/qib/qib_user_sdma.c pq->num_sending--;
pq 1080 drivers/infiniband/hw/qib/qib_user_sdma.c spin_unlock_irqrestore(&pq->sent_lock, flags);
pq 1089 drivers/infiniband/hw/qib/qib_user_sdma.c qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &free_list);
pq 1090 drivers/infiniband/hw/qib/qib_user_sdma.c qib_user_sdma_set_complete_counter(pq, counter);
pq 1096 drivers/infiniband/hw/qib/qib_user_sdma.c void qib_user_sdma_queue_destroy(struct qib_user_sdma_queue *pq)
pq 1098 drivers/infiniband/hw/qib/qib_user_sdma.c if (!pq)
pq 1101 drivers/infiniband/hw/qib/qib_user_sdma.c pq->sdma_rb_node->refcount--;
pq 1102 drivers/infiniband/hw/qib/qib_user_sdma.c if (pq->sdma_rb_node->refcount == 0) {
pq 1103 drivers/infiniband/hw/qib/qib_user_sdma.c rb_erase(&pq->sdma_rb_node->node, &qib_user_sdma_rb_root);
pq 1104 drivers/infiniband/hw/qib/qib_user_sdma.c kfree(pq->sdma_rb_node);
pq 1106 drivers/infiniband/hw/qib/qib_user_sdma.c dma_pool_destroy(pq->header_cache);
pq 1107 drivers/infiniband/hw/qib/qib_user_sdma.c kmem_cache_destroy(pq->pkt_slab);
pq 1108 drivers/infiniband/hw/qib/qib_user_sdma.c kfree(pq);
pq 1126 drivers/infiniband/hw/qib/qib_user_sdma.c struct qib_user_sdma_queue *pq)
pq 1132 drivers/infiniband/hw/qib/qib_user_sdma.c if (!pq)
pq 1136 drivers/infiniband/hw/qib/qib_user_sdma.c mutex_lock(&pq->lock);
pq 1137 drivers/infiniband/hw/qib/qib_user_sdma.c if (!pq->num_pending && !pq->num_sending) {
pq 1138 drivers/infiniband/hw/qib/qib_user_sdma.c mutex_unlock(&pq->lock);
pq 1142 drivers/infiniband/hw/qib/qib_user_sdma.c qib_user_sdma_queue_clean(ppd, pq);
pq 1143 drivers/infiniband/hw/qib/qib_user_sdma.c mutex_unlock(&pq->lock);
pq 1147 drivers/infiniband/hw/qib/qib_user_sdma.c if (pq->num_pending || pq->num_sending) {
pq 1152 drivers/infiniband/hw/qib/qib_user_sdma.c mutex_lock(&pq->lock);
pq 1157 drivers/infiniband/hw/qib/qib_user_sdma.c if (pq->num_pending) {
pq 1160 drivers/infiniband/hw/qib/qib_user_sdma.c if (pkt->pq == pq) {
pq 1161 drivers/infiniband/hw/qib/qib_user_sdma.c list_move_tail(&pkt->list, &pq->sent);
pq 1162 drivers/infiniband/hw/qib/qib_user_sdma.c pq->num_pending--;
pq 1163 drivers/infiniband/hw/qib/qib_user_sdma.c pq->num_sending++;
pq 1171 drivers/infiniband/hw/qib/qib_user_sdma.c list_splice_init(&pq->sent, &free_list);
pq 1172 drivers/infiniband/hw/qib/qib_user_sdma.c pq->num_sending = 0;
pq 1173 drivers/infiniband/hw/qib/qib_user_sdma.c qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &free_list);
pq 1174 drivers/infiniband/hw/qib/qib_user_sdma.c mutex_unlock(&pq->lock);
pq 1299 drivers/infiniband/hw/qib/qib_user_sdma.c pkt->pq->added = pkt->added;
pq 1300 drivers/infiniband/hw/qib/qib_user_sdma.c pkt->pq->num_pending--;
pq 1301 drivers/infiniband/hw/qib/qib_user_sdma.c spin_lock(&pkt->pq->sent_lock);
pq 1302 drivers/infiniband/hw/qib/qib_user_sdma.c pkt->pq->num_sending++;
pq 1303 drivers/infiniband/hw/qib/qib_user_sdma.c list_move_tail(&pkt->list, &pkt->pq->sent);
pq 1304 drivers/infiniband/hw/qib/qib_user_sdma.c spin_unlock(&pkt->pq->sent_lock);
pq 1322 drivers/infiniband/hw/qib/qib_user_sdma.c struct qib_user_sdma_queue *pq,
pq 1331 drivers/infiniband/hw/qib/qib_user_sdma.c if (pq->sdma_rb_node->refcount > 1) {
pq 1337 drivers/infiniband/hw/qib/qib_user_sdma.c pq->num_pending += count;
pq 1351 drivers/infiniband/hw/qib/qib_user_sdma.c pq->num_pending += count;
pq 1374 drivers/infiniband/hw/qib/qib_user_sdma.c struct qib_user_sdma_queue *pq,
pq 1386 drivers/infiniband/hw/qib/qib_user_sdma.c mutex_lock(&pq->lock);
pq 1393 drivers/infiniband/hw/qib/qib_user_sdma.c if (pq->added > ppd->sdma_descq_removed)
pq 1396 drivers/infiniband/hw/qib/qib_user_sdma.c if (pq->num_sending)
pq 1397 drivers/infiniband/hw/qib/qib_user_sdma.c qib_user_sdma_queue_clean(ppd, pq);
pq 1403 drivers/infiniband/hw/qib/qib_user_sdma.c ret = qib_user_sdma_queue_pkts(dd, ppd, pq,
pq 1419 drivers/infiniband/hw/qib/qib_user_sdma.c if (pq->num_sending)
pq 1420 drivers/infiniband/hw/qib/qib_user_sdma.c qib_user_sdma_queue_clean(ppd, pq);
pq 1423 drivers/infiniband/hw/qib/qib_user_sdma.c ret = qib_user_sdma_push_pkts(ppd, pq, &list, mxp);
pq 1428 drivers/infiniband/hw/qib/qib_user_sdma.c pq->counter += mxp;
pq 1435 drivers/infiniband/hw/qib/qib_user_sdma.c qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &list);
pq 1436 drivers/infiniband/hw/qib/qib_user_sdma.c mutex_unlock(&pq->lock);
pq 1442 drivers/infiniband/hw/qib/qib_user_sdma.c struct qib_user_sdma_queue *pq)
pq 1446 drivers/infiniband/hw/qib/qib_user_sdma.c mutex_lock(&pq->lock);
pq 1448 drivers/infiniband/hw/qib/qib_user_sdma.c ret = qib_user_sdma_queue_clean(ppd, pq);
pq 1449 drivers/infiniband/hw/qib/qib_user_sdma.c mutex_unlock(&pq->lock);
pq 1454 drivers/infiniband/hw/qib/qib_user_sdma.c u32 qib_user_sdma_complete_counter(const struct qib_user_sdma_queue *pq)
pq 1456 drivers/infiniband/hw/qib/qib_user_sdma.c return pq ? pq->sent_counter : 0;
pq 1459 drivers/infiniband/hw/qib/qib_user_sdma.c u32 qib_user_sdma_inflight_counter(struct qib_user_sdma_queue *pq)
pq 1461 drivers/infiniband/hw/qib/qib_user_sdma.c return pq ? pq->counter : 0;
pq 38 drivers/infiniband/hw/qib/qib_user_sdma.h void qib_user_sdma_queue_destroy(struct qib_user_sdma_queue *pq);
pq 41 drivers/infiniband/hw/qib/qib_user_sdma.h struct qib_user_sdma_queue *pq,
pq 46 drivers/infiniband/hw/qib/qib_user_sdma.h struct qib_user_sdma_queue *pq);
pq 49 drivers/infiniband/hw/qib/qib_user_sdma.h struct qib_user_sdma_queue *pq);
pq 51 drivers/infiniband/hw/qib/qib_user_sdma.h u32 qib_user_sdma_complete_counter(const struct qib_user_sdma_queue *pq);
pq 52 drivers/infiniband/hw/qib/qib_user_sdma.h u32 qib_user_sdma_inflight_counter(struct qib_user_sdma_queue *pq);
pq 609 drivers/mtd/ubi/ubi.h struct list_head pq[UBI_PROT_QUEUE_LEN];
pq 1029 drivers/mtd/ubi/ubi.h list_for_each_entry((e), &(ubi->pq[(i)]), u.list)
pq 281 drivers/mtd/ubi/wl.c list_for_each_entry(p, &ubi->pq[i], u.list)
pq 305 drivers/mtd/ubi/wl.c list_add_tail(&e->u.list, &ubi->pq[pq_tail]);
pq 518 drivers/mtd/ubi/wl.c list_for_each_entry_safe(e, tmp, &ubi->pq[ubi->pq_head], u.list) {
pq 1758 drivers/mtd/ubi/wl.c INIT_LIST_HEAD(&ubi->pq[i]);
pq 1906 drivers/mtd/ubi/wl.c list_for_each_entry_safe(e, tmp, &ubi->pq[i], u.list) {
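
In the UBI entries above, ubi->pq is not a priority queue but a protection queue: an array of list heads used as an aging wheel, where freshly written PEBs enter at the tail slot and become eligible for wear-leveling again once the head index rotates around to them. A self-contained sketch of that scheme under illustrative names (SKETCH_PROT_QUEUE_LEN stands in for UBI_PROT_QUEUE_LEN):

#include <linux/list.h>

#define SKETCH_PROT_QUEUE_LEN 10

struct sketch_wheel {
	struct list_head pq[SKETCH_PROT_QUEUE_LEN];
	int pq_head;
};

static void sketch_wheel_init(struct sketch_wheel *w)
{
	int i;

	for (i = 0; i < SKETCH_PROT_QUEUE_LEN; i++)
		INIT_LIST_HEAD(&w->pq[i]);
	w->pq_head = 0;
}

static void sketch_wheel_protect(struct sketch_wheel *w, struct list_head *e)
{
	int pq_tail = w->pq_head - 1;

	if (pq_tail < 0)
		pq_tail = SKETCH_PROT_QUEUE_LEN - 1;
	list_add_tail(e, &w->pq[pq_tail]);	/* longest possible protection */
}

static void sketch_wheel_tick(struct sketch_wheel *w, struct list_head *expired)
{
	/* everything in the head slot has served its full rotation */
	list_splice_init(&w->pq[w->pq_head], expired);
	if (++w->pq_head >= SKETCH_PROT_QUEUE_LEN)
		w->pq_head = 0;
}
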
pq 1987 drivers/net/ethernet/qlogic/qed/qed_dev.c struct init_qm_pq_params *pq;
pq 2049 drivers/net/ethernet/qlogic/qed/qed_dev.c pq = &(qm_info->qm_pq_params[i]);
pq 2054 drivers/net/ethernet/qlogic/qed/qed_dev.c pq->port_id,
pq 2055 drivers/net/ethernet/qlogic/qed/qed_dev.c pq->vport_id,
pq 2056 drivers/net/ethernet/qlogic/qed/qed_dev.c pq->tc_id, pq->wrr_group, pq->rl_valid);
pq 2452 drivers/net/ethernet/qlogic/qed/qed_sriov.c u16 pq;
pq 2494 drivers/net/ethernet/qlogic/qed/qed_sriov.c pq = qed_get_cm_pq_idx_vf(p_hwfn, vf->relative_vf_id);
pq 2496 drivers/net/ethernet/qlogic/qed/qed_sriov.c req->pbl_addr, req->pbl_size, pq);
pq 977 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c brcmf_fws_flow_control_check(struct brcmf_fws_info *fws, struct pktq *pq,
pq 986 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c pq->len <= BRCMF_FWS_FLOWCONTROL_LOWATER)
pq 990 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c pq->len >= BRCMF_FWS_FLOWCONTROL_HIWATER) {
pq 1255 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c struct pktq *pq;
pq 1275 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c pq = &entry->psq;
pq 1276 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c if (pktq_full(pq) || pktq_pfull(pq, prec)) {
pq 1280 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c queue = &pq->q[prec].skblist;
pq 1317 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c pq->len++;
pq 1318 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c if (pq->hi_prec < prec)
pq 1319 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c pq->hi_prec = (u8) prec;
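
The fwsignal entries above check the queue length against two watermarks (BRCMF_FWS_FLOWCONTROL_LOWATER/HIWATER) rather than one, giving the flow control hysteresis: stop the host when the queue backs up past the high mark, and reopen only after it drains below the low mark. A plain sketch of that shape, with illustrative thresholds and callback:

#define SKETCH_LOWATER	32
#define SKETCH_HIWATER	128

static bool sketch_flow_blocked;

static void sketch_flow_control_check(int qlen,
				      void (*signal_host)(bool stop))
{
	if (sketch_flow_blocked && qlen <= SKETCH_LOWATER) {
		sketch_flow_blocked = false;	/* drained: reopen the tap */
		signal_host(false);
	} else if (!sketch_flow_blocked && qlen >= SKETCH_HIWATER) {
		sketch_flow_blocked = true;	/* backlogged: stop the host */
		signal_host(true);
	}
}
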
pq 47 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c struct sk_buff *brcmu_pktq_penq(struct pktq *pq, int prec,
pq 52 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c if (pktq_full(pq) || pktq_pfull(pq, prec))
pq 55 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c q = &pq->q[prec].skblist;
pq 57 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c pq->len++;
pq 59 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c if (pq->hi_prec < prec)
pq 60 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c pq->hi_prec = (u8) prec;
pq 66 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c struct sk_buff *brcmu_pktq_penq_head(struct pktq *pq, int prec,
pq 71 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c if (pktq_full(pq) || pktq_pfull(pq, prec))
pq 74 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c q = &pq->q[prec].skblist;
pq 76 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c pq->len++;
pq 78 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c if (pq->hi_prec < prec)
pq 79 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c pq->hi_prec = (u8) prec;
pq 85 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c struct sk_buff *brcmu_pktq_pdeq(struct pktq *pq, int prec)
pq 90 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c q = &pq->q[prec].skblist;
pq 95 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c pq->len--;
pq 106 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c struct sk_buff *brcmu_pktq_pdeq_match(struct pktq *pq, int prec,
pq 113 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c q = &pq->q[prec].skblist;
pq 117 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c pq->len--;
pq 125 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c struct sk_buff *brcmu_pktq_pdeq_tail(struct pktq *pq, int prec)
pq 130 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c q = &pq->q[prec].skblist;
pq 135 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c pq->len--;
pq 141 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c brcmu_pktq_pflush(struct pktq *pq, int prec, bool dir,
pq 147 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c q = &pq->q[prec].skblist;
pq 152 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c pq->len--;
pq 158 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c void brcmu_pktq_flush(struct pktq *pq, bool dir,
pq 162 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c for (prec = 0; prec < pq->num_prec; prec++)
pq 163 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c brcmu_pktq_pflush(pq, prec, dir, fn, arg);
pq 167 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c void brcmu_pktq_init(struct pktq *pq, int num_prec, int max_len)
pq 172 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c memset(pq, 0,
pq 175 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c pq->num_prec = (u16) num_prec;
pq 177 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c pq->max = (u16) max_len;
pq 180 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c pq->q[prec].max = pq->max;
pq 181 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c skb_queue_head_init(&pq->q[prec].skblist);
pq 186 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c struct sk_buff *brcmu_pktq_peek_tail(struct pktq *pq, int *prec_out)
pq 190 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c if (pq->len == 0)
pq 193 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c for (prec = 0; prec < pq->hi_prec; prec++)
pq 194 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c if (!skb_queue_empty(&pq->q[prec].skblist))
pq 200 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c return skb_peek_tail(&pq->q[prec].skblist);
pq 205 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c int brcmu_pktq_mlen(struct pktq *pq, uint prec_bmp)
pq 211 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c for (prec = 0; prec <= pq->hi_prec; prec++)
pq 213 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c len += pq->q[prec].skblist.qlen;
pq 220 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c struct sk_buff *brcmu_pktq_mdeq(struct pktq *pq, uint prec_bmp,
pq 227 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c if (pq->len == 0)
pq 230 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c while ((prec = pq->hi_prec) > 0 &&
pq 231 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c skb_queue_empty(&pq->q[prec].skblist))
pq 232 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c pq->hi_prec--;
pq 235 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c skb_queue_empty(&pq->q[prec].skblist))
pq 239 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c q = &pq->q[prec].skblist;
pq 244 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c pq->len--;
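
The brcmutil entries above implement a precedence-indexed packet queue: one sk_buff list per precedence level, a shared length cap, and a cached hi_prec hint so dequeue can start from the highest occupied level. A usage sketch against the exported API (signatures are from the brcmu_utils.h declarations below; the include path and demo function are illustrative):

#include <linux/skbuff.h>
#include <brcmu_utils.h>

static void sketch_pktq_demo(struct sk_buff *skb)
{
	struct pktq q;
	struct sk_buff *p;
	int prec_out;

	brcmu_pktq_init(&q, 4, 256);		/* 4 precedence levels, 256 pkts */
	if (!brcmu_pktq_penq(&q, 2, skb))	/* enqueue at precedence 2 */
		kfree_skb(skb);			/* queue full: caller still owns skb */

	/* drain precedences 0-3, highest non-empty level first */
	while ((p = brcmu_pktq_mdeq(&q, 0xf, &prec_out)))
		kfree_skb(p);
}
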
pq 76 drivers/net/wireless/broadcom/brcm80211/include/brcmu_utils.h static inline int pktq_plen(struct pktq *pq, int prec)
pq 78 drivers/net/wireless/broadcom/brcm80211/include/brcmu_utils.h return pq->q[prec].skblist.qlen;
pq 81 drivers/net/wireless/broadcom/brcm80211/include/brcmu_utils.h static inline int pktq_pavail(struct pktq *pq, int prec)
pq 83 drivers/net/wireless/broadcom/brcm80211/include/brcmu_utils.h return pq->q[prec].max - pq->q[prec].skblist.qlen;
pq 86 drivers/net/wireless/broadcom/brcm80211/include/brcmu_utils.h static inline bool pktq_pfull(struct pktq *pq, int prec)
pq 88 drivers/net/wireless/broadcom/brcm80211/include/brcmu_utils.h return pq->q[prec].skblist.qlen >= pq->q[prec].max;
pq 91 drivers/net/wireless/broadcom/brcm80211/include/brcmu_utils.h static inline bool pktq_pempty(struct pktq *pq, int prec)
pq 93 drivers/net/wireless/broadcom/brcm80211/include/brcmu_utils.h return skb_queue_empty(&pq->q[prec].skblist);
pq 96 drivers/net/wireless/broadcom/brcm80211/include/brcmu_utils.h static inline struct sk_buff *pktq_ppeek(struct pktq *pq, int prec)
pq 98 drivers/net/wireless/broadcom/brcm80211/include/brcmu_utils.h return skb_peek(&pq->q[prec].skblist);
pq 101 drivers/net/wireless/broadcom/brcm80211/include/brcmu_utils.h static inline struct sk_buff *pktq_ppeek_tail(struct pktq *pq, int prec)
pq 103 drivers/net/wireless/broadcom/brcm80211/include/brcmu_utils.h return skb_peek_tail(&pq->q[prec].skblist);
pq 106 drivers/net/wireless/broadcom/brcm80211/include/brcmu_utils.h struct sk_buff *brcmu_pktq_penq(struct pktq *pq, int prec, struct sk_buff *p);
pq 107 drivers/net/wireless/broadcom/brcm80211/include/brcmu_utils.h struct sk_buff *brcmu_pktq_penq_head(struct pktq *pq, int prec,
pq 109 drivers/net/wireless/broadcom/brcm80211/include/brcmu_utils.h struct sk_buff *brcmu_pktq_pdeq(struct pktq *pq, int prec);
pq 110 drivers/net/wireless/broadcom/brcm80211/include/brcmu_utils.h struct sk_buff *brcmu_pktq_pdeq_tail(struct pktq *pq, int prec);
pq 111 drivers/net/wireless/broadcom/brcm80211/include/brcmu_utils.h struct sk_buff *brcmu_pktq_pdeq_match(struct pktq *pq, int prec,
pq 122 drivers/net/wireless/broadcom/brcm80211/include/brcmu_utils.h void brcmu_pktq_pflush(struct pktq *pq, int prec, bool dir,
pq 127 drivers/net/wireless/broadcom/brcm80211/include/brcmu_utils.h int brcmu_pktq_mlen(struct pktq *pq, uint prec_bmp);
pq 128 drivers/net/wireless/broadcom/brcm80211/include/brcmu_utils.h struct sk_buff *brcmu_pktq_mdeq(struct pktq *pq, uint prec_bmp, int *prec_out);
pq 132 drivers/net/wireless/broadcom/brcm80211/include/brcmu_utils.h static inline int pktq_len(struct pktq *pq)
pq 134 drivers/net/wireless/broadcom/brcm80211/include/brcmu_utils.h return (int)pq->len;
pq 137 drivers/net/wireless/broadcom/brcm80211/include/brcmu_utils.h static inline int pktq_max(struct pktq *pq)
pq 139 drivers/net/wireless/broadcom/brcm80211/include/brcmu_utils.h return (int)pq->max;
pq 142 drivers/net/wireless/broadcom/brcm80211/include/brcmu_utils.h static inline int pktq_avail(struct pktq *pq)
pq 144 drivers/net/wireless/broadcom/brcm80211/include/brcmu_utils.h return (int)(pq->max - pq->len);
pq 147 drivers/net/wireless/broadcom/brcm80211/include/brcmu_utils.h static inline bool pktq_full(struct pktq *pq)
pq 149 drivers/net/wireless/broadcom/brcm80211/include/brcmu_utils.h return pq->len >= pq->max;
pq 152 drivers/net/wireless/broadcom/brcm80211/include/brcmu_utils.h static inline bool pktq_empty(struct pktq *pq)
pq 154 drivers/net/wireless/broadcom/brcm80211/include/brcmu_utils.h return pq->len == 0;
pq 157 drivers/net/wireless/broadcom/brcm80211/include/brcmu_utils.h void brcmu_pktq_init(struct pktq *pq, int num_prec, int max_len);
pq 159 drivers/net/wireless/broadcom/brcm80211/include/brcmu_utils.h struct sk_buff *brcmu_pktq_peek_tail(struct pktq *pq, int *prec_out);
pq 160 drivers/net/wireless/broadcom/brcm80211/include/brcmu_utils.h void brcmu_pktq_flush(struct pktq *pq, bool dir,
pq 69 drivers/zorro/gen-devlist.c pq(devf, c);
pq 90 drivers/zorro/gen-devlist.c pq(devf, c);
pq 1182 fs/fuse/dev.c struct fuse_pqueue *fpq = &fud->pq;
pq 1828 fs/fuse/dev.c struct fuse_pqueue *fpq = &fud->pq;
pq 2106 fs/fuse/dev.c struct fuse_pqueue *fpq = &fud->pq;
pq 2166 fs/fuse/dev.c struct fuse_pqueue *fpq = &fud->pq;
pq 463 fs/fuse/fuse_i.h struct fuse_pqueue pq;
pq 1062 fs/fuse/inode.c struct list_head *pq;
pq 1068 fs/fuse/inode.c pq = kcalloc(FUSE_PQ_HASH_SIZE, sizeof(struct list_head), GFP_KERNEL);
pq 1069 fs/fuse/inode.c if (!pq) {
pq 1074 fs/fuse/inode.c fud->pq.processing = pq;
pq 1075 fs/fuse/inode.c fuse_pqueue_init(&fud->pq);
pq 1114 fs/fuse/inode.c kfree(fud->pq.processing);
pq 70 fs/fuse/virtio_fs.c return &vq_to_fsvq(vq)->fud->pq;
pq 450 fs/fuse/virtio_fs.c struct fuse_pqueue *fpq = &fsvq->fud->pq;
pq 948 fs/fuse/virtio_fs.c fpq = &fsvq->fud->pq;
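
The fuse entries above allocate the per-device processing queue as a flat array of FUSE_PQ_HASH_SIZE list heads in one kcalloc(), then point fud->pq.processing at it: one bucket of in-flight requests per hash slot. A generic sketch of that setup (sizes and names are illustrative; in the real code the bucket initialization is done by fuse_pqueue_init()):

#include <linux/slab.h>
#include <linux/list.h>

#define SKETCH_PQ_HASH_SIZE 256

static struct list_head *sketch_pq_hash_alloc(void)
{
	struct list_head *pq;
	unsigned int i;

	pq = kcalloc(SKETCH_PQ_HASH_SIZE, sizeof(struct list_head),
		     GFP_KERNEL);
	if (!pq)
		return NULL;
	for (i = 0; i < SKETCH_PQ_HASH_SIZE; i++)
		INIT_LIST_HEAD(&pq[i]);	/* every bucket starts empty */
	return pq;
}
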
pq 1622 fs/xfs/xfs_qm.c struct xfs_dquot *pq = NULL;
pq 1699 fs/xfs/xfs_qm.c true, &pq);
pq 1704 fs/xfs/xfs_qm.c xfs_dqunlock(pq);
pq 1709 fs/xfs/xfs_qm.c pq = xfs_qm_dqhold(ip->i_pdquot);
pq 1725 fs/xfs/xfs_qm.c *O_pdqpp = pq;
pq 1727 fs/xfs/xfs_qm.c xfs_qm_dqrele(pq);
pq 767 include/linux/dmaengine.h struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
pq 1409 net/xfrm/xfrm_policy.c struct xfrm_policy_queue *pq = &old->polq;
pq 1412 net/xfrm/xfrm_policy.c if (skb_queue_empty(&pq->hold_queue))
pq 1417 net/xfrm/xfrm_policy.c spin_lock_bh(&pq->hold_queue.lock);
pq 1418 net/xfrm/xfrm_policy.c skb_queue_splice_init(&pq->hold_queue, &list);
pq 1419 net/xfrm/xfrm_policy.c if (del_timer(&pq->hold_timer))
pq 1421 net/xfrm/xfrm_policy.c spin_unlock_bh(&pq->hold_queue.lock);
pq 1423 net/xfrm/xfrm_policy.c pq = &new->polq;
pq 1425 net/xfrm/xfrm_policy.c spin_lock_bh(&pq->hold_queue.lock);
pq 1426 net/xfrm/xfrm_policy.c skb_queue_splice(&list, &pq->hold_queue);
pq 1427 net/xfrm/xfrm_policy.c pq->timeout = XFRM_QUEUE_TMO_MIN;
pq 1428 net/xfrm/xfrm_policy.c if (!mod_timer(&pq->hold_timer, jiffies))
pq 1430 net/xfrm/xfrm_policy.c spin_unlock_bh(&pq->hold_queue.lock);
pq 2756 net/xfrm/xfrm_policy.c struct xfrm_policy_queue *pq = &pol->polq;
pq 2760 net/xfrm/xfrm_policy.c spin_lock(&pq->hold_queue.lock);
pq 2761 net/xfrm/xfrm_policy.c skb = skb_peek(&pq->hold_queue);
pq 2763 net/xfrm/xfrm_policy.c spin_unlock(&pq->hold_queue.lock);
pq 2769 net/xfrm/xfrm_policy.c spin_unlock(&pq->hold_queue.lock);
pq 2779 net/xfrm/xfrm_policy.c if (pq->timeout >= XFRM_QUEUE_TMO_MAX)
pq 2782 net/xfrm/xfrm_policy.c pq->timeout = pq->timeout << 1;
pq 2783 net/xfrm/xfrm_policy.c if (!mod_timer(&pq->hold_timer, jiffies + pq->timeout))
pq 2792 net/xfrm/xfrm_policy.c spin_lock(&pq->hold_queue.lock);
pq 2793 net/xfrm/xfrm_policy.c pq->timeout = 0;
pq 2794 net/xfrm/xfrm_policy.c skb_queue_splice_init(&pq->hold_queue, &list);
pq 2795 net/xfrm/xfrm_policy.c spin_unlock(&pq->hold_queue.lock);
pq 2820 net/xfrm/xfrm_policy.c pq->timeout = 0;
pq 2821 net/xfrm/xfrm_policy.c skb_queue_purge(&pq->hold_queue);
pq 2831 net/xfrm/xfrm_policy.c struct xfrm_policy_queue *pq = &pol->polq;
pq 2838 net/xfrm/xfrm_policy.c if (pq->hold_queue.qlen > XFRM_MAX_QUEUE_LEN) {
pq 2845 net/xfrm/xfrm_policy.c spin_lock_bh(&pq->hold_queue.lock);
pq 2847 net/xfrm/xfrm_policy.c if (!pq->timeout)
pq 2848 net/xfrm/xfrm_policy.c pq->timeout = XFRM_QUEUE_TMO_MIN;
pq 2850 net/xfrm/xfrm_policy.c sched_next = jiffies + pq->timeout;
pq 2852 net/xfrm/xfrm_policy.c if (del_timer(&pq->hold_timer)) {
pq 2853 net/xfrm/xfrm_policy.c if (time_before(pq->hold_timer.expires, sched_next))
pq 2854 net/xfrm/xfrm_policy.c sched_next = pq->hold_timer.expires;
pq 2858 net/xfrm/xfrm_policy.c __skb_queue_tail(&pq->hold_queue, skb);
pq 2859 net/xfrm/xfrm_policy.c if (!mod_timer(&pq->hold_timer, sched_next))
pq 2862 net/xfrm/xfrm_policy.c spin_unlock_bh(&pq->hold_queue.lock);
pq 932 sound/pci/hda/hda_auto_parser.c const struct snd_hda_pin_quirk *pq;
pq 937 sound/pci/hda/hda_auto_parser.c for (pq = pin_quirk; pq->subvendor; pq++) {
pq 938 sound/pci/hda/hda_auto_parser.c if ((codec->core.subsystem_id & 0xffff0000) != (pq->subvendor << 16))
pq 940 sound/pci/hda/hda_auto_parser.c if (codec->core.vendor_id != pq->codec)
pq 942 sound/pci/hda/hda_auto_parser.c if (pin_config_match(codec, pq->pins, match_all_pins)) {
pq 943 sound/pci/hda/hda_auto_parser.c codec->fixup_id = pq->value;
pq 945 sound/pci/hda/hda_auto_parser.c codec->fixup_name = pq->name;
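
The hda_auto_parser.c entries that close the listing walk a zero-terminated quirk table: a row applies only when the subsystem vendor (upper 16 bits of the subsystem id), the codec vendor id, and the pin table all match, and the first full match supplies the fixup. A sketch of that table-driven matching; the struct is trimmed to the fields the loop reads, and match_pins() stands in for pin_config_match():

#include <linux/types.h>

struct sketch_pin_quirk {
	u16 subvendor;		/* PCI SSVID; 0 terminates the table */
	u32 codec;		/* codec vendor/device id */
	const void *pins;
	const char *name;
	int value;		/* fixup id to apply on match */
};

static const struct sketch_pin_quirk *
sketch_match_quirk(const struct sketch_pin_quirk *tbl,
		   u32 subsystem_id, u32 vendor_id,
		   bool (*match_pins)(const void *pins))
{
	const struct sketch_pin_quirk *pq;

	for (pq = tbl; pq->subvendor; pq++) {
		if ((subsystem_id & 0xffff0000) != ((u32)pq->subvendor << 16))
			continue;
		if (vendor_id != pq->codec)
			continue;
		if (match_pins(pq->pins))
			return pq;	/* first full match wins */
	}
	return NULL;
}
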