wq 261 arch/arm/common/bL_switcher.c wait_queue_head_t wq;
wq 284 arch/arm/common/bL_switcher.c wait_event_interruptible(t->wq,
wq 370 arch/arm/common/bL_switcher.c wake_up(&t->wq);
wq 580 arch/arm/common/bL_switcher.c init_waitqueue_head(&t->wq);
wq 507 arch/mips/kvm/mips.c if (swq_has_sleeper(&dvcpu->wq))
wq 508 arch/mips/kvm/mips.c swake_up_one(&dvcpu->wq);
wq 1222 arch/mips/kvm/mips.c if (swq_has_sleeper(&vcpu->wq))
wq 1223 arch/mips/kvm/mips.c swake_up_one(&vcpu->wq);
wq 81 arch/powerpc/include/asm/kvm_book3s.h struct swait_queue_head wq;
wq 59 arch/powerpc/include/asm/pnv-pci.h struct workqueue_struct *wq;
wq 2102 arch/powerpc/kvm/book3s_hv.c init_swait_queue_head(&vcore->wq);
wq 3818 arch/powerpc/kvm/book3s_hv.c prepare_to_swait_exclusive(&vc->wq, &wait, TASK_INTERRUPTIBLE);
wq 3821 arch/powerpc/kvm/book3s_hv.c finish_swait(&vc->wq, &wait);
wq 3835 arch/powerpc/kvm/book3s_hv.c finish_swait(&vc->wq, &wait);
wq 3946 arch/powerpc/kvm/book3s_hv.c swake_up_one(&vc->wq);
wq 4285 arch/powerpc/kvm/book3s_hv.c vcpu->arch.wqp = &vcpu->arch.vcore->wq;
wq 724 arch/powerpc/kvm/powerpc.c vcpu->arch.wqp = &vcpu->wq;
wq 305 arch/powerpc/platforms/cell/spufs/spufs.h #define spufs_wait(wq, condition) \
wq 310 arch/powerpc/platforms/cell/spufs/spufs.h prepare_to_wait(&(wq), &__wait, TASK_INTERRUPTIBLE); \
wq 323 arch/powerpc/platforms/cell/spufs/spufs.h finish_wait(&(wq), &__wait); \
wq 72 arch/x86/kernel/kvm.c struct swait_queue_head wq;
wq 129 arch/x86/kernel/kvm.c init_swait_queue_head(&n.wq);
wq 135 arch/x86/kernel/kvm.c prepare_to_swait_exclusive(&n.wq, &wait, TASK_UNINTERRUPTIBLE);
wq 156 arch/x86/kernel/kvm.c finish_swait(&n.wq, &wait);
wq 168 arch/x86/kernel/kvm.c else if (swq_has_sleeper(&n->wq))
wq 169 arch/x86/kernel/kvm.c swake_up_one(&n->wq);
wq 221 arch/x86/kernel/kvm.c init_swait_queue_head(&n->wq);
wq 1787 arch/x86/kvm/lapic.c WARN_ON(swait_active(&vcpu->wq));
wq 1117 block/blk-mq.c struct wait_queue_head *wq;
wq 1139 block/blk-mq.c wq = &bt_wait_ptr(sbq, hctx)->wait;
wq 1141 block/blk-mq.c spin_lock_irq(&wq->lock);
wq 1145 block/blk-mq.c spin_unlock_irq(&wq->lock);
wq 1151 block/blk-mq.c __add_wait_queue(wq, wait);
wq 1161 block/blk-mq.c spin_unlock_irq(&wq->lock);
wq 1172 block/blk-mq.c spin_unlock_irq(&wq->lock);
wq 204 block/blk-rq-qos.c struct wait_queue_entry wq;
wq 217 block/blk-rq-qos.c wq);
wq 254 block/blk-rq-qos.c .wq = {
wq 256 block/blk-rq-qos.c .entry = LIST_HEAD_INIT(data.wq.entry),
wq 269 block/blk-rq-qos.c prepare_to_wait_exclusive(&rqw->wait, &data.wq, TASK_UNINTERRUPTIBLE);
wq 276 block/blk-rq-qos.c finish_wait(&rqw->wait, &data.wq);
wq 292 block/blk-rq-qos.c finish_wait(&rqw->wait, &data.wq);
wq 720 crypto/af_alg.c struct socket_wq *wq;
wq 726 crypto/af_alg.c wq = rcu_dereference(sk->sk_wq);
wq 727 crypto/af_alg.c if (skwq_has_sleeper(wq))
wq 728 crypto/af_alg.c wake_up_interruptible_sync_poll(&wq->wait, EPOLLIN |
wq 784 crypto/af_alg.c struct socket_wq *wq;
wq 790 crypto/af_alg.c wq = rcu_dereference(sk->sk_wq);
wq 791 crypto/af_alg.c if (skwq_has_sleeper(wq))
wq 792 crypto/af_alg.c wake_up_interruptible_sync_poll(&wq->wait, EPOLLOUT |
wq 798 drivers/block/drbd/drbd_int.h struct workqueue_struct *wq;
wq 2275 drivers/block/drbd/drbd_main.c struct workqueue_struct *wq;
wq 2348 drivers/block/drbd/drbd_main.c queue_work(retry.wq, &retry.worker);
wq 2393 drivers/block/drbd/drbd_main.c if (retry.wq)
wq 2394 drivers/block/drbd/drbd_main.c destroy_workqueue(retry.wq);
wq 2473 drivers/block/drbd/drbd_main.c static void drbd_init_workqueue(struct drbd_work_queue* wq)
wq 2475 drivers/block/drbd/drbd_main.c spin_lock_init(&wq->q_lock);
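
The bL_switcher hits above show the canonical wait_queue_head_t pattern: init_waitqueue_head() once, wait_event_interruptible() on a condition, wake_up() from the other context. The KVM hits use swait, a stripped-down variant of the same shape (init_swait_queue_head, prepare_to_swait_exclusive, swake_up_one) with cheaper, bounded wakeups. A minimal sketch of the classic pattern, using a hypothetical struct demo_thread rather than code from the tree:

#include <linux/wait.h>
#include <linux/sched.h>

/* Hypothetical context, modeled on the bL_switcher hits above. */
struct demo_thread {
	wait_queue_head_t wq;
	bool work_pending;
};

static void demo_init(struct demo_thread *t)
{
	init_waitqueue_head(&t->wq);
}

/* Sleeper: blocks until work_pending is set or a signal arrives;
 * returns 0 on success, -ERESTARTSYS if interrupted. */
static int demo_wait(struct demo_thread *t)
{
	return wait_event_interruptible(t->wq, t->work_pending);
}

/* Waker: publish the condition first, then wake the sleeper. */
static void demo_kick(struct demo_thread *t)
{
	t->work_pending = true;
	wake_up(&t->wq);
}
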
wq 2476 drivers/block/drbd/drbd_main.c INIT_LIST_HEAD(&wq->q);
wq 2477 drivers/block/drbd/drbd_main.c init_waitqueue_head(&wq->q_wait);
wq 2765 drivers/block/drbd/drbd_main.c device->submit.wq =
wq 2767 drivers/block/drbd/drbd_main.c if (!device->submit.wq)
wq 3004 drivers/block/drbd/drbd_main.c retry.wq = create_singlethread_workqueue("drbd-reissue");
wq 3005 drivers/block/drbd/drbd_main.c if (!retry.wq) {
wq 1198 drivers/block/drbd/drbd_req.c queue_work(device->submit.wq, &device->submit.worker);
wq 582 drivers/block/xen-blkback/blkback.c wake_up(&ring->wq);
wq 629 drivers/block/xen-blkback/blkback.c ring->wq,
wq 258 drivers/block/xen-blkback/common.h wait_queue_head_t wq;
wq 143 drivers/block/xen-blkback/xenbus.c init_waitqueue_head(&ring->wq);
wq 282 drivers/bluetooth/bluecard_cs.c DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
wq 199 drivers/char/tlclk.c static DECLARE_WAIT_QUEUE_HEAD(wq);
wq 252 drivers/char/tlclk.c wait_event_interruptible(wq, got_event);
wq 873 drivers/char/tlclk.c wake_up(&wq);
wq 928 drivers/char/tlclk.c wake_up(&wq);
wq 115 drivers/char/tpm/tpm_ibmvtpm.c sig = wait_event_interruptible(ibmvtpm->wq, !ibmvtpm->tpm_processing_cmd);
wq 226 drivers/char/tpm/tpm_ibmvtpm.c sig = wait_event_interruptible(ibmvtpm->wq, !ibmvtpm->tpm_processing_cmd);
wq 555 drivers/char/tpm/tpm_ibmvtpm.c wake_up_interruptible(&ibmvtpm->wq);
wq 663 drivers/char/tpm/tpm_ibmvtpm.c init_waitqueue_head(&ibmvtpm->wq);
wq 40 drivers/char/tpm/tpm_ibmvtpm.h wait_queue_head_t wq;
wq 33 drivers/char/tpm/tpm_vtpm_proxy.c wait_queue_head_t wq;
wq 79 drivers/char/tpm/tpm_vtpm_proxy.c sig = wait_event_interruptible(proxy_dev->wq,
wq 158 drivers/char/tpm/tpm_vtpm_proxy.c wake_up_interruptible(&proxy_dev->wq);
wq 176 drivers/char/tpm/tpm_vtpm_proxy.c poll_wait(filp, &proxy_dev->wq, wait);
wq 222 drivers/char/tpm/tpm_vtpm_proxy.c wake_up_interruptible(&proxy_dev->wq);
wq 361 drivers/char/tpm/tpm_vtpm_proxy.c wake_up_interruptible(&proxy_dev->wq);
wq 497 drivers/char/tpm/tpm_vtpm_proxy.c init_waitqueue_head(&proxy_dev->wq);
wq 147 drivers/crypto/chelsio/chtls/chtls_cm.h struct socket_wq *wq;
wq 150 drivers/crypto/chelsio/chtls/chtls_cm.h wq = rcu_dereference(sk->sk_wq);
wq 151 drivers/crypto/chelsio/chtls/chtls_cm.h if (skwq_has_sleeper(wq)) {
wq 500 drivers/crypto/hisilicon/qm.c queue_work(qp->wq, &qp->work);
wq 1148 drivers/crypto/hisilicon/qm.c qp->wq = alloc_workqueue("hisi_qm", WQ_UNBOUND | WQ_HIGHPRI |
wq 1150 drivers/crypto/hisilicon/qm.c if (!qp->wq) {
wq 193 drivers/crypto/hisilicon/qm.h struct workqueue_struct *wq;
wq 35 drivers/dma-buf/sync_file.c init_waitqueue_head(&sync_file->wq);
wq 52 drivers/dma-buf/sync_file.c wake_up_all(&sync_file->wq);
wq 311 drivers/dma-buf/sync_file.c poll_wait(file, &sync_file->wq, wait);
wq 317 drivers/dma-buf/sync_file.c wake_up_all(&sync_file->wq);
wq 4 drivers/edac/wq.c static struct workqueue_struct *wq;
wq 8 drivers/edac/wq.c return queue_delayed_work(wq, work, delay);
wq 14 drivers/edac/wq.c return mod_delayed_work(wq, work, delay);
wq 23 drivers/edac/wq.c flush_workqueue(wq);
wq 31 drivers/edac/wq.c wq = alloc_ordered_workqueue("edac-poller", WQ_MEM_RECLAIM);
wq 32 drivers/edac/wq.c if (!wq)
wq 40 drivers/edac/wq.c flush_workqueue(wq);
wq 41 drivers/edac/wq.c destroy_workqueue(wq);
wq 42 drivers/edac/wq.c wq = NULL;
wq 354 drivers/gpu/drm/amd/amdgpu/amdgpu.h wait_queue_head_t wq;
wq 56 drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c init_waitqueue_head(&sa_manager->wq);
wq 299 drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c spin_lock(&sa_manager->wq.lock);
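
drivers/edac/wq.c above is a complete miniature of the other `wq` in this listing, a struct workqueue_struct for deferred work: allocate once, queue work items, flush and destroy on teardown. A condensed sketch of that lifecycle with hypothetical names (the flags mirror the "edac-poller" hit):

#include <linux/init.h>
#include <linux/workqueue.h>

static struct workqueue_struct *demo_wq;
static struct work_struct demo_work;

static void demo_fn(struct work_struct *work)
{
	/* Runs in process context on a kworker thread. */
}

static int __init demo_setup(void)
{
	/* Ordered queue: one item at a time, usable under memory reclaim. */
	demo_wq = alloc_ordered_workqueue("demo-poller", WQ_MEM_RECLAIM);
	if (!demo_wq)
		return -ENOMEM;
	INIT_WORK(&demo_work, demo_fn);
	queue_work(demo_wq, &demo_work);
	return 0;
}

static void demo_teardown(void)
{
	flush_workqueue(demo_wq);	/* let queued items finish */
	destroy_workqueue(demo_wq);
	demo_wq = NULL;
}
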
wq 309 drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c spin_unlock(&sa_manager->wq.lock);
wq 321 drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c spin_unlock(&sa_manager->wq.lock);
wq 329 drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c spin_lock(&sa_manager->wq.lock);
wq 333 drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c sa_manager->wq,
wq 340 drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c spin_unlock(&sa_manager->wq.lock);
wq 356 drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c spin_lock(&sa_manager->wq.lock);
wq 366 drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c wake_up_all_locked(&sa_manager->wq);
wq 367 drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c spin_unlock(&sa_manager->wq.lock);
wq 378 drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c spin_lock(&sa_manager->wq.lock);
wq 396 drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c spin_unlock(&sa_manager->wq.lock);
wq 243 drivers/gpu/drm/amd/amdkfd/kfd_events.c list_for_each_entry(waiter, &ev->wq.head, wait.entry)
wq 245 drivers/gpu/drm/amd/amdkfd/kfd_events.c wake_up_all(&ev->wq);
wq 337 drivers/gpu/drm/amd/amdkfd/kfd_events.c init_waitqueue_head(&ev->wq);
wq 398 drivers/gpu/drm/amd/amdkfd/kfd_events.c ev->signaled = !ev->auto_reset || !waitqueue_active(&ev->wq);
wq 400 drivers/gpu/drm/amd/amdkfd/kfd_events.c list_for_each_entry(waiter, &ev->wq.head, wait.entry)
wq 403 drivers/gpu/drm/amd/amdkfd/kfd_events.c wake_up_all(&ev->wq);
wq 568 drivers/gpu/drm/amd/amdkfd/kfd_events.c add_wait_queue(&ev->wq, &waiter->wait);
wq 659 drivers/gpu/drm/amd/amdkfd/kfd_events.c remove_wait_queue(&waiters[i].event->wq,
wq 61 drivers/gpu/drm/amd/amdkfd/kfd_events.h wait_queue_head_t wq; /* List of event waiters. */
wq 182 drivers/gpu/drm/arm/malidp_drv.c ret = wait_event_interruptible_timeout(malidp->wq,
wq 833 drivers/gpu/drm/arm/malidp_drv.c init_waitqueue_head(&malidp->wq);
wq 35 drivers/gpu/drm/arm/malidp_drv.h wait_queue_head_t wq;
wq 1222 drivers/gpu/drm/arm/malidp_hw.c wake_up(&malidp->wq);
wq 646 drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c queue_work(dc->wq, &commit->work);
wq 721 drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c dc->wq = alloc_ordered_workqueue("atmel-hlcdc-dc", 0);
wq 722 drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c if (!dc->wq)
wq 782 drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c destroy_workqueue(dc->wq);
wq 791 drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c flush_workqueue(dc->wq);
wq 806 drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c destroy_workqueue(dc->wq);
wq 344 drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h struct workqueue_struct *wq;
wq 348 drivers/gpu/drm/bridge/adv7511/adv7511.h wait_queue_head_t wq;
wq 475 drivers/gpu/drm/bridge/adv7511/adv7511_drv.c wake_up_all(&adv7511->wq);
wq 503 drivers/gpu/drm/bridge/adv7511/adv7511_drv.c ret = wait_event_interruptible_timeout(adv7511->wq,
wq 1198 drivers/gpu/drm/bridge/adv7511/adv7511_drv.c init_waitqueue_head(&adv7511->wq);
wq 151 drivers/gpu/drm/drm_crtc.c init_waitqueue_head(&crtc->crc.wq);
wq 300 drivers/gpu/drm/drm_debugfs_crc.c ret = wait_event_interruptible_lock_irq(crc->wq,
wq 343 drivers/gpu/drm/drm_debugfs_crc.c poll_wait(file, &crc->wq, wait);
wq 429 drivers/gpu/drm/drm_debugfs_crc.c wake_up_interruptible(&crc->wq);
wq 102 drivers/gpu/drm/drm_flip_work.c struct workqueue_struct *wq)
wq 110 drivers/gpu/drm/drm_flip_work.c queue_work(wq, &work->worker);
wq 1416 drivers/gpu/drm/etnaviv/etnaviv_gpu.c queue_work(gpu->wq, &gpu->sync_point_work);
wq 1620 drivers/gpu/drm/etnaviv/etnaviv_gpu.c gpu->wq = alloc_ordered_workqueue(dev_name(dev), 0);
wq 1621 drivers/gpu/drm/etnaviv/etnaviv_gpu.c if (!gpu->wq) {
wq 1658 drivers/gpu/drm/etnaviv/etnaviv_gpu.c destroy_workqueue(gpu->wq);
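
The amdgpu_sa.c hits above (and the matching radeon_sa.c hits further down) show a rarer idiom: the allocator borrows the waitqueue's own spinlock (sa_manager->wq.lock) as its state lock and pairs it with the *_locked helpers, which expect that lock to be held. A sketch of the idiom under a hypothetical manager struct:

#include <linux/wait.h>
#include <linux/spinlock.h>

/* Hypothetical allocator guarded by its waitqueue's internal lock,
 * modeled on the amdgpu_sa/radeon_sa hits; wq is initialized
 * elsewhere with init_waitqueue_head(). */
struct demo_manager {
	wait_queue_head_t wq;
	unsigned int free_slots;
};

static int demo_alloc(struct demo_manager *m)
{
	int r;

	spin_lock(&m->wq.lock);
	/* Drops wq.lock while sleeping, retakes it before rechecking. */
	r = wait_event_interruptible_locked(m->wq, m->free_slots > 0);
	if (!r)
		m->free_slots--;
	spin_unlock(&m->wq.lock);
	return r;
}

static void demo_free(struct demo_manager *m)
{
	spin_lock(&m->wq.lock);
	m->free_slots++;
	wake_up_all_locked(&m->wq);	/* caller already holds wq.lock */
	spin_unlock(&m->wq.lock);
}
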
wq 1674 drivers/gpu/drm/etnaviv/etnaviv_gpu.c flush_workqueue(gpu->wq);
wq 1675 drivers/gpu/drm/etnaviv/etnaviv_gpu.c destroy_workqueue(gpu->wq);
wq 101 drivers/gpu/drm/etnaviv/etnaviv_gpu.h struct workqueue_struct *wq;
wq 102 drivers/gpu/drm/i915/display/intel_sprite.c wait_queue_head_t *wq = drm_crtc_vblank_waitqueue(&crtc->base);
wq 144 drivers/gpu/drm/i915/display/intel_sprite.c prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
wq 163 drivers/gpu/drm/i915/display/intel_sprite.c finish_wait(wq, &wait);
wq 379 drivers/gpu/drm/i915/gem/i915_gem_context.c queue_work(i915->wq, &i915->contexts.free_work);
wq 248 drivers/gpu/drm/i915/gem/i915_gem_object.c queue_work(i915->wq, &i915->mm.free_work);
wq 61 drivers/gpu/drm/i915/gem/i915_gem_pm.c queue_delayed_work(i915->wq,
wq 79 drivers/gpu/drm/i915/gem/i915_gem_pm.c queue_delayed_work(i915->wq,
wq 94 drivers/gpu/drm/i915/gem/i915_gem_pm.c queue_delayed_work(i915->wq,
wq 100 drivers/gpu/drm/i915/gem/i915_gem_pm.c queue_work(i915->wq, &i915->gem.idle_work);
wq 148 drivers/gpu/drm/i915/gem/i915_gem_pm.c flush_workqueue(i915->wq);
wq 478 drivers/gpu/drm/i915/i915_drv.c dev_priv->wq = alloc_ordered_workqueue("i915", 0);
wq 479 drivers/gpu/drm/i915/i915_drv.c if (dev_priv->wq == NULL)
wq 489 drivers/gpu/drm/i915/i915_drv.c destroy_workqueue(dev_priv->wq);
wq 499 drivers/gpu/drm/i915/i915_drv.c destroy_workqueue(dev_priv->wq);
wq 1429 drivers/gpu/drm/i915/i915_drv.h struct workqueue_struct *wq;
wq 2280 drivers/gpu/drm/i915/i915_drv.h flush_workqueue(i915->wq);
wq 2284 drivers/gpu/drm/i915/i915_drv.h drain_workqueue(i915->wq);
wq 542 drivers/gpu/drm/i915/i915_gem_gtt.c queue_rcu_work(vm->i915->wq, &vm->rcu);
wq 2757 drivers/gpu/drm/i915/i915_gem_gtt.c flush_workqueue(i915->wq);
wq 236 drivers/gpu/drm/i915/i915_sw_fence.c static int i915_sw_fence_wake(wait_queue_entry_t *wq, unsigned mode, int flags, void *key)
wq 238 drivers/gpu/drm/i915/i915_sw_fence.c i915_sw_fence_set_error_once(wq->private, flags);
wq 240 drivers/gpu/drm/i915/i915_sw_fence.c list_del(&wq->entry);
wq 241 drivers/gpu/drm/i915/i915_sw_fence.c __i915_sw_fence_complete(wq->private, key);
wq 243 drivers/gpu/drm/i915/i915_sw_fence.c if (wq->flags & I915_SW_FENCE_FLAG_ALLOC)
wq 244 drivers/gpu/drm/i915/i915_sw_fence.c kfree(wq);
wq 251 drivers/gpu/drm/i915/i915_sw_fence.c wait_queue_entry_t *wq;
wq 259 drivers/gpu/drm/i915/i915_sw_fence.c list_for_each_entry(wq, &fence->wait.head, entry) {
wq 260 drivers/gpu/drm/i915/i915_sw_fence.c if (wq->func != i915_sw_fence_wake)
wq 263 drivers/gpu/drm/i915/i915_sw_fence.c if (__i915_sw_fence_check_if_after(wq->private, signaler))
wq 272 drivers/gpu/drm/i915/i915_sw_fence.c wait_queue_entry_t *wq;
wq 277 drivers/gpu/drm/i915/i915_sw_fence.c list_for_each_entry(wq, &fence->wait.head, entry) {
wq 278 drivers/gpu/drm/i915/i915_sw_fence.c if (wq->func != i915_sw_fence_wake)
wq 281 drivers/gpu/drm/i915/i915_sw_fence.c __i915_sw_fence_clear_checked_bit(wq->private);
wq 304 drivers/gpu/drm/i915/i915_sw_fence.c wait_queue_entry_t *wq, gfp_t gfp)
wq 324 drivers/gpu/drm/i915/i915_sw_fence.c if (!wq) {
wq 325 drivers/gpu/drm/i915/i915_sw_fence.c wq = kmalloc(sizeof(*wq), gfp);
wq 326 drivers/gpu/drm/i915/i915_sw_fence.c if (!wq) {
wq 338 drivers/gpu/drm/i915/i915_sw_fence.c INIT_LIST_HEAD(&wq->entry);
wq 339 drivers/gpu/drm/i915/i915_sw_fence.c wq->flags = pending;
wq 340 drivers/gpu/drm/i915/i915_sw_fence.c wq->func = i915_sw_fence_wake;
wq 341 drivers/gpu/drm/i915/i915_sw_fence.c wq->private = fence;
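
i915_sw_fence.c above drops below the wait_event() layer entirely: it links a raw wait_queue_entry_t whose ->func callback (i915_sw_fence_wake) runs in the waker's context instead of waking a sleeping task. A sketch of installing such a callback (hypothetical names; the real code additionally packs state such as I915_SW_FENCE_FLAG_ALLOC into wq->flags):

#include <linux/wait.h>

/* Runs from whoever calls wake_up() on the head; wq->private carries
 * our cookie. Returning 0 lets the wakeup continue to other entries. */
static int demo_wake_fn(wait_queue_entry_t *wq, unsigned mode,
			int flags, void *key)
{
	list_del_init(&wq->entry);	/* one-shot: drop out of the queue */
	/* ... act on wq->private here ... */
	return 0;
}

static void demo_register(wait_queue_head_t *head,
			  wait_queue_entry_t *wq, void *cookie)
{
	wq->flags = 0;
	wq->private = cookie;
	wq->func = demo_wake_fn;
	INIT_LIST_HEAD(&wq->entry);
	add_wait_queue(head, wq);	/* a later wake_up(head) fires demo_wake_fn */
}
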
wq 347 drivers/gpu/drm/i915/i915_sw_fence.c __add_wait_queue_entry_tail(&signaler->wait, wq);
wq 350 drivers/gpu/drm/i915/i915_sw_fence.c i915_sw_fence_wake(wq, 0, signaler->error, NULL);
wq 360 drivers/gpu/drm/i915/i915_sw_fence.c wait_queue_entry_t *wq)
wq 362 drivers/gpu/drm/i915/i915_sw_fence.c return __i915_sw_fence_await_sw_fence(fence, signaler, wq, 0);
wq 67 drivers/gpu/drm/i915/i915_sw_fence.h wait_queue_entry_t *wq);
wq 73 drivers/gpu/drm/i915/selftests/mock_gem_device.c drain_workqueue(i915->wq);
wq 80 drivers/gpu/drm/i915/selftests/mock_gem_device.c destroy_workqueue(i915->wq);
wq 186 drivers/gpu/drm/i915/selftests/mock_gem_device.c i915->wq = alloc_ordered_workqueue("mock", 0);
wq 187 drivers/gpu/drm/i915/selftests/mock_gem_device.c if (!i915->wq)
wq 230 drivers/gpu/drm/i915/selftests/mock_gem_device.c destroy_workqueue(i915->wq);
wq 976 drivers/gpu/drm/msm/adreno/a5xx_gpu.c queue_work(priv->wq, &gpu->recover_work);
wq 87 drivers/gpu/drm/msm/adreno/a5xx_preempt.c queue_work(priv->wq, &gpu->recover_work);
wq 184 drivers/gpu/drm/msm/adreno/a5xx_preempt.c queue_work(priv->wq, &gpu->recover_work);
wq 28 drivers/gpu/drm/msm/adreno/a6xx_gmu.c queue_work(priv->wq, &gpu->recover_work);
wq 689 drivers/gpu/drm/msm/adreno/a6xx_gpu.c queue_work(priv->wq, &gpu->recover_work);
wq 823 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c queue_delayed_work(priv->wq, &dpu_enc->delayed_off_work,
wq 1505 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c rc = wait_event_timeout(*(info->wq),
wq 292 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h wait_queue_head_t *wq;
wq 257 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c wait_info.wq = &phys_enc->pending_kickoff_wq;
wq 634 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c wait_info.wq = &phys_enc->pending_kickoff_wq;
wq 708 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c wait_info.wq = &cmd_enc->pending_vblank_wq;
wq 501 drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c wait_info.wq = &phys_enc->pending_kickoff_wq;
wq 512 drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c drm_flip_work_commit(&mdp4_crtc->unref_cursor_work, priv->wq);
wq 1087 drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c drm_flip_work_commit(&mdp5_crtc->unref_cursor_work, priv->wq);
wq 76 drivers/gpu/drm/msm/msm_atomic.c queue_work(priv->wq, &timer->work);
wq 205 drivers/gpu/drm/msm/msm_drv.c queue_work(priv->wq, &vbl_work->work);
wq 236 drivers/gpu/drm/msm/msm_drv.c flush_workqueue(priv->wq);
wq 282 drivers/gpu/drm/msm/msm_drv.c destroy_workqueue(priv->wq);
wq 426 drivers/gpu/drm/msm/msm_drv.c priv->wq = alloc_ordered_workqueue("msm", 0);
wq 181 drivers/gpu/drm/msm/msm_drv.h struct workqueue_struct *wq;
wq 890 drivers/gpu/drm/msm/msm_gem.c queue_work(priv->wq, &priv->free_work);
wq 539 drivers/gpu/drm/msm/msm_gpu.c queue_work(priv->wq, &gpu->recover_work);
wq 547 drivers/gpu/drm/msm/msm_gpu.c queue_work(priv->wq, &gpu->retire_work);
wq 724 drivers/gpu/drm/msm/msm_gpu.c queue_work(priv->wq, &gpu->retire_work);
wq 151 drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c queue_work(disp->wq, &disp->supervisor);
wq 136 drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c queue_work(disp->wq, &disp->supervisor);
wq 75 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c if (disp->wq)
wq 76 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c destroy_workqueue(disp->wq);
wq 171 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c disp->wq = create_singlethread_workqueue("nvkm-disp");
wq 172 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c if (!disp->wq)
wq 678 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c queue_work(disp->wq, &disp->supervisor);
wq 14 drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h struct workqueue_struct *wq;
wq 582 drivers/gpu/drm/omapdrm/omap_drv.c priv->wq = alloc_ordered_workqueue("omapdrm", 0);
wq 635 drivers/gpu/drm/omapdrm/omap_drv.c destroy_workqueue(priv->wq);
wq 662 drivers/gpu/drm/omapdrm/omap_drv.c destroy_workqueue(priv->wq);
wq 61 drivers/gpu/drm/omapdrm/omap_drv.h struct workqueue_struct *wq;
wq 64 drivers/gpu/drm/omapdrm/omap_fbdev.c queue_work(priv->wq, &fbdev->work);
wq 13 drivers/gpu/drm/omapdrm/omap_irq.c wait_queue_head_t wq;
wq 38 drivers/gpu/drm/omapdrm/omap_irq.c wake_up(&wait->wq);
wq 48 drivers/gpu/drm/omapdrm/omap_irq.c init_waitqueue_head(&wait->wq);
wq 67 drivers/gpu/drm/omapdrm/omap_irq.c ret = wait_event_timeout(wait->wq, (wait->count <= 0), timeout);
wq 543 drivers/gpu/drm/radeon/radeon.h wait_queue_head_t wq;
wq 56 drivers/gpu/drm/radeon/radeon_sa.c init_waitqueue_head(&sa_manager->wq);
wq 333 drivers/gpu/drm/radeon/radeon_sa.c spin_lock(&sa_manager->wq.lock);
wq 345 drivers/gpu/drm/radeon/radeon_sa.c spin_unlock(&sa_manager->wq.lock);
wq 355 drivers/gpu/drm/radeon/radeon_sa.c spin_unlock(&sa_manager->wq.lock);
wq 359 drivers/gpu/drm/radeon/radeon_sa.c spin_lock(&sa_manager->wq.lock);
wq 363 drivers/gpu/drm/radeon/radeon_sa.c sa_manager->wq,
wq 370 drivers/gpu/drm/radeon/radeon_sa.c spin_unlock(&sa_manager->wq.lock);
wq 386 drivers/gpu/drm/radeon/radeon_sa.c spin_lock(&sa_manager->wq.lock);
wq 394 drivers/gpu/drm/radeon/radeon_sa.c wake_up_all_locked(&sa_manager->wq);
wq 395 drivers/gpu/drm/radeon/radeon_sa.c spin_unlock(&sa_manager->wq.lock);
wq 405 drivers/gpu/drm/radeon/radeon_sa.c spin_lock(&sa_manager->wq.lock);
wq 422 drivers/gpu/drm/radeon/radeon_sa.c spin_unlock(&sa_manager->wq.lock);
wq 575 drivers/gpu/drm/tilcdc/tilcdc_crtc.c flush_workqueue(priv->wq);
wq 209 drivers/gpu/drm/tilcdc/tilcdc_drv.c if (priv->wq) {
wq 210 drivers/gpu/drm/tilcdc/tilcdc_drv.c flush_workqueue(priv->wq);
wq 211 drivers/gpu/drm/tilcdc/tilcdc_drv.c destroy_workqueue(priv->wq);
wq 246 drivers/gpu/drm/tilcdc/tilcdc_drv.c priv->wq = alloc_ordered_workqueue("tilcdc", 0);
wq 247 drivers/gpu/drm/tilcdc/tilcdc_drv.c if (!priv->wq) {
wq 68 drivers/gpu/drm/tilcdc/tilcdc_drv.h struct workqueue_struct *wq;
wq 530 drivers/gpu/drm/ttm/ttm_bo.c schedule_delayed_work(&bdev->wq,
wq 664 drivers/gpu/drm/ttm/ttm_bo.c container_of(work, struct ttm_bo_device, wq.work);
wq 667 drivers/gpu/drm/ttm/ttm_bo.c schedule_delayed_work(&bdev->wq,
wq 697 drivers/gpu/drm/ttm/ttm_bo.c return cancel_delayed_work_sync(&bdev->wq);
wq 704 drivers/gpu/drm/ttm/ttm_bo.c schedule_delayed_work(&bdev->wq,
wq 1710 drivers/gpu/drm/ttm/ttm_bo.c cancel_delayed_work_sync(&bdev->wq);
wq 1757 drivers/gpu/drm/ttm/ttm_bo.c INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
wq 353 drivers/gpu/drm/via/via_dmablit.c schedule_work(&blitq->wq);
wq 495 drivers/gpu/drm/via/via_dmablit.c drm_via_blitq_t *blitq = container_of(work, drm_via_blitq_t, wq);
wq 563 drivers/gpu/drm/via/via_dmablit.c INIT_WORK(&blitq->wq, via_dmablit_workqueue);
wq 77 drivers/gpu/drm/via/via_dmablit.h struct work_struct wq;
wq 288 drivers/gpu/host1x/cdma.c schedule_delayed_work(&cdma->timeout.wq,
wq 298 drivers/gpu/host1x/cdma.c cancel_delayed_work(&cdma->timeout.wq);
wq 44 drivers/gpu/host1x/cdma.h struct delayed_work wq; /* work queue */
wq 254 drivers/gpu/host1x/hw/cdma_hw.c timeout.wq);
wq 300 drivers/gpu/host1x/hw/cdma_hw.c INIT_DELAYED_WORK(&cdma->timeout.wq, cdma_timeout_handler);
wq 312 drivers/gpu/host1x/hw/cdma_hw.c cancel_delayed_work(&cdma->timeout.wq);
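
ttm_bo.c and the host1x cdma hits above use a third flavor, a struct delayed_work that is (confusingly) also named wq: INIT_DELAYED_WORK() once, schedule_delayed_work() to (re)arm it on the system workqueue, cancel_delayed_work_sync() before teardown. A sketch with a hypothetical device struct:

#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct demo_device {
	struct delayed_work wq;	/* named "wq" as in ttm_bo_device / host1x */
};

static void demo_timeout(struct work_struct *work)
{
	struct demo_device *dev =
		container_of(work, struct demo_device, wq.work);

	/* Periodic housekeeping; re-arm, as ttm_bo_delayed_workqueue does. */
	schedule_delayed_work(&dev->wq, HZ / 100);
}

static void demo_start(struct demo_device *dev)
{
	INIT_DELAYED_WORK(&dev->wq, demo_timeout);
	schedule_delayed_work(&dev->wq, HZ / 100);
}

static void demo_stop(struct demo_device *dev)
{
	cancel_delayed_work_sync(&dev->wq);	/* waits if it is mid-run */
}
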
wq 113 drivers/gpu/host1x/intr.c wait_queue_head_t *wq = waiter->data;
wq 115 drivers/gpu/host1x/intr.c wake_up(wq);
wq 120 drivers/gpu/host1x/intr.c wait_queue_head_t *wq = waiter->data;
wq 122 drivers/gpu/host1x/intr.c wake_up_interruptible(wq);
wq 208 drivers/gpu/host1x/syncpt.c DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
wq 249 drivers/gpu/host1x/syncpt.c &wq, waiter, &ref);
wq 263 drivers/gpu/host1x/syncpt.c remain = wait_event_interruptible_timeout(wq,
wq 190 drivers/greybus/connection.c connection->wq = alloc_workqueue("%s:%d", WQ_UNBOUND, 1,
wq 192 drivers/greybus/connection.c if (!connection->wq) {
wq 899 drivers/greybus/connection.c destroy_workqueue(connection->wq);
wq 946 drivers/greybus/operation.c queue_work(connection->wq, &operation->work);
wq 1128 drivers/greybus/svc.c queue_work(svc->wq, &dr->work);
wq 1293 drivers/greybus/svc.c destroy_workqueue(svc->wq);
wq 1310 drivers/greybus/svc.c svc->wq = alloc_workqueue("%s:svc", WQ_UNBOUND, 1, dev_name(&hd->dev));
wq 1311 drivers/greybus/svc.c if (!svc->wq) {
wq 1387 drivers/greybus/svc.c flush_workqueue(svc->wq);
wq 34 drivers/hid/hid-elo.c static struct workqueue_struct *wq;
wq 182 drivers/hid/hid-elo.c queue_delayed_work(wq, &priv->work, ELO_PERIODIC_READ_INTERVAL);
wq 255 drivers/hid/hid-elo.c queue_delayed_work(wq, &priv->work, ELO_PERIODIC_READ_INTERVAL);
wq 293 drivers/hid/hid-elo.c wq = create_singlethread_workqueue("elousb");
wq 294 drivers/hid/hid-elo.c if (!wq)
wq 299 drivers/hid/hid-elo.c destroy_workqueue(wq);
wq 308 drivers/hid/hid-elo.c destroy_workqueue(wq);
wq 1689 drivers/hid/hid-logitech-hidpp.c struct workqueue_struct *wq;
wq 1835 drivers/hid/hid-logitech-hidpp.c queue_work(data->wq, &wd->work);
wq 2089 drivers/hid/hid-logitech-hidpp.c destroy_workqueue(data->wq);
wq 2145 drivers/hid/hid-logitech-hidpp.c data->wq = create_singlethread_workqueue("hidpp-ff-sendqueue");
wq 2146 drivers/hid/hid-logitech-hidpp.c if (!data->wq) {
wq 550 drivers/hv/channel_mgmt.c struct workqueue_struct *wq;
wq 626 drivers/hv/channel_mgmt.c wq = fnew ? vmbus_connection.handle_primary_chan_wq :
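
The greybus hits show that alloc_workqueue() takes a printf-style format string: the trailing arguments fill in the queue name, so every connection gets an identifiable queue. A sketch of that usage (hypothetical wrapper):

#include <linux/device.h>
#include <linux/workqueue.h>

static struct workqueue_struct *demo_alloc_conn_wq(struct device *parent,
						   int cport_id)
{
	/*
	 * WQ_UNBOUND with max_active == 1 limits the queue to one
	 * work item executing at a time, as the greybus queues need.
	 */
	return alloc_workqueue("%s:%d", WQ_UNBOUND, 1,
			       dev_name(parent), cport_id);
}
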
wq 628 drivers/hv/channel_mgmt.c queue_work(wq, &newchannel->add_channel_work);
wq 333 drivers/i2c/busses/i2c-ibm_iic.c wake_up_interruptible(&dev->wq);
wq 414 drivers/i2c/busses/i2c-ibm_iic.c ret = wait_event_interruptible_timeout(dev->wq,
wq 711 drivers/i2c/busses/i2c-ibm_iic.c init_waitqueue_head(&dev->wq);
wq 43 drivers/i2c/busses/i2c-ibm_iic.h wait_queue_head_t wq;
wq 30 drivers/i2c/busses/i2c-taos-evm.c static DECLARE_WAIT_QUEUE_HEAD(wq);
wq 104 drivers/i2c/busses/i2c-taos-evm.c wait_event_interruptible_timeout(wq, taos->state == TAOS_STATE_IDLE,
wq 161 drivers/i2c/busses/i2c-taos-evm.c wake_up_interruptible(&wq);
wq 166 drivers/i2c/busses/i2c-taos-evm.c wake_up_interruptible(&wq);
wq 173 drivers/i2c/busses/i2c-taos-evm.c wake_up_interruptible(&wq);
wq 226 drivers/i2c/busses/i2c-taos-evm.c wait_event_interruptible_timeout(wq, taos->state == TAOS_STATE_IDLE,
wq 248 drivers/i2c/busses/i2c-taos-evm.c wait_event_interruptible_timeout(wq, taos->state == TAOS_STATE_IDLE,
wq 517 drivers/i3c/master.c if (master->wq)
wq 518 drivers/i3c/master.c destroy_workqueue(master->wq);
wq 2184 drivers/i3c/master.c queue_work(dev->common.master->wq, &slot->work);
wq 2472 drivers/i3c/master.c master->wq = alloc_workqueue("%s", 0, 0, dev_name(parent));
wq 2473 drivers/i3c/master.c if (!master->wq) {
wq 1346 drivers/i3c/master/i3c-master-cdns.c queue_work(master->base.wq, &master->hj_work);
wq 75 drivers/iio/adc/berlin2-adc.c wait_queue_head_t wq;
wq 126 drivers/iio/adc/berlin2-adc.c ret = wait_event_interruptible_timeout(priv->wq, priv->data_available,
wq 177 drivers/iio/adc/berlin2-adc.c ret = wait_event_interruptible_timeout(priv->wq, priv->data_available,
wq 253 drivers/iio/adc/berlin2-adc.c wake_up_interruptible(&priv->wq);
wq 273 drivers/iio/adc/berlin2-adc.c wake_up_interruptible(&priv->wq);
wq 321 drivers/iio/adc/berlin2-adc.c init_waitqueue_head(&priv->wq);
wq 24 drivers/iio/adc/npcm_adc.c wait_queue_head_t wq;
wq 86 drivers/iio/adc/npcm_adc.c wake_up_interruptible(&info->wq);
wq 105 drivers/iio/adc/npcm_adc.c ret = wait_event_interruptible_timeout(info->wq, info->int_status,
wq 265 drivers/iio/adc/npcm_adc.c init_waitqueue_head(&info->wq);
wq 132 drivers/infiniband/core/cm.c struct workqueue_struct *wq;
wq 987 drivers/infiniband/core/cm.c queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work,
wq 3931 drivers/infiniband/core/cm.c queue_delayed_work(cm.wq, &work->work, 0);
wq 4062 drivers/infiniband/core/cm.c queue_delayed_work(cm.wq, &work->work, 0);
wq 4454 drivers/infiniband/core/cm.c flush_workqueue(cm.wq);
wq 4490 drivers/infiniband/core/cm.c cm.wq = alloc_workqueue("ib_cm", 0, 1);
wq 4491 drivers/infiniband/core/cm.c if (!cm.wq) {
wq 4502 drivers/infiniband/core/cm.c destroy_workqueue(cm.wq);
wq 4519 drivers/infiniband/core/cm.c destroy_workqueue(cm.wq);
wq 641 drivers/infiniband/core/mad.c flush_workqueue(port_priv->wq);
wq 975 drivers/infiniband/core/mad.c queue_work(mad_agent_priv->qp_info->port_priv->wq,
wq 2408 drivers/infiniband/core/mad.c mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
wq 2443 drivers/infiniband/core/mad.c mod_delayed_work(mad_agent_priv->qp_info->port_priv->wq,
wq 2896 drivers/infiniband/core/mad.c port_priv->wq,
wq 3252 drivers/infiniband/core/mad.c port_priv->wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
wq 3253 drivers/infiniband/core/mad.c if (!port_priv->wq) {
wq 3275 drivers/infiniband/core/mad.c destroy_workqueue(port_priv->wq);
wq 3312 drivers/infiniband/core/mad.c destroy_workqueue(port_priv->wq);
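
berlin2-adc.c and npcm_adc.c above are the standard IRQ-to-sleeper handshake: the interrupt handler stores the result, sets a flag, and wakes the queue; the reader sleeps on the flag with a timeout. A sketch under a hypothetical ADC driver struct (wq initialized with init_waitqueue_head() at probe time):

#include <linux/wait.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>

struct demo_adc {
	wait_queue_head_t wq;
	bool data_available;
	int value;
};

static irqreturn_t demo_adc_irq(int irq, void *private)
{
	struct demo_adc *priv = private;

	priv->value = 0;	/* read the conversion result here */
	priv->data_available = true;
	wake_up_interruptible(&priv->wq);
	return IRQ_HANDLED;
}

static int demo_adc_read(struct demo_adc *priv, int *val)
{
	long ret = wait_event_interruptible_timeout(priv->wq,
						    priv->data_available,
						    msecs_to_jiffies(1000));
	if (ret == 0)
		return -ETIMEDOUT;	/* timed out, no sample */
	if (ret < 0)
		return ret;		/* interrupted by a signal */
	priv->data_available = false;
	*val = priv->value;
	return 0;
}
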
wq 208 drivers/infiniband/core/mad_priv.h struct workqueue_struct *wq;
wq 106 drivers/infiniband/core/mad_rmpp.c flush_workqueue(agent->qp_info->port_priv->wq);
wq 472 drivers/infiniband/core/mad_rmpp.c queue_delayed_work(rmpp_recv->agent->qp_info->port_priv->wq,
wq 565 drivers/infiniband/core/mad_rmpp.c queue_delayed_work(agent->qp_info->port_priv->wq,
wq 2898 drivers/infiniband/core/uverbs_cmd.c struct ib_wq *wq;
wq 2936 drivers/infiniband/core/uverbs_cmd.c wq = pd->device->ops.create_wq(pd, &wq_init_attr, &attrs->driver_udata);
wq 2937 drivers/infiniband/core/uverbs_cmd.c if (IS_ERR(wq)) {
wq 2938 drivers/infiniband/core/uverbs_cmd.c err = PTR_ERR(wq);
wq 2942 drivers/infiniband/core/uverbs_cmd.c wq->uobject = &obj->uevent.uobject;
wq 2943 drivers/infiniband/core/uverbs_cmd.c obj->uevent.uobject.object = wq;
wq 2944 drivers/infiniband/core/uverbs_cmd.c wq->wq_type = wq_init_attr.wq_type;
wq 2945 drivers/infiniband/core/uverbs_cmd.c wq->cq = cq;
wq 2946 drivers/infiniband/core/uverbs_cmd.c wq->pd = pd;
wq 2947 drivers/infiniband/core/uverbs_cmd.c wq->device = pd->device;
wq 2948 drivers/infiniband/core/uverbs_cmd.c wq->wq_context = wq_init_attr.wq_context;
wq 2949 drivers/infiniband/core/uverbs_cmd.c atomic_set(&wq->usecnt, 0);
wq 2952 drivers/infiniband/core/uverbs_cmd.c wq->uobject = &obj->uevent.uobject;
wq 2953 drivers/infiniband/core/uverbs_cmd.c obj->uevent.uobject.object = wq;
wq 2959 drivers/infiniband/core/uverbs_cmd.c resp.wqn = wq->wq_num;
wq 2970 drivers/infiniband/core/uverbs_cmd.c ib_destroy_wq(wq, uverbs_get_cleared_udata(attrs));
wq 3012 drivers/infiniband/core/uverbs_cmd.c struct ib_wq *wq;
wq 3026 drivers/infiniband/core/uverbs_cmd.c wq = uobj_get_obj_read(wq, UVERBS_OBJECT_WQ, cmd.wq_handle, attrs);
wq 3027 drivers/infiniband/core/uverbs_cmd.c if (!wq)
wq 3036 drivers/infiniband/core/uverbs_cmd.c ret = wq->device->ops.modify_wq(wq, &wq_attr, cmd.attr_mask,
wq 3038 drivers/infiniband/core/uverbs_cmd.c uobj_put_obj_read(wq);
wq 3052 drivers/infiniband/core/uverbs_cmd.c struct ib_wq *wq = NULL;
wq 3091 drivers/infiniband/core/uverbs_cmd.c wq = uobj_get_obj_read(wq, UVERBS_OBJECT_WQ,
wq 3093 drivers/infiniband/core/uverbs_cmd.c if (!wq) {
wq 3098 drivers/infiniband/core/uverbs_cmd.c wqs[num_read_wqs] = wq;
wq 514 drivers/infiniband/core/uverbs_main.c struct ib_uevent_object *uobj = container_of(event->element.wq->uobject,
wq 132 drivers/infiniband/core/uverbs_std_types.c struct ib_wq *wq = uobject->object;
wq 137 drivers/infiniband/core/uverbs_std_types.c ret = ib_destroy_wq(wq, &attrs->driver_udata);
wq 2276 drivers/infiniband/core/verbs.c struct ib_wq *wq;
wq 2281 drivers/infiniband/core/verbs.c wq = pd->device->ops.create_wq(pd, wq_attr, NULL);
wq 2282 drivers/infiniband/core/verbs.c if (!IS_ERR(wq)) {
wq 2283 drivers/infiniband/core/verbs.c wq->event_handler = wq_attr->event_handler;
wq 2284 drivers/infiniband/core/verbs.c wq->wq_context = wq_attr->wq_context;
wq 2285 drivers/infiniband/core/verbs.c wq->wq_type = wq_attr->wq_type;
wq 2286 drivers/infiniband/core/verbs.c wq->cq = wq_attr->cq;
wq 2287 drivers/infiniband/core/verbs.c wq->device = pd->device;
wq 2288 drivers/infiniband/core/verbs.c wq->pd = pd;
wq 2289 drivers/infiniband/core/verbs.c wq->uobject = NULL;
wq 2292 drivers/infiniband/core/verbs.c atomic_set(&wq->usecnt, 0);
wq 2294 drivers/infiniband/core/verbs.c return wq;
wq 2303 drivers/infiniband/core/verbs.c int ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata)
wq 2305 drivers/infiniband/core/verbs.c struct ib_cq *cq = wq->cq;
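
From uverbs_cmd.c onward, `wq` stops meaning waitqueue or workqueue and becomes the RDMA work queue object (struct ib_wq) that the verbs code above creates and tears down. A sketch of in-kernel use of that API, assuming an existing protection domain and CQ; the attribute values are illustrative only:

#include <rdma/ib_verbs.h>

/* Hypothetical: create a receive work queue on an existing pd/cq pair,
 * relying on the ib_create_wq()/ib_destroy_wq() wrappers shown above. */
static struct ib_wq *demo_create_rq(struct ib_pd *pd, struct ib_cq *cq)
{
	struct ib_wq_init_attr attr = {
		.wq_type = IB_WQT_RQ,	/* receive work queue */
		.max_wr  = 128,
		.max_sge = 1,
		.cq      = cq,
	};

	return ib_create_wq(pd, &attr);	/* ERR_PTR() on failure */
}

static int demo_destroy_rq(struct ib_wq *wq)
{
	return ib_destroy_wq(wq, NULL);	/* fails while wq->usecnt is elevated */
}
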
wq 2306 drivers/infiniband/core/verbs.c struct ib_pd *pd = wq->pd;
wq 2308 drivers/infiniband/core/verbs.c if (atomic_read(&wq->usecnt))
wq 2311 drivers/infiniband/core/verbs.c wq->device->ops.destroy_wq(wq, udata);
wq 2327 drivers/infiniband/core/verbs.c int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
wq 2332 drivers/infiniband/core/verbs.c if (!wq->device->ops.modify_wq)
wq 2335 drivers/infiniband/core/verbs.c err = wq->device->ops.modify_wq(wq, wq_attr, wq_attr_mask, NULL);
wq 258 drivers/infiniband/hw/cxgb3/cxio_hal.c struct t3_wq *wq, struct cxio_ucontext *uctx)
wq 260 drivers/infiniband/hw/cxgb3/cxio_hal.c int depth = 1UL << wq->size_log2;
wq 261 drivers/infiniband/hw/cxgb3/cxio_hal.c int rqsize = 1UL << wq->rq_size_log2;
wq 263 drivers/infiniband/hw/cxgb3/cxio_hal.c wq->qpid = get_qpid(rdev_p, uctx);
wq 264 drivers/infiniband/hw/cxgb3/cxio_hal.c if (!wq->qpid)
wq 267 drivers/infiniband/hw/cxgb3/cxio_hal.c wq->rq = kcalloc(depth, sizeof(struct t3_swrq), GFP_KERNEL);
wq 268 drivers/infiniband/hw/cxgb3/cxio_hal.c if (!wq->rq)
wq 271 drivers/infiniband/hw/cxgb3/cxio_hal.c wq->rq_addr = cxio_hal_rqtpool_alloc(rdev_p, rqsize);
wq 272 drivers/infiniband/hw/cxgb3/cxio_hal.c if (!wq->rq_addr)
wq 275 drivers/infiniband/hw/cxgb3/cxio_hal.c wq->sq = kcalloc(depth, sizeof(struct t3_swsq), GFP_KERNEL);
wq 276 drivers/infiniband/hw/cxgb3/cxio_hal.c if (!wq->sq)
wq 279 drivers/infiniband/hw/cxgb3/cxio_hal.c wq->queue = dma_alloc_coherent(&(rdev_p->rnic_info.pdev->dev),
wq 281 drivers/infiniband/hw/cxgb3/cxio_hal.c &(wq->dma_addr), GFP_KERNEL);
wq 282 drivers/infiniband/hw/cxgb3/cxio_hal.c if (!wq->queue)
wq 285 drivers/infiniband/hw/cxgb3/cxio_hal.c dma_unmap_addr_set(wq, mapping, wq->dma_addr);
wq 286 drivers/infiniband/hw/cxgb3/cxio_hal.c wq->doorbell = (void __iomem *)rdev_p->rnic_info.kdb_addr;
wq 288 drivers/infiniband/hw/cxgb3/cxio_hal.c wq->udb = (u64)rdev_p->rnic_info.udbell_physbase +
wq 289 drivers/infiniband/hw/cxgb3/cxio_hal.c (wq->qpid << rdev_p->qpshift);
wq 290 drivers/infiniband/hw/cxgb3/cxio_hal.c wq->rdev = rdev_p;
wq 292 drivers/infiniband/hw/cxgb3/cxio_hal.c __func__, wq->qpid, wq->doorbell, (unsigned long long)wq->udb);
wq 295 drivers/infiniband/hw/cxgb3/cxio_hal.c kfree(wq->sq);
wq 297 drivers/infiniband/hw/cxgb3/cxio_hal.c cxio_hal_rqtpool_free(rdev_p, wq->rq_addr, rqsize);
wq 299 drivers/infiniband/hw/cxgb3/cxio_hal.c kfree(wq->rq);
wq 301 drivers/infiniband/hw/cxgb3/cxio_hal.c put_qpid(rdev_p, wq->qpid, uctx);
wq 316 drivers/infiniband/hw/cxgb3/cxio_hal.c int cxio_destroy_qp(struct cxio_rdev *rdev_p, struct t3_wq *wq,
wq 320 drivers/infiniband/hw/cxgb3/cxio_hal.c (1UL << (wq->size_log2))
wq 321 drivers/infiniband/hw/cxgb3/cxio_hal.c * sizeof(union t3_wr), wq->queue,
wq 322 drivers/infiniband/hw/cxgb3/cxio_hal.c dma_unmap_addr(wq, mapping));
wq 323 drivers/infiniband/hw/cxgb3/cxio_hal.c kfree(wq->sq);
wq 324 drivers/infiniband/hw/cxgb3/cxio_hal.c cxio_hal_rqtpool_free(rdev_p, wq->rq_addr, (1UL << wq->rq_size_log2));
wq 325 drivers/infiniband/hw/cxgb3/cxio_hal.c kfree(wq->rq);
wq 326 drivers/infiniband/hw/cxgb3/cxio_hal.c put_qpid(rdev_p, wq->qpid, uctx);
wq 330 drivers/infiniband/hw/cxgb3/cxio_hal.c static void insert_recv_cqe(struct t3_wq *wq, struct t3_cq *cq)
wq 335 drivers/infiniband/hw/cxgb3/cxio_hal.c wq, cq, cq->sw_rptr, cq->sw_wptr);
wq 341 drivers/infiniband/hw/cxgb3/cxio_hal.c V_CQE_QPID(wq->qpid) |
wq 348 drivers/infiniband/hw/cxgb3/cxio_hal.c int cxio_flush_rq(struct t3_wq *wq, struct t3_cq *cq, int count)
wq 353 drivers/infiniband/hw/cxgb3/cxio_hal.c pr_debug("%s wq %p cq %p\n", __func__, wq, cq);
wq 357 drivers/infiniband/hw/cxgb3/cxio_hal.c wq->rq_rptr, wq->rq_wptr, count);
wq 358 drivers/infiniband/hw/cxgb3/cxio_hal.c ptr = wq->rq_rptr + count;
wq 359 drivers/infiniband/hw/cxgb3/cxio_hal.c while (ptr++ != wq->rq_wptr) {
wq 360 drivers/infiniband/hw/cxgb3/cxio_hal.c insert_recv_cqe(wq, cq);
wq 366 drivers/infiniband/hw/cxgb3/cxio_hal.c static void insert_sq_cqe(struct t3_wq *wq, struct t3_cq *cq,
wq 372 drivers/infiniband/hw/cxgb3/cxio_hal.c wq, cq, cq->sw_rptr, cq->sw_wptr);
wq 378 drivers/infiniband/hw/cxgb3/cxio_hal.c V_CQE_QPID(wq->qpid) |
wq 387 drivers/infiniband/hw/cxgb3/cxio_hal.c int cxio_flush_sq(struct t3_wq *wq, struct t3_cq *cq, int count)
wq 389 drivers/infiniband/hw/cxgb3/cxio_hal.c __u32 ptr = wq->sq_rptr + count;
wq 391 drivers/infiniband/hw/cxgb3/cxio_hal.c struct t3_swsq *sqp = wq->sq + Q_PTR2IDX(ptr, wq->sq_size_log2);
wq 393 drivers/infiniband/hw/cxgb3/cxio_hal.c while (ptr != wq->sq_wptr) {
wq 395 drivers/infiniband/hw/cxgb3/cxio_hal.c insert_sq_cqe(wq, cq, sqp);
wq 397 drivers/infiniband/hw/cxgb3/cxio_hal.c sqp = wq->sq + Q_PTR2IDX(ptr, wq->sq_size_log2);
wq 424 drivers/infiniband/hw/cxgb3/cxio_hal.c static int cqe_completes_wr(struct t3_cqe *cqe, struct t3_wq *wq)
wq 436 drivers/infiniband/hw/cxgb3/cxio_hal.c Q_EMPTY(wq->rq_rptr, wq->rq_wptr))
wq 442 drivers/infiniband/hw/cxgb3/cxio_hal.c void cxio_count_scqes(struct t3_cq *cq, struct t3_wq *wq, int *count)
wq 452 drivers/infiniband/hw/cxgb3/cxio_hal.c ((CQE_OPCODE(*cqe) == T3_READ_RESP) && wq->oldest_read)) &&
wq 453 drivers/infiniband/hw/cxgb3/cxio_hal.c (CQE_QPID(*cqe) == wq->qpid))
wq 460 drivers/infiniband/hw/cxgb3/cxio_hal.c void cxio_count_rcqes(struct t3_cq *cq, struct t3_wq *wq, int *count)
wq 471 drivers/infiniband/hw/cxgb3/cxio_hal.c (CQE_QPID(*cqe) == wq->qpid) && cqe_completes_wr(cqe, wq))
wq 1042 drivers/infiniband/hw/cxgb3/cxio_hal.c static void flush_completed_wrs(struct t3_wq *wq, struct t3_cq *cq)
wq 1045 drivers/infiniband/hw/cxgb3/cxio_hal.c __u32 ptr = wq->sq_rptr;
wq 1046 drivers/infiniband/hw/cxgb3/cxio_hal.c int count = Q_COUNT(wq->sq_rptr, wq->sq_wptr);
wq 1048 drivers/infiniband/hw/cxgb3/cxio_hal.c sqp = wq->sq + Q_PTR2IDX(ptr, wq->sq_size_log2);
wq 1052 drivers/infiniband/hw/cxgb3/cxio_hal.c sqp = wq->sq + Q_PTR2IDX(ptr, wq->sq_size_log2);
wq 1059 drivers/infiniband/hw/cxgb3/cxio_hal.c __func__, Q_PTR2IDX(ptr, wq->sq_size_log2),
wq 1071 drivers/infiniband/hw/cxgb3/cxio_hal.c static void create_read_req_cqe(struct t3_wq *wq, struct t3_cqe *hw_cqe,
wq 1074 drivers/infiniband/hw/cxgb3/cxio_hal.c read_cqe->u.scqe.wrid_hi = wq->oldest_read->sq_wptr;
wq 1075 drivers/infiniband/hw/cxgb3/cxio_hal.c read_cqe->len = wq->oldest_read->read_len;
wq 1085 drivers/infiniband/hw/cxgb3/cxio_hal.c static void advance_oldest_read(struct t3_wq *wq)
wq 1088 drivers/infiniband/hw/cxgb3/cxio_hal.c u32 rptr = wq->oldest_read - wq->sq + 1;
wq 1089 drivers/infiniband/hw/cxgb3/cxio_hal.c u32 wptr = Q_PTR2IDX(wq->sq_wptr, wq->sq_size_log2);
wq 1091 drivers/infiniband/hw/cxgb3/cxio_hal.c while (Q_PTR2IDX(rptr, wq->sq_size_log2) != wptr) {
wq 1092 drivers/infiniband/hw/cxgb3/cxio_hal.c wq->oldest_read = wq->sq + Q_PTR2IDX(rptr, wq->sq_size_log2);
wq 1094 drivers/infiniband/hw/cxgb3/cxio_hal.c if (wq->oldest_read->opcode == T3_READ_REQ)
wq 1098 drivers/infiniband/hw/cxgb3/cxio_hal.c wq->oldest_read = NULL;
wq 1116 drivers/infiniband/hw/cxgb3/cxio_hal.c int cxio_poll_cq(struct t3_wq *wq, struct t3_cq *cq, struct t3_cqe *cqe,
wq 1135 drivers/infiniband/hw/cxgb3/cxio_hal.c if (wq == NULL) {
wq 1154 drivers/infiniband/hw/cxgb3/cxio_hal.c if (!wq->oldest_read) {
wq 1156 drivers/infiniband/hw/cxgb3/cxio_hal.c wq->error = 1;
wq 1165 drivers/infiniband/hw/cxgb3/cxio_hal.c create_read_req_cqe(wq, hw_cqe, &read_cqe);
wq 1167 drivers/infiniband/hw/cxgb3/cxio_hal.c advance_oldest_read(wq);
wq 1175 drivers/infiniband/hw/cxgb3/cxio_hal.c wq->error = 1;
wq 1179 drivers/infiniband/hw/cxgb3/cxio_hal.c if (CQE_STATUS(*hw_cqe) || wq->error) {
wq 1180 drivers/infiniband/hw/cxgb3/cxio_hal.c *cqe_flushed = wq->error;
wq 1181 drivers/infiniband/hw/cxgb3/cxio_hal.c wq->error = 1;
wq 1201 drivers/infiniband/hw/cxgb3/cxio_hal.c Q_EMPTY(wq->rq_rptr, wq->rq_wptr)) {
wq 1221 drivers/infiniband/hw/cxgb3/cxio_hal.c if (Q_EMPTY(wq->rq_rptr, wq->rq_wptr)) {
wq 1222 drivers/infiniband/hw/cxgb3/cxio_hal.c wq->error = 1;
wq 1227 drivers/infiniband/hw/cxgb3/cxio_hal.c if (unlikely((CQE_WRID_MSN(*hw_cqe) != (wq->rq_rptr + 1)))) {
wq 1228 drivers/infiniband/hw/cxgb3/cxio_hal.c wq->error = 1;
wq 1246 drivers/infiniband/hw/cxgb3/cxio_hal.c if (!SW_CQE(*hw_cqe) && (CQE_WRID_SQ_WPTR(*hw_cqe) != wq->sq_rptr)) {
wq 1252 drivers/infiniband/hw/cxgb3/cxio_hal.c wq->sq_size_log2));
wq 1253 drivers/infiniband/hw/cxgb3/cxio_hal.c sqp = wq->sq +
wq 1254 drivers/infiniband/hw/cxgb3/cxio_hal.c Q_PTR2IDX(CQE_WRID_SQ_WPTR(*hw_cqe), wq->sq_size_log2);
wq 1269 drivers/infiniband/hw/cxgb3/cxio_hal.c wq->sq_rptr = CQE_WRID_SQ_WPTR(*hw_cqe);
wq 1271 drivers/infiniband/hw/cxgb3/cxio_hal.c Q_PTR2IDX(wq->sq_rptr, wq->sq_size_log2));
wq 1272 drivers/infiniband/hw/cxgb3/cxio_hal.c *cookie = wq->sq[Q_PTR2IDX(wq->sq_rptr, wq->sq_size_log2)].wr_id;
wq 1273 drivers/infiniband/hw/cxgb3/cxio_hal.c wq->sq_rptr++;
wq 1276 drivers/infiniband/hw/cxgb3/cxio_hal.c Q_PTR2IDX(wq->rq_rptr, wq->rq_size_log2));
wq 1277 drivers/infiniband/hw/cxgb3/cxio_hal.c *cookie = wq->rq[Q_PTR2IDX(wq->rq_rptr, wq->rq_size_log2)].wr_id;
wq 1278 drivers/infiniband/hw/cxgb3/cxio_hal.c if (wq->rq[Q_PTR2IDX(wq->rq_rptr, wq->rq_size_log2)].pbl_addr)
wq 1279 drivers/infiniband/hw/cxgb3/cxio_hal.c cxio_hal_pblpool_free(wq->rdev,
wq 1280 drivers/infiniband/hw/cxgb3/cxio_hal.c wq->rq[Q_PTR2IDX(wq->rq_rptr,
wq 1281 drivers/infiniband/hw/cxgb3/cxio_hal.c wq->rq_size_log2)].pbl_addr, T3_STAG0_PBL_SIZE);
wq 1282 drivers/infiniband/hw/cxgb3/cxio_hal.c BUG_ON(Q_EMPTY(wq->rq_rptr, wq->rq_wptr));
wq 1283 drivers/infiniband/hw/cxgb3/cxio_hal.c wq->rq_rptr++;
wq 1290 drivers/infiniband/hw/cxgb3/cxio_hal.c flush_completed_wrs(wq, cq);
wq 164 drivers/infiniband/hw/cxgb3/cxio_hal.h int cxio_create_qp(struct cxio_rdev *rdev, u32 kernel_domain, struct t3_wq *wq,
wq 166 drivers/infiniband/hw/cxgb3/cxio_hal.h int cxio_destroy_qp(struct cxio_rdev *rdev, struct t3_wq *wq,
wq 189 drivers/infiniband/hw/cxgb3/cxio_hal.h int cxio_flush_rq(struct t3_wq *wq, struct t3_cq *cq, int count);
wq 190 drivers/infiniband/hw/cxgb3/cxio_hal.h int cxio_flush_sq(struct t3_wq *wq, struct t3_cq *cq, int count);
wq 191 drivers/infiniband/hw/cxgb3/cxio_hal.h void cxio_count_rcqes(struct t3_cq *cq, struct t3_wq *wq, int *count);
wq 192 drivers/infiniband/hw/cxgb3/cxio_hal.h void cxio_count_scqes(struct t3_cq *cq, struct t3_wq *wq, int *count);
wq 194 drivers/infiniband/hw/cxgb3/cxio_hal.h int cxio_poll_cq(struct t3_wq *wq, struct t3_cq *cq, struct t3_cqe *cqe,
wq 747 drivers/infiniband/hw/cxgb3/cxio_wr.h static inline void cxio_set_wq_in_error(struct t3_wq *wq)
wq 749 drivers/infiniband/hw/cxgb3/cxio_wr.h wq->queue->wq_in_err.err |= 1;
wq 752 drivers/infiniband/hw/cxgb3/cxio_wr.h static inline void cxio_disable_wq_db(struct t3_wq *wq)
wq 754 drivers/infiniband/hw/cxgb3/cxio_wr.h wq->queue->wq_in_err.err |= 2;
wq 757 drivers/infiniband/hw/cxgb3/cxio_wr.h static inline void cxio_enable_wq_db(struct t3_wq *wq)
wq 759 drivers/infiniband/hw/cxgb3/cxio_wr.h wq->queue->wq_in_err.err &= ~2;
wq 762 drivers/infiniband/hw/cxgb3/cxio_wr.h static inline int cxio_wq_db_enabled(struct t3_wq *wq)
wq 764 drivers/infiniband/hw/cxgb3/cxio_wr.h return !(wq->queue->wq_in_err.err & 2);
wq 72 drivers/infiniband/hw/cxgb3/iwch.c cxio_disable_wq_db(&qhp->wq);
wq 85 drivers/infiniband/hw/cxgb3/iwch.c qhp->wq.qpid);
wq 86 drivers/infiniband/hw/cxgb3/iwch.c cxio_enable_wq_db(&qhp->wq);
wq 38 drivers/infiniband/hw/cxgb3/iwch_cq.c struct t3_wq *wq = qhp ? &qhp->wq : NULL;
wq 45 drivers/infiniband/hw/cxgb3/iwch_cq.c ret = cxio_poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie,
wq 66 drivers/infiniband/hw/cxgb3/iwch_ev.c qhp->attr.state, qhp->wq.qpid,
wq 141 drivers/infiniband/hw/cxgb3/iwch_ev.c __func__, qhp->wq.qpid, qhp->ep);
wq 145 drivers/infiniband/hw/cxgb3/iwch_ev.c qhp->wq.qpid);
wq 222 drivers/infiniband/hw/cxgb3/iwch_ev.c CQE_STATUS(rsp_msg->cqe), qhp->wq.qpid);
wq 679 drivers/infiniband/hw/cxgb3/iwch_provider.c xa_erase_irq(&rhp->qps, qhp->wq.qpid);
wq 686 drivers/infiniband/hw/cxgb3/iwch_provider.c cxio_destroy_qp(&rhp->rdev, &qhp->wq,
wq 690 drivers/infiniband/hw/cxgb3/iwch_provider.c ib_qp, qhp->wq.qpid, qhp);
wq 755 drivers/infiniband/hw/cxgb3/iwch_provider.c qhp->wq.size_log2 = ilog2(wqsize);
wq 756 drivers/infiniband/hw/cxgb3/iwch_provider.c qhp->wq.rq_size_log2 = ilog2(rqsize);
wq 757 drivers/infiniband/hw/cxgb3/iwch_provider.c qhp->wq.sq_size_log2 = ilog2(sqsize);
wq 758 drivers/infiniband/hw/cxgb3/iwch_provider.c if (cxio_create_qp(&rhp->rdev, !udata, &qhp->wq,
wq 795 drivers/infiniband/hw/cxgb3/iwch_provider.c if (xa_store_irq(&rhp->qps, qhp->wq.qpid, qhp, GFP_KERNEL)) {
wq 796 drivers/infiniband/hw/cxgb3/iwch_provider.c cxio_destroy_qp(&rhp->rdev, &qhp->wq,
wq 819 drivers/infiniband/hw/cxgb3/iwch_provider.c uresp.qpid = qhp->wq.qpid;
wq 820 drivers/infiniband/hw/cxgb3/iwch_provider.c uresp.size_log2 = qhp->wq.size_log2;
wq 821 drivers/infiniband/hw/cxgb3/iwch_provider.c uresp.sq_size_log2 = qhp->wq.sq_size_log2;
wq 822 drivers/infiniband/hw/cxgb3/iwch_provider.c uresp.rq_size_log2 = qhp->wq.rq_size_log2;
wq 836 drivers/infiniband/hw/cxgb3/iwch_provider.c mm1->addr = virt_to_phys(qhp->wq.queue);
wq 840 drivers/infiniband/hw/cxgb3/iwch_provider.c mm2->addr = qhp->wq.udb & PAGE_MASK;
wq 844 drivers/infiniband/hw/cxgb3/iwch_provider.c qhp->ibqp.qp_num = qhp->wq.qpid;
wq 848 drivers/infiniband/hw/cxgb3/iwch_provider.c qhp->wq.qpid, qhp, &qhp->wq.dma_addr, 1 << qhp->wq.size_log2,
wq 849 drivers/infiniband/hw/cxgb3/iwch_provider.c qhp->wq.rq_addr);
wq 166 drivers/infiniband/hw/cxgb3/iwch_provider.h struct t3_wq wq;
wq 150 drivers/infiniband/hw/cxgb3/iwch_qp.c u8 *flit_cnt, int *wr_cnt, struct t3_wq *wq)
wq 175 drivers/infiniband/hw/cxgb3/iwch_qp.c wqe = (union t3_wr *)(wq->queue +
wq 176 drivers/infiniband/hw/cxgb3/iwch_qp.c Q_PTR2IDX((wq->wptr+1), wq->size_log2));
wq 178 drivers/infiniband/hw/cxgb3/iwch_qp.c Q_GENBIT(wq->wptr + 1, wq->size_log2),
wq 281 drivers/infiniband/hw/cxgb3/iwch_qp.c qhp->wq.rq[Q_PTR2IDX(qhp->wq.rq_wptr,
wq 282 drivers/infiniband/hw/cxgb3/iwch_qp.c qhp->wq.rq_size_log2)].wr_id = wr->wr_id;
wq 283 drivers/infiniband/hw/cxgb3/iwch_qp.c qhp->wq.rq[Q_PTR2IDX(qhp->wq.rq_wptr,
wq 284 drivers/infiniband/hw/cxgb3/iwch_qp.c qhp->wq.rq_size_log2)].pbl_addr = 0;
wq 344 drivers/infiniband/hw/cxgb3/iwch_qp.c qhp->wq.rq[Q_PTR2IDX(qhp->wq.rq_wptr,
wq 345 drivers/infiniband/hw/cxgb3/iwch_qp.c qhp->wq.rq_size_log2)].wr_id = wr->wr_id;
wq 346 drivers/infiniband/hw/cxgb3/iwch_qp.c qhp->wq.rq[Q_PTR2IDX(qhp->wq.rq_wptr,
wq 347 drivers/infiniband/hw/cxgb3/iwch_qp.c qhp->wq.rq_size_log2)].pbl_addr = pbl_addr;
wq 373 drivers/infiniband/hw/cxgb3/iwch_qp.c num_wrs = Q_FREECNT(qhp->wq.sq_rptr, qhp->wq.sq_wptr,
wq 374 drivers/infiniband/hw/cxgb3/iwch_qp.c qhp->wq.sq_size_log2);
wq 385 drivers/infiniband/hw/cxgb3/iwch_qp.c idx = Q_PTR2IDX(qhp->wq.wptr, qhp->wq.size_log2);
wq 386 drivers/infiniband/hw/cxgb3/iwch_qp.c wqe = (union t3_wr *) (qhp->wq.queue + idx);
wq 392 drivers/infiniband/hw/cxgb3/iwch_qp.c sqp = qhp->wq.sq +
wq 393 drivers/infiniband/hw/cxgb3/iwch_qp.c Q_PTR2IDX(qhp->wq.sq_wptr, qhp->wq.sq_size_log2);
wq 415 drivers/infiniband/hw/cxgb3/iwch_qp.c if (!qhp->wq.oldest_read)
wq 416 drivers/infiniband/hw/cxgb3/iwch_qp.c qhp->wq.oldest_read = sqp;
wq 421 drivers/infiniband/hw/cxgb3/iwch_qp.c &wr_cnt, &qhp->wq);
wq 436 drivers/infiniband/hw/cxgb3/iwch_qp.c wqe->send.wrid.id0.hi = qhp->wq.sq_wptr;
wq 439 drivers/infiniband/hw/cxgb3/iwch_qp.c sqp->sq_wptr = qhp->wq.sq_wptr;
wq 444 drivers/infiniband/hw/cxgb3/iwch_qp.c Q_GENBIT(qhp->wq.wptr, qhp->wq.size_log2),
wq 449 drivers/infiniband/hw/cxgb3/iwch_qp.c Q_PTR2IDX(qhp->wq.sq_wptr, qhp->wq.sq_size_log2),
wq 453 drivers/infiniband/hw/cxgb3/iwch_qp.c qhp->wq.wptr += wr_cnt;
wq 454 drivers/infiniband/hw/cxgb3/iwch_qp.c ++(qhp->wq.sq_wptr);
wq 457 drivers/infiniband/hw/cxgb3/iwch_qp.c if (cxio_wq_db_enabled(&qhp->wq))
wq 458 drivers/infiniband/hw/cxgb3/iwch_qp.c ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid);
wq 483 drivers/infiniband/hw/cxgb3/iwch_qp.c num_wrs = Q_FREECNT(qhp->wq.rq_rptr, qhp->wq.rq_wptr,
wq 484 drivers/infiniband/hw/cxgb3/iwch_qp.c qhp->wq.rq_size_log2) - 1;
wq 495 drivers/infiniband/hw/cxgb3/iwch_qp.c idx = Q_PTR2IDX(qhp->wq.wptr, qhp->wq.size_log2);
wq 496 drivers/infiniband/hw/cxgb3/iwch_qp.c wqe = (union t3_wr *) (qhp->wq.queue + idx);
wq 509 drivers/infiniband/hw/cxgb3/iwch_qp.c Q_GENBIT(qhp->wq.wptr, qhp->wq.size_log2),
wq 513 drivers/infiniband/hw/cxgb3/iwch_qp.c idx, qhp->wq.rq_wptr, qhp->wq.rq_rptr, wqe);
wq 514 drivers/infiniband/hw/cxgb3/iwch_qp.c ++(qhp->wq.rq_wptr);
wq 515 drivers/infiniband/hw/cxgb3/iwch_qp.c ++(qhp->wq.wptr);
wq 520 drivers/infiniband/hw/cxgb3/iwch_qp.c if (cxio_wq_db_enabled(&qhp->wq))
wq 521 drivers/infiniband/hw/cxgb3/iwch_qp.c ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid);
wq 742 drivers/infiniband/hw/cxgb3/iwch_qp.c cxio_count_rcqes(&rchp->cq, &qhp->wq, &count);
wq 743 drivers/infiniband/hw/cxgb3/iwch_qp.c flushed = cxio_flush_rq(&qhp->wq, &rchp->cq, count);
wq 756 drivers/infiniband/hw/cxgb3/iwch_qp.c cxio_count_scqes(&schp->cq, &qhp->wq, &count);
wq 757 drivers/infiniband/hw/cxgb3/iwch_qp.c flushed = cxio_flush_sq(&qhp->wq, &schp->cq, count);
wq 781 drivers/infiniband/hw/cxgb3/iwch_qp.c cxio_set_wq_in_error(&qhp->wq);
wq 804 drivers/infiniband/hw/cxgb3/iwch_qp.c union t3_wr *wqe = qhp->wq.queue;
wq 823 drivers/infiniband/hw/cxgb3/iwch_qp.c init_attr.qpid = qhp->wq.qpid;
wq 827 drivers/infiniband/hw/cxgb3/iwch_qp.c init_attr.rq_addr = qhp->wq.rq_addr;
wq 828 drivers/infiniband/hw/cxgb3/iwch_qp.c init_attr.rq_size = 1 << qhp->wq.rq_size_log2;
wq 844 drivers/infiniband/hw/cxgb3/iwch_qp.c init_attr.qp_dma_addr = qhp->wq.dma_addr;
wq 845 drivers/infiniband/hw/cxgb3/iwch_qp.c init_attr.qp_dma_size = (1UL << qhp->wq.size_log2);
wq 882 drivers/infiniband/hw/cxgb3/iwch_qp.c qhp, qhp->wq.qpid, qhp->ep, qhp->attr.state,
wq 977 drivers/infiniband/hw/cxgb3/iwch_qp.c cxio_set_wq_in_error(&qhp->wq);
wq 1023 drivers/infiniband/hw/cxgb3/iwch_qp.c if (!Q_EMPTY(qhp->wq.sq_rptr, qhp->wq.sq_wptr) ||
wq 1024 drivers/infiniband/hw/cxgb3/iwch_qp.c !Q_EMPTY(qhp->wq.rq_rptr, qhp->wq.rq_wptr)) {
wq 1046 drivers/infiniband/hw/cxgb3/iwch_qp.c qhp->wq.qpid);
wq 1879 drivers/infiniband/hw/cxgb4/cm.c __func__, ep->com.qp->wq.sq.qid, ep,
wq 1911 drivers/infiniband/hw/cxgb4/cm.c t4_set_wq_in_error(&ep->com.qp->wq, srqidx);
wq 3032 drivers/infiniband/hw/cxgb4/cm.c ep->com.qp->wq.sq.qid);
wq 184 drivers/infiniband/hw/cxgb4/cq.c static void insert_recv_cqe(struct t4_wq *wq, struct t4_cq *cq, u32 srqidx)
wq 189 drivers/infiniband/hw/cxgb4/cq.c wq, cq, cq->sw_cidx, cq->sw_pidx);
wq 195 drivers/infiniband/hw/cxgb4/cq.c CQE_QPID_V(wq->sq.qid));
wq 203 drivers/infiniband/hw/cxgb4/cq.c int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count)
wq 206 drivers/infiniband/hw/cxgb4/cq.c int in_use = wq->rq.in_use - count;
wq 209 drivers/infiniband/hw/cxgb4/cq.c wq, cq, wq->rq.in_use, count);
wq 211 drivers/infiniband/hw/cxgb4/cq.c insert_recv_cqe(wq, cq, 0);
wq 217 drivers/infiniband/hw/cxgb4/cq.c static void insert_sq_cqe(struct t4_wq *wq, struct t4_cq *cq,
wq 223 drivers/infiniband/hw/cxgb4/cq.c wq, cq, cq->sw_cidx, cq->sw_pidx);
wq 229 drivers/infiniband/hw/cxgb4/cq.c CQE_QPID_V(wq->sq.qid));
wq 236 drivers/infiniband/hw/cxgb4/cq.c static void advance_oldest_read(struct t4_wq *wq);
wq 241 drivers/infiniband/hw/cxgb4/cq.c struct t4_wq *wq = &qhp->wq;
wq 247 drivers/infiniband/hw/cxgb4/cq.c if (wq->sq.flush_cidx == -1)
wq 248 drivers/infiniband/hw/cxgb4/cq.c wq->sq.flush_cidx = wq->sq.cidx;
wq 249 drivers/infiniband/hw/cxgb4/cq.c idx = wq->sq.flush_cidx;
wq 250 drivers/infiniband/hw/cxgb4/cq.c while (idx != wq->sq.pidx) {
wq 251 drivers/infiniband/hw/cxgb4/cq.c swsqe = &wq->sq.sw_sq[idx];
wq 253 drivers/infiniband/hw/cxgb4/cq.c insert_sq_cqe(wq, cq, swsqe);
wq 254 drivers/infiniband/hw/cxgb4/cq.c if (wq->sq.oldest_read == swsqe) {
wq 255 drivers/infiniband/hw/cxgb4/cq.c advance_oldest_read(wq);
wq 258 drivers/infiniband/hw/cxgb4/cq.c if (++idx == wq->sq.size)
wq 261 drivers/infiniband/hw/cxgb4/cq.c wq->sq.flush_cidx += flushed;
wq 262 drivers/infiniband/hw/cxgb4/cq.c if (wq->sq.flush_cidx >= wq->sq.size)
wq 263 drivers/infiniband/hw/cxgb4/cq.c wq->sq.flush_cidx -= wq->sq.size;
wq 267 drivers/infiniband/hw/cxgb4/cq.c static void flush_completed_wrs(struct t4_wq *wq, struct t4_cq *cq)
wq 272 drivers/infiniband/hw/cxgb4/cq.c if (wq->sq.flush_cidx == -1)
wq 273 drivers/infiniband/hw/cxgb4/cq.c wq->sq.flush_cidx = wq->sq.cidx;
wq 274 drivers/infiniband/hw/cxgb4/cq.c cidx = wq->sq.flush_cidx;
wq 276 drivers/infiniband/hw/cxgb4/cq.c while (cidx != wq->sq.pidx) {
wq 277 drivers/infiniband/hw/cxgb4/cq.c swsqe = &wq->sq.sw_sq[cidx];
wq 279 drivers/infiniband/hw/cxgb4/cq.c if (++cidx == wq->sq.size)
wq 292 drivers/infiniband/hw/cxgb4/cq.c if (++cidx == wq->sq.size)
wq 294 drivers/infiniband/hw/cxgb4/cq.c wq->sq.flush_cidx = cidx;
wq 300 drivers/infiniband/hw/cxgb4/cq.c static void create_read_req_cqe(struct t4_wq *wq, struct t4_cqe *hw_cqe,
wq 303 drivers/infiniband/hw/cxgb4/cq.c read_cqe->u.scqe.cidx = wq->sq.oldest_read->idx;
wq 304 drivers/infiniband/hw/cxgb4/cq.c read_cqe->len = htonl(wq->sq.oldest_read->read_len);
wq 312 drivers/infiniband/hw/cxgb4/cq.c static void advance_oldest_read(struct t4_wq *wq)
wq 315 drivers/infiniband/hw/cxgb4/cq.c u32 rptr = wq->sq.oldest_read - wq->sq.sw_sq + 1;
wq 317 drivers/infiniband/hw/cxgb4/cq.c if (rptr == wq->sq.size)
wq 319 drivers/infiniband/hw/cxgb4/cq.c while (rptr != wq->sq.pidx) {
wq 320 drivers/infiniband/hw/cxgb4/cq.c wq->sq.oldest_read = &wq->sq.sw_sq[rptr];
wq 322 drivers/infiniband/hw/cxgb4/cq.c if (wq->sq.oldest_read->opcode == FW_RI_READ_REQ)
wq 324 drivers/infiniband/hw/cxgb4/cq.c if (++rptr == wq->sq.size)
wq 327 drivers/infiniband/hw/cxgb4/cq.c wq->sq.oldest_read = NULL;
wq 362 drivers/infiniband/hw/cxgb4/cq.c if (qhp->wq.flushed == 1)
wq 386 drivers/infiniband/hw/cxgb4/cq.c if (!qhp->wq.sq.oldest_read->signaled) {
wq 387 drivers/infiniband/hw/cxgb4/cq.c advance_oldest_read(&qhp->wq);
wq 395 drivers/infiniband/hw/cxgb4/cq.c create_read_req_cqe(&qhp->wq, hw_cqe, &read_cqe);
wq 397 drivers/infiniband/hw/cxgb4/cq.c advance_oldest_read(&qhp->wq);
wq 404 drivers/infiniband/hw/cxgb4/cq.c swsqe = &qhp->wq.sq.sw_sq[CQE_WRID_SQ_IDX(hw_cqe)];
wq 407 drivers/infiniband/hw/cxgb4/cq.c flush_completed_wrs(&qhp->wq, &chp->cq);
wq 422 drivers/infiniband/hw/cxgb4/cq.c static int cqe_completes_wr(struct t4_cqe *cqe, struct t4_wq *wq)
wq 425 drivers/infiniband/hw/cxgb4/cq.c WARN_ONCE(1, "Unexpected DRAIN CQE qp id %u!\n", wq->sq.qid);
wq 438 drivers/infiniband/hw/cxgb4/cq.c if (CQE_SEND_OPCODE(cqe) && RQ_TYPE(cqe) && t4_rq_empty(wq))
wq 443 drivers/infiniband/hw/cxgb4/cq.c void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count)
wq 454 drivers/infiniband/hw/cxgb4/cq.c (CQE_QPID(cqe) == wq->sq.qid) && cqe_completes_wr(cqe, wq))
wq 544 drivers/infiniband/hw/cxgb4/cq.c static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
wq 566 drivers/infiniband/hw/cxgb4/cq.c if (wq == NULL) {
wq 574 drivers/infiniband/hw/cxgb4/cq.c if (wq->flushed && !SW_CQE(hw_cqe)) {
wq 611 drivers/infiniband/hw/cxgb4/cq.c t4_set_wq_in_error(wq, 0);
wq 622 drivers/infiniband/hw/cxgb4/cq.c t4_set_wq_in_error(wq, 0);
wq 630 drivers/infiniband/hw/cxgb4/cq.c if (!wq->sq.oldest_read->signaled) {
wq 631 drivers/infiniband/hw/cxgb4/cq.c advance_oldest_read(wq);
wq 640 drivers/infiniband/hw/cxgb4/cq.c create_read_req_cqe(wq, hw_cqe, &read_cqe);
wq 642 drivers/infiniband/hw/cxgb4/cq.c advance_oldest_read(wq);
wq 645 drivers/infiniband/hw/cxgb4/cq.c if (CQE_STATUS(hw_cqe) || t4_wq_in_error(wq)) {
wq 647 drivers/infiniband/hw/cxgb4/cq.c t4_set_wq_in_error(wq, 0);
wq 662 drivers/infiniband/hw/cxgb4/cq.c CQE_WRID_MSN(hw_cqe) != wq->rq.msn)) {
wq 663 drivers/infiniband/hw/cxgb4/cq.c t4_set_wq_in_error(wq, 0);
wq 680 drivers/infiniband/hw/cxgb4/cq.c if (!SW_CQE(hw_cqe) && (CQE_WRID_SQ_IDX(hw_cqe) != wq->sq.cidx)) {
wq 685 drivers/infiniband/hw/cxgb4/cq.c swsqe = &wq->sq.sw_sq[CQE_WRID_SQ_IDX(hw_cqe)];
wq 710 drivers/infiniband/hw/cxgb4/cq.c if (idx < wq->sq.cidx)
wq 711 drivers/infiniband/hw/cxgb4/cq.c wq->sq.in_use -= wq->sq.size + idx - wq->sq.cidx;
wq 713 drivers/infiniband/hw/cxgb4/cq.c wq->sq.in_use -= idx - wq->sq.cidx;
wq 715 drivers/infiniband/hw/cxgb4/cq.c wq->sq.cidx = (uint16_t)idx;
wq 716 drivers/infiniband/hw/cxgb4/cq.c pr_debug("completing sq idx %u\n", wq->sq.cidx);
wq 717 drivers/infiniband/hw/cxgb4/cq.c *cookie = wq->sq.sw_sq[wq->sq.cidx].wr_id;
wq 719 drivers/infiniband/hw/cxgb4/cq.c c4iw_log_wr_stats(wq, hw_cqe);
wq 720 drivers/infiniband/hw/cxgb4/cq.c t4_sq_consume(wq);
wq 723 drivers/infiniband/hw/cxgb4/cq.c pr_debug("completing rq idx %u\n", wq->rq.cidx);
wq 724 drivers/infiniband/hw/cxgb4/cq.c *cookie = wq->rq.sw_rq[wq->rq.cidx].wr_id;
wq 726 drivers/infiniband/hw/cxgb4/cq.c c4iw_log_wr_stats(wq, hw_cqe);
wq 727 drivers/infiniband/hw/cxgb4/cq.c t4_rq_consume(wq);
wq 731 drivers/infiniband/hw/cxgb4/cq.c wq->rq.msn++;
wq 739 drivers/infiniband/hw/cxgb4/cq.c flush_completed_wrs(wq, cq);
wq 758 drivers/infiniband/hw/cxgb4/cq.c struct t4_wq *wq = qhp ? &qhp->wq : NULL;
wq 764 drivers/infiniband/hw/cxgb4/cq.c ret = poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie, &credit,
wq 765 drivers/infiniband/hw/cxgb4/cq.c srq ? &srq->wq : NULL);
wq 778 drivers/infiniband/hw/cxgb4/cq.c srq->wq.in_use < srq->srq_limit)
wq 1176 drivers/infiniband/hw/cxgb4/cq.c insert_recv_cqe(&qhp->wq, &rchp->cq, srqidx);
wq 92 drivers/infiniband/hw/cxgb4/device.c void c4iw_log_wr_stats(struct t4_wq *wq, struct t4_cqe *cqe)
wq 97 drivers/infiniband/hw/cxgb4/device.c if (!wq->rdev->wr_log)
wq 100 drivers/infiniband/hw/cxgb4/device.c idx = (atomic_inc_return(&wq->rdev->wr_log_idx) - 1) &
wq 101 drivers/infiniband/hw/cxgb4/device.c (wq->rdev->wr_log_size - 1);
wq 102 drivers/infiniband/hw/cxgb4/device.c le.poll_sge_ts = cxgb4_read_sge_timestamp(wq->rdev->lldi.ports[0]);
wq 107 drivers/infiniband/hw/cxgb4/device.c le.qid = wq->sq.qid;
wq 109 drivers/infiniband/hw/cxgb4/device.c le.post_host_time = wq->sq.sw_sq[wq->sq.cidx].host_time;
wq 110 drivers/infiniband/hw/cxgb4/device.c le.post_sge_ts = wq->sq.sw_sq[wq->sq.cidx].sge_ts;
wq 113 drivers/infiniband/hw/cxgb4/device.c le.qid = wq->rq.qid;
wq 115 drivers/infiniband/hw/cxgb4/device.c le.post_host_time = wq->rq.sw_rq[wq->rq.cidx].host_time;
wq 116 drivers/infiniband/hw/cxgb4/device.c le.post_sge_ts = wq->rq.sw_rq[wq->rq.cidx].sge_ts;
wq 119 drivers/infiniband/hw/cxgb4/device.c wq->rdev->wr_log[idx] = le;
wq 250 drivers/infiniband/hw/cxgb4/device.c if (id != qp->wq.sq.qid)
wq 271 drivers/infiniband/hw/cxgb4/device.c qp->wq.sq.qid, qp->srq ? "srq" : "rq",
wq 272 drivers/infiniband/hw/cxgb4/device.c qp->srq ? qp->srq->idx : qp->wq.rq.qid,
wq 274 drivers/infiniband/hw/cxgb4/device.c qp->wq.sq.flags & T4_SQ_ONCHIP,
wq 292 drivers/infiniband/hw/cxgb4/device.c qp->wq.sq.qid, qp->wq.rq.qid,
wq 294 drivers/infiniband/hw/cxgb4/device.c qp->wq.sq.flags & T4_SQ_ONCHIP,
wq 306 drivers/infiniband/hw/cxgb4/device.c qp->wq.sq.qid, qp->wq.rq.qid,
wq 308 drivers/infiniband/hw/cxgb4/device.c qp->wq.sq.flags & T4_SQ_ONCHIP);
wq 1277 drivers/infiniband/hw/cxgb4/device.c t4_disable_wq_db(&qp->wq);
wq 1287 drivers/infiniband/hw/cxgb4/device.c t4_ring_sq_db(&qp->wq, qp->wq.sq.wq_pidx_inc, NULL);
wq 1288 drivers/infiniband/hw/cxgb4/device.c qp->wq.sq.wq_pidx_inc = 0;
wq 1289 drivers/infiniband/hw/cxgb4/device.c t4_ring_rq_db(&qp->wq, qp->wq.rq.wq_pidx_inc, NULL);
wq 1290 drivers/infiniband/hw/cxgb4/device.c qp->wq.rq.wq_pidx_inc = 0;
wq 1325 drivers/infiniband/hw/cxgb4/device.c t4_enable_wq_db(&qp->wq);
wq 1378 drivers/infiniband/hw/cxgb4/device.c qp->wq.sq.qid,
wq 1379 drivers/infiniband/hw/cxgb4/device.c t4_sq_host_wq_pidx(&qp->wq),
wq 1380 drivers/infiniband/hw/cxgb4/device.c t4_sq_wq_size(&qp->wq));
wq 1383 drivers/infiniband/hw/cxgb4/device.c pci_name(ctx->lldi.pdev), qp->wq.sq.qid);
wq 1388 drivers/infiniband/hw/cxgb4/device.c qp->wq.sq.wq_pidx_inc = 0;
wq 1391 drivers/infiniband/hw/cxgb4/device.c qp->wq.rq.qid,
wq 1392 drivers/infiniband/hw/cxgb4/device.c t4_rq_host_wq_pidx(&qp->wq),
wq 1393 drivers/infiniband/hw/cxgb4/device.c t4_rq_wq_size(&qp->wq));
wq 1397 drivers/infiniband/hw/cxgb4/device.c pci_name(ctx->lldi.pdev), qp->wq.rq.qid);
wq 1402 drivers/infiniband/hw/cxgb4/device.c qp->wq.rq.wq_pidx_inc = 0;
wq 211 drivers/infiniband/hw/cxgb4/ev.c CQE_STATUS(err_cqe), qhp->wq.sq.qid);
wq 490 drivers/infiniband/hw/cxgb4/iw_cxgb4.h struct t4_wq wq;
wq 511 drivers/infiniband/hw/cxgb4/iw_cxgb4.h struct t4_srq wq;
wq 1021 drivers/infiniband/hw/cxgb4/iw_cxgb4.h void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count);
wq 1023 drivers/infiniband/hw/cxgb4/iw_cxgb4.h int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count);
wq 1043 drivers/infiniband/hw/cxgb4/iw_cxgb4.h extern void c4iw_log_wr_stats(struct t4_wq *wq, struct t4_cqe *cqe);
wq 150 drivers/infiniband/hw/cxgb4/qp.c static int destroy_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
wq 157 drivers/infiniband/hw/cxgb4/qp.c dealloc_sq(rdev, &wq->sq);
wq 158 drivers/infiniband/hw/cxgb4/qp.c kfree(wq->sq.sw_sq);
wq 159 drivers/infiniband/hw/cxgb4/qp.c c4iw_put_qpid(rdev, wq->sq.qid, uctx);
wq 163 drivers/infiniband/hw/cxgb4/qp.c wq->rq.memsize, wq->rq.queue,
wq 164 drivers/infiniband/hw/cxgb4/qp.c dma_unmap_addr(&wq->rq, mapping));
wq 165 drivers/infiniband/hw/cxgb4/qp.c c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
wq 166 drivers/infiniband/hw/cxgb4/qp.c kfree(wq->rq.sw_rq);
wq 167 drivers/infiniband/hw/cxgb4/qp.c c4iw_put_qpid(rdev, wq->rq.qid, uctx);
wq 199 drivers/infiniband/hw/cxgb4/qp.c static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
wq 213 drivers/infiniband/hw/cxgb4/qp.c wq->sq.qid = c4iw_get_qpid(rdev, uctx);
wq 214 drivers/infiniband/hw/cxgb4/qp.c if (!wq->sq.qid)
wq 218 drivers/infiniband/hw/cxgb4/qp.c wq->rq.qid = c4iw_get_qpid(rdev, uctx);
wq 219 drivers/infiniband/hw/cxgb4/qp.c if (!wq->rq.qid) {
wq 226 drivers/infiniband/hw/cxgb4/qp.c wq->sq.sw_sq = kcalloc(wq->sq.size, sizeof(*wq->sq.sw_sq),
wq 228 drivers/infiniband/hw/cxgb4/qp.c if (!wq->sq.sw_sq) {
wq 234 drivers/infiniband/hw/cxgb4/qp.c wq->rq.sw_rq = kcalloc(wq->rq.size,
wq 235 drivers/infiniband/hw/cxgb4/qp.c sizeof(*wq->rq.sw_rq),
wq 237 drivers/infiniband/hw/cxgb4/qp.c if (!wq->rq.sw_rq) {
wq 248 drivers/infiniband/hw/cxgb4/qp.c wq->rq.rqt_size =
wq 249 drivers/infiniband/hw/cxgb4/qp.c roundup_pow_of_two(max_t(u16, wq->rq.size, 16));
wq 250 drivers/infiniband/hw/cxgb4/qp.c wq->rq.rqt_hwaddr = c4iw_rqtpool_alloc(rdev, wq->rq.rqt_size);
wq 251 drivers/infiniband/hw/cxgb4/qp.c if (!wq->rq.rqt_hwaddr) {
wq 257 drivers/infiniband/hw/cxgb4/qp.c ret = alloc_sq(rdev, &wq->sq, user);
wq 260 drivers/infiniband/hw/cxgb4/qp.c memset(wq->sq.queue, 0, wq->sq.memsize);
wq 261 drivers/infiniband/hw/cxgb4/qp.c dma_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr);
wq 264 drivers/infiniband/hw/cxgb4/qp.c wq->rq.queue = dma_alloc_coherent(&rdev->lldi.pdev->dev,
wq 265 drivers/infiniband/hw/cxgb4/qp.c wq->rq.memsize,
wq 266 drivers/infiniband/hw/cxgb4/qp.c &wq->rq.dma_addr,
wq 268 drivers/infiniband/hw/cxgb4/qp.c if (!wq->rq.queue) {
wq 273 drivers/infiniband/hw/cxgb4/qp.c wq->sq.queue,
wq 274 drivers/infiniband/hw/cxgb4/qp.c (unsigned long long)virt_to_phys(wq->sq.queue),
wq 275 drivers/infiniband/hw/cxgb4/qp.c wq->rq.queue,
wq 276 drivers/infiniband/hw/cxgb4/qp.c (unsigned long long)virt_to_phys(wq->rq.queue));
wq 277 drivers/infiniband/hw/cxgb4/qp.c dma_unmap_addr_set(&wq->rq, mapping, wq->rq.dma_addr);
wq 280 drivers/infiniband/hw/cxgb4/qp.c wq->db = rdev->lldi.db_reg;
wq 282 drivers/infiniband/hw/cxgb4/qp.c wq->sq.bar2_va = c4iw_bar2_addrs(rdev, wq->sq.qid,
wq 284 drivers/infiniband/hw/cxgb4/qp.c &wq->sq.bar2_qid,
wq 285 drivers/infiniband/hw/cxgb4/qp.c user ? &wq->sq.bar2_pa : NULL);
wq 287 drivers/infiniband/hw/cxgb4/qp.c wq->rq.bar2_va = c4iw_bar2_addrs(rdev, wq->rq.qid,
wq 289 drivers/infiniband/hw/cxgb4/qp.c &wq->rq.bar2_qid,
wq 290 drivers/infiniband/hw/cxgb4/qp.c user ? &wq->rq.bar2_pa : NULL);
wq 295 drivers/infiniband/hw/cxgb4/qp.c if (user && (!wq->sq.bar2_pa || (need_rq && !wq->rq.bar2_pa))) {
wq 297 drivers/infiniband/hw/cxgb4/qp.c pci_name(rdev->lldi.pdev), wq->sq.qid, wq->rq.qid);
wq 301 drivers/infiniband/hw/cxgb4/qp.c wq->rdev = rdev;
wq 302 drivers/infiniband/hw/cxgb4/qp.c wq->rq.msn = 1;
wq 329 drivers/infiniband/hw/cxgb4/qp.c eqsize = wq->sq.size * T4_SQ_NUM_SLOTS +
wq 336 drivers/infiniband/hw/cxgb4/qp.c (t4_sq_onchip(&wq->sq) ? FW_RI_RES_WR_ONCHIP_F : 0) |
wq 342 drivers/infiniband/hw/cxgb4/qp.c (t4_sq_onchip(&wq->sq) ? FW_RI_RES_WR_FBMAX_V(2) :
wq 347 drivers/infiniband/hw/cxgb4/qp.c res->u.sqrq.eqid = cpu_to_be32(wq->sq.qid);
wq 348 drivers/infiniband/hw/cxgb4/qp.c res->u.sqrq.eqaddr = cpu_to_be64(wq->sq.dma_addr);
wq 358 drivers/infiniband/hw/cxgb4/qp.c eqsize = wq->rq.size * T4_RQ_NUM_SLOTS +
wq 376 drivers/infiniband/hw/cxgb4/qp.c res->u.sqrq.eqid = cpu_to_be32(wq->rq.qid);
wq 377 drivers/infiniband/hw/cxgb4/qp.c res->u.sqrq.eqaddr = cpu_to_be64(wq->rq.dma_addr);
wq 381 drivers/infiniband/hw/cxgb4/qp.c ret = c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, wq->sq.qid, __func__);
wq 386 drivers/infiniband/hw/cxgb4/qp.c wq->sq.qid, wq->rq.qid, wq->db,
wq 387 drivers/infiniband/hw/cxgb4/qp.c wq->sq.bar2_va, wq->rq.bar2_va);
wq 393 drivers/infiniband/hw/cxgb4/qp.c wq->rq.memsize, wq->rq.queue,
wq 394 drivers/infiniband/hw/cxgb4/qp.c dma_unmap_addr(&wq->rq, mapping));
wq 396 drivers/infiniband/hw/cxgb4/qp.c dealloc_sq(rdev, &wq->sq);
wq 399 drivers/infiniband/hw/cxgb4/qp.c c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
wq 402 drivers/infiniband/hw/cxgb4/qp.c kfree(wq->rq.sw_rq);
wq 404 drivers/infiniband/hw/cxgb4/qp.c kfree(wq->sq.sw_sq);
wq 407 drivers/infiniband/hw/cxgb4/qp.c c4iw_put_qpid(rdev, wq->rq.qid, uctx);
wq 409 drivers/infiniband/hw/cxgb4/qp.c c4iw_put_qpid(rdev, wq->sq.qid, uctx);
wq 705 drivers/infiniband/hw/cxgb4/qp.c wqe = (union t4_wr *)((u8 *)qhp->wq.sq.queue +
wq 706 drivers/infiniband/hw/cxgb4/qp.c qhp->wq.sq.wq_pidx * T4_EQ_ENTRY_SIZE);
wq 707 drivers/infiniband/hw/cxgb4/qp.c build_rdma_write_cmpl(&qhp->wq.sq, &wqe->write_cmpl, wr, &len16);
wq 710 drivers/infiniband/hw/cxgb4/qp.c swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx];
wq 712 drivers/infiniband/hw/cxgb4/qp.c swsqe->idx = qhp->wq.sq.pidx;
wq 723 drivers/infiniband/hw/cxgb4/qp.c write_wrid = qhp->wq.sq.pidx;
wq 726 drivers/infiniband/hw/cxgb4/qp.c qhp->wq.sq.in_use++;
wq 727 drivers/infiniband/hw/cxgb4/qp.c if (++qhp->wq.sq.pidx == qhp->wq.sq.size)
wq 728 drivers/infiniband/hw/cxgb4/qp.c qhp->wq.sq.pidx = 0;
wq 731 drivers/infiniband/hw/cxgb4/qp.c swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx];
wq 736 drivers/infiniband/hw/cxgb4/qp.c swsqe->idx = qhp->wq.sq.pidx;
wq 748 drivers/infiniband/hw/cxgb4/qp.c wqe->write_cmpl.wrid_send = qhp->wq.sq.pidx;
wq 752 drivers/infiniband/hw/cxgb4/qp.c t4_sq_produce(&qhp->wq, len16);
wq 755 drivers/infiniband/hw/cxgb4/qp.c t4_ring_sq_db(&qhp->wq, idx, wqe);
wq 763 drivers/infiniband/hw/cxgb4/qp.c ret = build_isgl((__be64 *)qhp->wq.rq.queue,
wq 764 drivers/infiniband/hw/cxgb4/qp.c (__be64 *)&qhp->wq.rq.queue[qhp->wq.rq.size],
wq 918 drivers/infiniband/hw/cxgb4/qp.c t4_ring_sq_db(&qhp->wq, inc, NULL);
wq 921 drivers/infiniband/hw/cxgb4/qp.c qhp->wq.sq.wq_pidx_inc += inc;
wq 935 drivers/infiniband/hw/cxgb4/qp.c t4_ring_rq_db(&qhp->wq, inc, NULL);
wq 938 drivers/infiniband/hw/cxgb4/qp.c qhp->wq.rq.wq_pidx_inc += inc;
wq 1000 drivers/infiniband/hw/cxgb4/qp.c CQE_QPID_V(qhp->wq.sq.qid));
wq 1051 drivers/infiniband/hw/cxgb4/qp.c CQE_QPID_V(qhp->wq.sq.qid));
wq 1099 drivers/infiniband/hw/cxgb4/qp.c if (qhp->wq.flushed) {
wq 1104 drivers/infiniband/hw/cxgb4/qp.c num_wrs = t4_sq_avail(&qhp->wq);
wq 1141 drivers/infiniband/hw/cxgb4/qp.c wqe = (union t4_wr *)((u8 *)qhp->wq.sq.queue +
wq 1142 drivers/infiniband/hw/cxgb4/qp.c qhp->wq.sq.wq_pidx * T4_EQ_ENTRY_SIZE);
wq 1149 drivers/infiniband/hw/cxgb4/qp.c swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx];
wq 1160 drivers/infiniband/hw/cxgb4/qp.c err = build_rdma_send(&qhp->wq.sq, wqe, wr, &len16);
wq 1172 drivers/infiniband/hw/cxgb4/qp.c err = build_rdma_write(&qhp->wq.sq, wqe, wr, &len16);
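The qp.c:393-409 entries above are create_qp()'s error path: each resource acquired earlier is released in reverse order through a chain of goto labels, mirroring destroy_qp() at qp.c:150-167. A compilable sketch of that unwind pattern, with invented resource names standing in for the driver's queues:

/* Goto-based unwind in acquisition order, modelled on the
 * create_qp()/destroy_qp() entries above.  Names are illustrative. */
#include <stdlib.h>

struct queues { void *sw_sq, *sw_rq, *rq_queue; };

static int create_queues(struct queues *q)
{
        q->sw_sq = malloc(64);
        if (!q->sw_sq)
                goto err;
        q->sw_rq = malloc(64);
        if (!q->sw_rq)
                goto err_free_sw_sq;
        q->rq_queue = malloc(256);
        if (!q->rq_queue)
                goto err_free_sw_rq;
        return 0;

err_free_sw_rq:
        free(q->sw_rq);
err_free_sw_sq:
        free(q->sw_sq);
err:
        return -1;      /* a negative errno in kernel code */
}

int main(void)
{
        struct queues q;

        if (create_queues(&q))
                return 1;
        free(q.rq_queue);       /* teardown repeats the unwind order */
        free(q.sw_rq);
        free(q.sw_sq);
        return 0;
}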
wq 1188 drivers/infiniband/hw/cxgb4/qp.c if (!qhp->wq.sq.oldest_read)
wq 1189 drivers/infiniband/hw/cxgb4/qp.c qhp->wq.sq.oldest_read = swsqe;
wq 1202 drivers/infiniband/hw/cxgb4/qp.c err = build_memreg(&qhp->wq.sq, wqe, reg_wr(wr),
wq 1228 drivers/infiniband/hw/cxgb4/qp.c swsqe->idx = qhp->wq.sq.pidx;
wq 1240 drivers/infiniband/hw/cxgb4/qp.c init_wr_hdr(wqe, qhp->wq.sq.pidx, fw_opcode, fw_flags, len16);
wq 1243 drivers/infiniband/hw/cxgb4/qp.c (unsigned long long)wr->wr_id, qhp->wq.sq.pidx,
wq 1247 drivers/infiniband/hw/cxgb4/qp.c t4_sq_produce(&qhp->wq, len16);
wq 1251 drivers/infiniband/hw/cxgb4/qp.c t4_ring_sq_db(&qhp->wq, idx, wqe);
wq 1278 drivers/infiniband/hw/cxgb4/qp.c if (qhp->wq.flushed) {
wq 1283 drivers/infiniband/hw/cxgb4/qp.c num_wrs = t4_rq_avail(&qhp->wq);
wq 1295 drivers/infiniband/hw/cxgb4/qp.c wqe = (union t4_recv_wr *)((u8 *)qhp->wq.rq.queue +
wq 1296 drivers/infiniband/hw/cxgb4/qp.c qhp->wq.rq.wq_pidx *
wq 1307 drivers/infiniband/hw/cxgb4/qp.c qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].wr_id = wr->wr_id;
wq 1309 drivers/infiniband/hw/cxgb4/qp.c qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].sge_ts =
wq 1312 drivers/infiniband/hw/cxgb4/qp.c qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].host_time =
wq 1318 drivers/infiniband/hw/cxgb4/qp.c wqe->recv.wrid = qhp->wq.rq.pidx;
wq 1324 drivers/infiniband/hw/cxgb4/qp.c (unsigned long long)wr->wr_id, qhp->wq.rq.pidx);
wq 1325 drivers/infiniband/hw/cxgb4/qp.c t4_rq_produce(&qhp->wq, len16);
wq 1331 drivers/infiniband/hw/cxgb4/qp.c t4_ring_rq_db(&qhp->wq, idx, wqe);
wq 1369 drivers/infiniband/hw/cxgb4/qp.c num_wrs = t4_srq_avail(&srq->wq);
wq 1392 drivers/infiniband/hw/cxgb4/qp.c wqe->recv.wrid = srq->wq.pidx;
wq 1398 drivers/infiniband/hw/cxgb4/qp.c if (srq->wq.ooo_count ||
wq 1399 drivers/infiniband/hw/cxgb4/qp.c srq->wq.pending_in_use ||
wq 1400 drivers/infiniband/hw/cxgb4/qp.c srq->wq.sw_rq[srq->wq.pidx].valid) {
wq 1401 drivers/infiniband/hw/cxgb4/qp.c defer_srq_wr(&srq->wq, wqe, wr->wr_id, len16);
wq 1403 drivers/infiniband/hw/cxgb4/qp.c srq->wq.sw_rq[srq->wq.pidx].wr_id = wr->wr_id;
wq 1404 drivers/infiniband/hw/cxgb4/qp.c srq->wq.sw_rq[srq->wq.pidx].valid = 1;
wq 1405 drivers/infiniband/hw/cxgb4/qp.c c4iw_copy_wr_to_srq(&srq->wq, wqe, len16);
wq 1407 drivers/infiniband/hw/cxgb4/qp.c __func__, srq->wq.cidx,
wq 1408 drivers/infiniband/hw/cxgb4/qp.c srq->wq.pidx, srq->wq.wq_pidx,
wq 1409 drivers/infiniband/hw/cxgb4/qp.c srq->wq.in_use,
wq 1411 drivers/infiniband/hw/cxgb4/qp.c t4_srq_produce(&srq->wq, len16);
wq 1418 drivers/infiniband/hw/cxgb4/qp.c t4_ring_srq_db(&srq->wq, idx, len16, wqe);
wq 1566 drivers/infiniband/hw/cxgb4/qp.c pr_debug("qhp %p qid 0x%x tid %u\n", qhp, qhp->wq.sq.qid,
wq 1610 drivers/infiniband/hw/cxgb4/qp.c if (qhp->wq.flushed) {
wq 1617 drivers/infiniband/hw/cxgb4/qp.c qhp->wq.flushed = 1;
wq 1618 drivers/infiniband/hw/cxgb4/qp.c t4_set_wq_in_error(&qhp->wq, 0);
wq 1622 drivers/infiniband/hw/cxgb4/qp.c c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count);
wq 1623 drivers/infiniband/hw/cxgb4/qp.c rq_flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count);
wq 1670 drivers/infiniband/hw/cxgb4/qp.c if (qhp->wq.flushed)
wq 1673 drivers/infiniband/hw/cxgb4/qp.c qhp->wq.flushed = 1;
wq 1674 drivers/infiniband/hw/cxgb4/qp.c t4_set_wq_in_error(&qhp->wq, 0);
wq 1698 drivers/infiniband/hw/cxgb4/qp.c pr_debug("qhp %p qid 0x%x tid %u\n", qhp, qhp->wq.sq.qid, ep->hwtid);
wq 1718 drivers/infiniband/hw/cxgb4/qp.c qhp->ep->hwtid, qhp->wq.sq.qid, __func__);
wq 1755 drivers/infiniband/hw/cxgb4/qp.c qhp->wq.sq.qid, qhp->ep->hwtid, qhp->ep->ird, qhp->ep->ord);
wq 1798 drivers/infiniband/hw/cxgb4/qp.c wqe->u.init.nrqe = cpu_to_be16(t4_rqes_posted(&qhp->wq));
wq 1800 drivers/infiniband/hw/cxgb4/qp.c wqe->u.init.qpid = cpu_to_be32(qhp->wq.sq.qid);
wq 1801 drivers/infiniband/hw/cxgb4/qp.c wqe->u.init.sq_eqid = cpu_to_be32(qhp->wq.sq.qid);
wq 1806 drivers/infiniband/hw/cxgb4/qp.c wqe->u.init.rq_eqid = cpu_to_be32(qhp->wq.rq.qid);
wq 1807 drivers/infiniband/hw/cxgb4/qp.c wqe->u.init.hwrqsize = cpu_to_be32(qhp->wq.rq.rqt_size);
wq 1808 drivers/infiniband/hw/cxgb4/qp.c wqe->u.init.hwrqaddr = cpu_to_be32(qhp->wq.rq.rqt_hwaddr -
wq 1821 drivers/infiniband/hw/cxgb4/qp.c qhp->ep->hwtid, qhp->wq.sq.qid, __func__);
wq 1845 drivers/infiniband/hw/cxgb4/qp.c qhp, qhp->wq.sq.qid, qhp->wq.rq.qid, qhp->ep, qhp->attr.state,
wq 1933 drivers/infiniband/hw/cxgb4/qp.c t4_set_wq_in_error(&qhp->wq, 0);
wq 1946 drivers/infiniband/hw/cxgb4/qp.c t4_set_wq_in_error(&qhp->wq, 0);
wq 1963 drivers/infiniband/hw/cxgb4/qp.c t4_set_wq_in_error(&qhp->wq, 0);
wq 2009 drivers/infiniband/hw/cxgb4/qp.c if (!t4_sq_empty(&qhp->wq) || !t4_rq_empty(&qhp->wq)) {
wq 2031 drivers/infiniband/hw/cxgb4/qp.c qhp->wq.sq.qid);
wq 2089 drivers/infiniband/hw/cxgb4/qp.c __xa_erase(&rhp->qps, qhp->wq.sq.qid);
wq 2099 drivers/infiniband/hw/cxgb4/qp.c pr_debug("ib_qp %p qpid 0x%0x\n", ib_qp, qhp->wq.sq.qid);
wq 2102 drivers/infiniband/hw/cxgb4/qp.c destroy_qp(&rhp->rdev, &qhp->wq,
wq 2166 drivers/infiniband/hw/cxgb4/qp.c qhp->wq.sq.size = sqsize;
wq 2167 drivers/infiniband/hw/cxgb4/qp.c qhp->wq.sq.memsize =
wq 2169 drivers/infiniband/hw/cxgb4/qp.c sizeof(*qhp->wq.sq.queue) + 16 * sizeof(__be64);
wq 2170 drivers/infiniband/hw/cxgb4/qp.c qhp->wq.sq.flush_cidx = -1;
wq 2172 drivers/infiniband/hw/cxgb4/qp.c qhp->wq.rq.size = rqsize;
wq 2173 drivers/infiniband/hw/cxgb4/qp.c qhp->wq.rq.memsize =
wq 2175 drivers/infiniband/hw/cxgb4/qp.c sizeof(*qhp->wq.rq.queue);
wq 2179 drivers/infiniband/hw/cxgb4/qp.c qhp->wq.sq.memsize = roundup(qhp->wq.sq.memsize, PAGE_SIZE);
wq 2181 drivers/infiniband/hw/cxgb4/qp.c qhp->wq.rq.memsize =
wq 2182 drivers/infiniband/hw/cxgb4/qp.c roundup(qhp->wq.rq.memsize, PAGE_SIZE);
wq 2185 drivers/infiniband/hw/cxgb4/qp.c ret = create_qp(&rhp->rdev, &qhp->wq, &schp->cq, &rchp->cq,
wq 2220 drivers/infiniband/hw/cxgb4/qp.c ret = xa_insert_irq(&rhp->qps, qhp->wq.sq.qid, qhp, GFP_KERNEL);
wq 2251 drivers/infiniband/hw/cxgb4/qp.c if (t4_sq_onchip(&qhp->wq.sq)) {
wq 2263 drivers/infiniband/hw/cxgb4/qp.c uresp.sqid = qhp->wq.sq.qid;
wq 2264 drivers/infiniband/hw/cxgb4/qp.c uresp.sq_size = qhp->wq.sq.size;
wq 2265 drivers/infiniband/hw/cxgb4/qp.c uresp.sq_memsize = qhp->wq.sq.memsize;
wq 2267 drivers/infiniband/hw/cxgb4/qp.c uresp.rqid = qhp->wq.rq.qid;
wq 2268 drivers/infiniband/hw/cxgb4/qp.c uresp.rq_size = qhp->wq.rq.size;
wq 2269 drivers/infiniband/hw/cxgb4/qp.c uresp.rq_memsize = qhp->wq.rq.memsize;
wq 2293 drivers/infiniband/hw/cxgb4/qp.c sq_key_mm->addr = qhp->wq.sq.phys_addr;
wq 2294 drivers/infiniband/hw/cxgb4/qp.c sq_key_mm->len = PAGE_ALIGN(qhp->wq.sq.memsize);
wq 2298 drivers/infiniband/hw/cxgb4/qp.c rq_key_mm->addr = virt_to_phys(qhp->wq.rq.queue);
wq 2299 drivers/infiniband/hw/cxgb4/qp.c rq_key_mm->len = PAGE_ALIGN(qhp->wq.rq.memsize);
wq 2303 drivers/infiniband/hw/cxgb4/qp.c sq_db_key_mm->addr = (u64)(unsigned long)qhp->wq.sq.bar2_pa;
wq 2309 drivers/infiniband/hw/cxgb4/qp.c (u64)(unsigned long)qhp->wq.rq.bar2_pa;
wq 2325 drivers/infiniband/hw/cxgb4/qp.c qhp->wq.qp_errp =
wq 2326 drivers/infiniband/hw/cxgb4/qp.c &qhp->wq.rq.queue[qhp->wq.rq.size].status.qp_err;
wq 2328 drivers/infiniband/hw/cxgb4/qp.c qhp->wq.qp_errp =
wq 2329 drivers/infiniband/hw/cxgb4/qp.c &qhp->wq.sq.queue[qhp->wq.sq.size].status.qp_err;
wq 2330 drivers/infiniband/hw/cxgb4/qp.c qhp->wq.srqidxp =
wq 2331 drivers/infiniband/hw/cxgb4/qp.c &qhp->wq.sq.queue[qhp->wq.sq.size].status.srqidx;
wq 2334 drivers/infiniband/hw/cxgb4/qp.c qhp->ibqp.qp_num = qhp->wq.sq.qid;
wq 2339 drivers/infiniband/hw/cxgb4/qp.c qhp->wq.sq.qid, qhp->wq.sq.size, qhp->wq.sq.memsize,
wq 2340 drivers/infiniband/hw/cxgb4/qp.c attrs->cap.max_send_wr, qhp->wq.rq.qid, qhp->wq.rq.size,
wq 2341 drivers/infiniband/hw/cxgb4/qp.c qhp->wq.rq.memsize, attrs->cap.max_recv_wr);
wq 2356 drivers/infiniband/hw/cxgb4/qp.c xa_erase_irq(&rhp->qps, qhp->wq.sq.qid);
wq 2358 drivers/infiniband/hw/cxgb4/qp.c destroy_qp(&rhp->rdev, &qhp->wq,
wq 2485 drivers/infiniband/hw/cxgb4/qp.c struct t4_srq *wq = &srq->wq;
wq 2504 drivers/infiniband/hw/cxgb4/qp.c res->u.srq.eqid = cpu_to_be32(wq->qid);
wq 2510 drivers/infiniband/hw/cxgb4/qp.c wq->memsize, wq->queue,
wq 2511 drivers/infiniband/hw/cxgb4/qp.c dma_unmap_addr(wq, mapping));
wq 2512 drivers/infiniband/hw/cxgb4/qp.c c4iw_rqtpool_free(rdev, wq->rqt_hwaddr, wq->rqt_size);
wq 2513 drivers/infiniband/hw/cxgb4/qp.c kfree(wq->sw_rq);
wq 2514 drivers/infiniband/hw/cxgb4/qp.c c4iw_put_qpid(rdev, wq->qid, uctx);
wq 2522 drivers/infiniband/hw/cxgb4/qp.c struct t4_srq *wq = &srq->wq;
wq 2530 drivers/infiniband/hw/cxgb4/qp.c wq->qid = c4iw_get_qpid(rdev, uctx);
wq 2531 drivers/infiniband/hw/cxgb4/qp.c if (!wq->qid)
wq 2535 drivers/infiniband/hw/cxgb4/qp.c wq->sw_rq = kcalloc(wq->size, sizeof(*wq->sw_rq),
wq 2537 drivers/infiniband/hw/cxgb4/qp.c if (!wq->sw_rq)
wq 2539 drivers/infiniband/hw/cxgb4/qp.c wq->pending_wrs = kcalloc(srq->wq.size,
wq 2540 drivers/infiniband/hw/cxgb4/qp.c sizeof(*srq->wq.pending_wrs),
wq 2542 drivers/infiniband/hw/cxgb4/qp.c if (!wq->pending_wrs)
wq 2546 drivers/infiniband/hw/cxgb4/qp.c wq->rqt_size = wq->size;
wq 2547 drivers/infiniband/hw/cxgb4/qp.c wq->rqt_hwaddr = c4iw_rqtpool_alloc(rdev, wq->rqt_size);
wq 2548 drivers/infiniband/hw/cxgb4/qp.c if (!wq->rqt_hwaddr)
wq 2550 drivers/infiniband/hw/cxgb4/qp.c wq->rqt_abs_idx = (wq->rqt_hwaddr - rdev->lldi.vr->rq.start) >>
wq 2553 drivers/infiniband/hw/cxgb4/qp.c wq->queue = dma_alloc_coherent(&rdev->lldi.pdev->dev, wq->memsize,
wq 2554 drivers/infiniband/hw/cxgb4/qp.c &wq->dma_addr, GFP_KERNEL);
wq 2555 drivers/infiniband/hw/cxgb4/qp.c if (!wq->queue)
wq 2558 drivers/infiniband/hw/cxgb4/qp.c dma_unmap_addr_set(wq, mapping, wq->dma_addr);
wq 2560 drivers/infiniband/hw/cxgb4/qp.c wq->bar2_va = c4iw_bar2_addrs(rdev, wq->qid, CXGB4_BAR2_QTYPE_EGRESS,
wq 2561 drivers/infiniband/hw/cxgb4/qp.c &wq->bar2_qid,
wq 2562 drivers/infiniband/hw/cxgb4/qp.c user ? &wq->bar2_pa : NULL);
wq 2568 drivers/infiniband/hw/cxgb4/qp.c if (user && !wq->bar2_va) {
wq 2570 drivers/infiniband/hw/cxgb4/qp.c pci_name(rdev->lldi.pdev), wq->qid);
wq 2597 drivers/infiniband/hw/cxgb4/qp.c eqsize = wq->size * T4_RQ_NUM_SLOTS +
wq 2599 drivers/infiniband/hw/cxgb4/qp.c res->u.srq.eqid = cpu_to_be32(wq->qid);
wq 2614 drivers/infiniband/hw/cxgb4/qp.c res->u.srq.eqaddr = cpu_to_be64(wq->dma_addr);
wq 2617 drivers/infiniband/hw/cxgb4/qp.c res->u.srq.hwsrqsize = cpu_to_be32(wq->rqt_size);
wq 2618 drivers/infiniband/hw/cxgb4/qp.c res->u.srq.hwsrqaddr = cpu_to_be32(wq->rqt_hwaddr -
wq 2623 drivers/infiniband/hw/cxgb4/qp.c ret = c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, wq->qid, __func__);
wq 2629 drivers/infiniband/hw/cxgb4/qp.c __func__, srq->idx, wq->qid, srq->pdid, wq->queue,
wq 2630 drivers/infiniband/hw/cxgb4/qp.c (u64)virt_to_phys(wq->queue), wq->bar2_va,
wq 2631 drivers/infiniband/hw/cxgb4/qp.c wq->rqt_hwaddr, wq->rqt_size);
wq 2636 drivers/infiniband/hw/cxgb4/qp.c wq->memsize, wq->queue,
wq 2637 drivers/infiniband/hw/cxgb4/qp.c dma_unmap_addr(wq, mapping));
wq 2639 drivers/infiniband/hw/cxgb4/qp.c c4iw_rqtpool_free(rdev, wq->rqt_hwaddr, wq->rqt_size);
wq 2642 drivers/infiniband/hw/cxgb4/qp.c kfree(wq->pending_wrs);
wq 2645 drivers/infiniband/hw/cxgb4/qp.c kfree(wq->sw_rq);
wq 2647 drivers/infiniband/hw/cxgb4/qp.c c4iw_put_qpid(rdev, wq->qid, uctx);
wq 2724 drivers/infiniband/hw/cxgb4/qp.c srq->wq.size = rqsize;
wq 2725 drivers/infiniband/hw/cxgb4/qp.c srq->wq.memsize =
wq 2727 drivers/infiniband/hw/cxgb4/qp.c sizeof(*srq->wq.queue);
wq 2729 drivers/infiniband/hw/cxgb4/qp.c srq->wq.memsize = roundup(srq->wq.memsize, PAGE_SIZE);
wq 2754 drivers/infiniband/hw/cxgb4/qp.c uresp.srqid = srq->wq.qid;
wq 2755 drivers/infiniband/hw/cxgb4/qp.c uresp.srq_size = srq->wq.size;
wq 2756 drivers/infiniband/hw/cxgb4/qp.c uresp.srq_memsize = srq->wq.memsize;
wq 2757 drivers/infiniband/hw/cxgb4/qp.c uresp.rqt_abs_idx = srq->wq.rqt_abs_idx;
wq 2768 drivers/infiniband/hw/cxgb4/qp.c srq_key_mm->addr = virt_to_phys(srq->wq.queue);
wq 2769 drivers/infiniband/hw/cxgb4/qp.c srq_key_mm->len = PAGE_ALIGN(srq->wq.memsize);
wq 2772 drivers/infiniband/hw/cxgb4/qp.c srq_db_key_mm->addr = (u64)(unsigned long)srq->wq.bar2_pa;
wq 2778 drivers/infiniband/hw/cxgb4/qp.c __func__, srq->wq.qid, srq->idx, srq->wq.size,
wq 2779 drivers/infiniband/hw/cxgb4/qp.c (unsigned long)srq->wq.memsize, attrs->attr.max_wr);
wq 2809 drivers/infiniband/hw/cxgb4/qp.c pr_debug("%s id %d\n", __func__, srq->wq.qid);
wq 39 drivers/infiniband/hw/cxgb4/restrack.c static int fill_sq(struct sk_buff *msg, struct t4_wq *wq)
wq 42 drivers/infiniband/hw/cxgb4/restrack.c if (rdma_nl_put_driver_u32(msg, "sqid", wq->sq.qid))
wq 44 drivers/infiniband/hw/cxgb4/restrack.c if (rdma_nl_put_driver_u32(msg, "flushed", wq->flushed))
wq 46 drivers/infiniband/hw/cxgb4/restrack.c if (rdma_nl_put_driver_u32(msg, "memsize", wq->sq.memsize))
wq 48 drivers/infiniband/hw/cxgb4/restrack.c if (rdma_nl_put_driver_u32(msg, "cidx", wq->sq.cidx))
wq 50 drivers/infiniband/hw/cxgb4/restrack.c if (rdma_nl_put_driver_u32(msg, "pidx", wq->sq.pidx))
wq 52 drivers/infiniband/hw/cxgb4/restrack.c if (rdma_nl_put_driver_u32(msg, "wq_pidx", wq->sq.wq_pidx))
wq 54 drivers/infiniband/hw/cxgb4/restrack.c if (rdma_nl_put_driver_u32(msg, "flush_cidx", wq->sq.flush_cidx))
wq 56 drivers/infiniband/hw/cxgb4/restrack.c if (rdma_nl_put_driver_u32(msg, "in_use", wq->sq.in_use))
wq 58 drivers/infiniband/hw/cxgb4/restrack.c if (rdma_nl_put_driver_u32(msg, "size", wq->sq.size))
wq 60 drivers/infiniband/hw/cxgb4/restrack.c if (rdma_nl_put_driver_u32_hex(msg, "flags", wq->sq.flags))
wq 67 drivers/infiniband/hw/cxgb4/restrack.c static int fill_rq(struct sk_buff *msg, struct t4_wq *wq)
wq 70 drivers/infiniband/hw/cxgb4/restrack.c if (rdma_nl_put_driver_u32(msg, "rqid", wq->rq.qid))
wq 72 drivers/infiniband/hw/cxgb4/restrack.c if (rdma_nl_put_driver_u32(msg, "memsize", wq->rq.memsize))
wq 74 drivers/infiniband/hw/cxgb4/restrack.c if (rdma_nl_put_driver_u32(msg, "cidx", wq->rq.cidx))
wq 76 drivers/infiniband/hw/cxgb4/restrack.c if (rdma_nl_put_driver_u32(msg, "pidx", wq->rq.pidx))
wq 78 drivers/infiniband/hw/cxgb4/restrack.c if (rdma_nl_put_driver_u32(msg, "wq_pidx", wq->rq.wq_pidx))
wq 80 drivers/infiniband/hw/cxgb4/restrack.c if (rdma_nl_put_driver_u32(msg, "msn", wq->rq.msn))
wq 82 drivers/infiniband/hw/cxgb4/restrack.c if (rdma_nl_put_driver_u32_hex(msg, "rqt_hwaddr", wq->rq.rqt_hwaddr))
wq 84 drivers/infiniband/hw/cxgb4/restrack.c if (rdma_nl_put_driver_u32(msg, "rqt_size", wq->rq.rqt_size))
wq 86 drivers/infiniband/hw/cxgb4/restrack.c if (rdma_nl_put_driver_u32(msg, "in_use", wq->rq.in_use))
wq 88 drivers/infiniband/hw/cxgb4/restrack.c if (rdma_nl_put_driver_u32(msg, "size", wq->rq.size))
wq 146 drivers/infiniband/hw/cxgb4/restrack.c struct t4_wq wq;
wq 158 drivers/infiniband/hw/cxgb4/restrack.c wq = qhp->wq;
wq 161 drivers/infiniband/hw/cxgb4/restrack.c if (wq.sq.cidx != wq.sq.pidx) {
wq 162 drivers/infiniband/hw/cxgb4/restrack.c first_sq_idx = wq.sq.cidx;
wq 163 drivers/infiniband/hw/cxgb4/restrack.c first_sqe = qhp->wq.sq.sw_sq[first_sq_idx];
wq 165 drivers/infiniband/hw/cxgb4/restrack.c last_sq_idx = wq.sq.pidx;
wq 167 drivers/infiniband/hw/cxgb4/restrack.c last_sq_idx = wq.sq.size - 1;
wq 169 drivers/infiniband/hw/cxgb4/restrack.c last_sqe = qhp->wq.sq.sw_sq[last_sq_idx];
wq 175 drivers/infiniband/hw/cxgb4/restrack.c if (fill_sq(msg, &wq))
wq 178 drivers/infiniband/hw/cxgb4/restrack.c if (fill_swsqes(msg, &wq.sq, first_sq_idx, fsp, last_sq_idx, lsp))
wq 181 drivers/infiniband/hw/cxgb4/restrack.c if (fill_rq(msg, &wq))
wq 480 drivers/infiniband/hw/cxgb4/t4.h static inline int t4_rqes_posted(struct t4_wq *wq)
wq 482 drivers/infiniband/hw/cxgb4/t4.h return wq->rq.in_use;
wq 485 drivers/infiniband/hw/cxgb4/t4.h static inline int t4_rq_empty(struct t4_wq *wq)
wq 487 drivers/infiniband/hw/cxgb4/t4.h return wq->rq.in_use == 0;
wq 490 drivers/infiniband/hw/cxgb4/t4.h static inline int t4_rq_full(struct t4_wq *wq)
wq 492 drivers/infiniband/hw/cxgb4/t4.h return wq->rq.in_use == (wq->rq.size - 1);
wq 495 drivers/infiniband/hw/cxgb4/t4.h static inline u32 t4_rq_avail(struct t4_wq *wq)
wq 497 drivers/infiniband/hw/cxgb4/t4.h return wq->rq.size - 1 - wq->rq.in_use;
wq 500 drivers/infiniband/hw/cxgb4/t4.h static inline void t4_rq_produce(struct t4_wq *wq, u8 len16)
wq 502 drivers/infiniband/hw/cxgb4/t4.h wq->rq.in_use++;
wq 503 drivers/infiniband/hw/cxgb4/t4.h if (++wq->rq.pidx == wq->rq.size)
wq 504 drivers/infiniband/hw/cxgb4/t4.h wq->rq.pidx = 0;
wq 505 drivers/infiniband/hw/cxgb4/t4.h wq->rq.wq_pidx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
wq 506 drivers/infiniband/hw/cxgb4/t4.h if (wq->rq.wq_pidx >= wq->rq.size * T4_RQ_NUM_SLOTS)
wq 507 drivers/infiniband/hw/cxgb4/t4.h wq->rq.wq_pidx %= wq->rq.size * T4_RQ_NUM_SLOTS;
wq 510 drivers/infiniband/hw/cxgb4/t4.h static inline void t4_rq_consume(struct t4_wq *wq)
wq 512 drivers/infiniband/hw/cxgb4/t4.h wq->rq.in_use--;
wq 513 drivers/infiniband/hw/cxgb4/t4.h if (++wq->rq.cidx == wq->rq.size)
wq 514 drivers/infiniband/hw/cxgb4/t4.h wq->rq.cidx = 0;
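The t4.h helpers just listed implement the classic one-slot-reserved ring: pidx/cidx wrap at size, in_use tracks occupancy, and the queue reports full at size - 1 so an empty ring and a full ring are distinguishable. A self-contained userspace sketch of that accounting (without the wq_pidx hardware-slot scaling):

/* Ring accounting in the style of the t4.h produce/consume entries
 * above; the struct is illustrative, not the driver's real type. */
#include <assert.h>
#include <stdint.h>

struct ring {
        uint16_t size, pidx, cidx, in_use;
};

static uint32_t ring_avail(const struct ring *r)
{
        return r->size - 1 - r->in_use; /* one slot stays unused */
}

static void ring_produce(struct ring *r)
{
        r->in_use++;
        if (++r->pidx == r->size)
                r->pidx = 0;
}

static void ring_consume(struct ring *r)
{
        r->in_use--;
        if (++r->cidx == r->size)
                r->cidx = 0;
}

int main(void)
{
        struct ring r = { .size = 4 };

        while (ring_avail(&r))
                ring_produce(&r);
        assert(r.in_use == 3);          /* full at size - 1 */
        ring_consume(&r);
        assert(ring_avail(&r) == 1);
        return 0;
}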
wq 517 drivers/infiniband/hw/cxgb4/t4.h static inline u16 t4_rq_host_wq_pidx(struct t4_wq *wq)
wq 519 drivers/infiniband/hw/cxgb4/t4.h return wq->rq.queue[wq->rq.size].status.host_wq_pidx;
wq 522 drivers/infiniband/hw/cxgb4/t4.h static inline u16 t4_rq_wq_size(struct t4_wq *wq)
wq 524 drivers/infiniband/hw/cxgb4/t4.h return wq->rq.size * T4_RQ_NUM_SLOTS;
wq 532 drivers/infiniband/hw/cxgb4/t4.h static inline int t4_sq_empty(struct t4_wq *wq)
wq 534 drivers/infiniband/hw/cxgb4/t4.h return wq->sq.in_use == 0;
wq 537 drivers/infiniband/hw/cxgb4/t4.h static inline int t4_sq_full(struct t4_wq *wq)
wq 539 drivers/infiniband/hw/cxgb4/t4.h return wq->sq.in_use == (wq->sq.size - 1);
wq 542 drivers/infiniband/hw/cxgb4/t4.h static inline u32 t4_sq_avail(struct t4_wq *wq)
wq 544 drivers/infiniband/hw/cxgb4/t4.h return wq->sq.size - 1 - wq->sq.in_use;
wq 547 drivers/infiniband/hw/cxgb4/t4.h static inline void t4_sq_produce(struct t4_wq *wq, u8 len16)
wq 549 drivers/infiniband/hw/cxgb4/t4.h wq->sq.in_use++;
wq 550 drivers/infiniband/hw/cxgb4/t4.h if (++wq->sq.pidx == wq->sq.size)
wq 551 drivers/infiniband/hw/cxgb4/t4.h wq->sq.pidx = 0;
wq 552 drivers/infiniband/hw/cxgb4/t4.h wq->sq.wq_pidx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
wq 553 drivers/infiniband/hw/cxgb4/t4.h if (wq->sq.wq_pidx >= wq->sq.size * T4_SQ_NUM_SLOTS)
wq 554 drivers/infiniband/hw/cxgb4/t4.h wq->sq.wq_pidx %= wq->sq.size * T4_SQ_NUM_SLOTS;
wq 557 drivers/infiniband/hw/cxgb4/t4.h static inline void t4_sq_consume(struct t4_wq *wq)
wq 559 drivers/infiniband/hw/cxgb4/t4.h if (wq->sq.cidx == wq->sq.flush_cidx)
wq 560 drivers/infiniband/hw/cxgb4/t4.h wq->sq.flush_cidx = -1;
wq 561 drivers/infiniband/hw/cxgb4/t4.h wq->sq.in_use--;
wq 562 drivers/infiniband/hw/cxgb4/t4.h if (++wq->sq.cidx == wq->sq.size)
wq 563 drivers/infiniband/hw/cxgb4/t4.h wq->sq.cidx = 0;
wq 566 drivers/infiniband/hw/cxgb4/t4.h static inline u16 t4_sq_host_wq_pidx(struct t4_wq *wq)
wq 568 drivers/infiniband/hw/cxgb4/t4.h return wq->sq.queue[wq->sq.size].status.host_wq_pidx;
wq 571 drivers/infiniband/hw/cxgb4/t4.h static inline u16 t4_sq_wq_size(struct t4_wq *wq)
wq 573 drivers/infiniband/hw/cxgb4/t4.h return wq->sq.size * T4_SQ_NUM_SLOTS;
wq 611 drivers/infiniband/hw/cxgb4/t4.h static inline void t4_ring_sq_db(struct t4_wq *wq, u16 inc, union t4_wr *wqe)
wq 616 drivers/infiniband/hw/cxgb4/t4.h if (wq->sq.bar2_va) {
wq 617 drivers/infiniband/hw/cxgb4/t4.h if (inc == 1 && wq->sq.bar2_qid == 0 && wqe) {
wq 618 drivers/infiniband/hw/cxgb4/t4.h pr_debug("WC wq->sq.pidx = %d\n", wq->sq.pidx);
wq 620 drivers/infiniband/hw/cxgb4/t4.h (wq->sq.bar2_va + SGE_UDB_WCDOORBELL),
wq 623 drivers/infiniband/hw/cxgb4/t4.h pr_debug("DB wq->sq.pidx = %d\n", wq->sq.pidx);
wq 624 drivers/infiniband/hw/cxgb4/t4.h writel(PIDX_T5_V(inc) | QID_V(wq->sq.bar2_qid),
wq 625 drivers/infiniband/hw/cxgb4/t4.h wq->sq.bar2_va + SGE_UDB_KDOORBELL);
wq 632 drivers/infiniband/hw/cxgb4/t4.h writel(QID_V(wq->sq.qid) | PIDX_V(inc), wq->db);
wq 635 drivers/infiniband/hw/cxgb4/t4.h static inline void t4_ring_rq_db(struct t4_wq *wq, u16 inc,
wq 641 drivers/infiniband/hw/cxgb4/t4.h if (wq->rq.bar2_va) {
wq 642 drivers/infiniband/hw/cxgb4/t4.h if (inc == 1 && wq->rq.bar2_qid == 0 && wqe) {
wq 643 drivers/infiniband/hw/cxgb4/t4.h pr_debug("WC wq->rq.pidx = %d\n", wq->rq.pidx);
wq 645 drivers/infiniband/hw/cxgb4/t4.h (wq->rq.bar2_va + SGE_UDB_WCDOORBELL),
wq 648 drivers/infiniband/hw/cxgb4/t4.h pr_debug("DB wq->rq.pidx = %d\n", wq->rq.pidx);
wq 649 drivers/infiniband/hw/cxgb4/t4.h writel(PIDX_T5_V(inc) | QID_V(wq->rq.bar2_qid),
wq 650 drivers/infiniband/hw/cxgb4/t4.h wq->rq.bar2_va + SGE_UDB_KDOORBELL);
wq 657 drivers/infiniband/hw/cxgb4/t4.h writel(QID_V(wq->rq.qid) | PIDX_V(inc), wq->db);
wq 660 drivers/infiniband/hw/cxgb4/t4.h static inline int t4_wq_in_error(struct t4_wq *wq)
wq 662 drivers/infiniband/hw/cxgb4/t4.h return *wq->qp_errp;
wq 665 drivers/infiniband/hw/cxgb4/t4.h static inline void t4_set_wq_in_error(struct t4_wq *wq, u32 srqidx)
wq 668 drivers/infiniband/hw/cxgb4/t4.h *wq->srqidxp = srqidx;
wq 669 drivers/infiniband/hw/cxgb4/t4.h *wq->qp_errp = 1;
wq 672 drivers/infiniband/hw/cxgb4/t4.h static inline void t4_disable_wq_db(struct t4_wq *wq)
wq 674 drivers/infiniband/hw/cxgb4/t4.h wq->rq.queue[wq->rq.size].status.db_off = 1;
wq 677 drivers/infiniband/hw/cxgb4/t4.h static inline void t4_enable_wq_db(struct t4_wq *wq)
wq 679 drivers/infiniband/hw/cxgb4/t4.h wq->rq.queue[wq->rq.size].status.db_off = 0;
wq 682 drivers/infiniband/hw/cxgb4/t4.h static inline int t4_wq_db_enabled(struct t4_wq *wq)
wq 684 drivers/infiniband/hw/cxgb4/t4.h return !wq->rq.queue[wq->rq.size].status.db_off;
wq 190 drivers/infiniband/hw/hfi1/iowait.h struct workqueue_struct *wq, int cpu)
wq 192 drivers/infiniband/hw/hfi1/iowait.h return !!queue_work_on(cpu, wq, &wait->wait[IOWAIT_IB_SE].iowork);
wq 202 drivers/infiniband/hw/hfi1/iowait.h struct workqueue_struct *wq, int cpu)
wq 204 drivers/infiniband/hw/hfi1/iowait.h return !!queue_work_on(cpu, wq, &wait->wait[IOWAIT_TID_SE].iowork);
wq 65 drivers/infiniband/hw/hfi1/mmu_rb.c struct workqueue_struct *wq;
wq 97 drivers/infiniband/hw/hfi1/mmu_rb.c struct workqueue_struct *wq,
wq 117 drivers/infiniband/hw/hfi1/mmu_rb.c handlr->wq = wq;
wq 312 drivers/infiniband/hw/hfi1/mmu_rb.c queue_work(handler->wq, &handler->del_work);
wq 76 drivers/infiniband/hw/hfi1/mmu_rb.h struct workqueue_struct *wq,
wq 2195 drivers/infiniband/hw/hns/hns_roce_hw_v1.c struct hns_roce_wq *wq;
wq 2321 drivers/infiniband/hw/hns/hns_roce_hw_v1.c wq = &(*cur_qp)->sq;
wq 2331 drivers/infiniband/hw/hns/hns_roce_hw_v1.c wq->tail += (wqe_ctr - (u16)wq->tail) &
wq 2332 drivers/infiniband/hw/hns/hns_roce_hw_v1.c (wq->wqe_cnt - 1);
wq 2334 drivers/infiniband/hw/hns/hns_roce_hw_v1.c wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
wq 2335 drivers/infiniband/hw/hns/hns_roce_hw_v1.c ++wq->tail;
wq 2368 drivers/infiniband/hw/hns/hns_roce_hw_v1.c wq = &(*cur_qp)->rq;
wq 2369 drivers/infiniband/hw/hns/hns_roce_hw_v1.c wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
wq 2370 drivers/infiniband/hw/hns/hns_roce_hw_v1.c ++wq->tail;
wq 2701 drivers/infiniband/hw/hns/hns_roce_hw_v2.c struct hns_roce_wq *wq;
wq 2741 drivers/infiniband/hw/hns/hns_roce_hw_v2.c wq = &(*cur_qp)->sq;
wq 2751 drivers/infiniband/hw/hns/hns_roce_hw_v2.c wq->tail += (wqe_ctr - (u16)wq->tail) &
wq 2752 drivers/infiniband/hw/hns/hns_roce_hw_v2.c (wq->wqe_cnt - 1);
wq 2755 drivers/infiniband/hw/hns/hns_roce_hw_v2.c wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
wq 2756 drivers/infiniband/hw/hns/hns_roce_hw_v2.c ++wq->tail;
wq 2766 drivers/infiniband/hw/hns/hns_roce_hw_v2.c wq = &(*cur_qp)->rq;
wq 2767 drivers/infiniband/hw/hns/hns_roce_hw_v2.c wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
wq 2768 drivers/infiniband/hw/hns/hns_roce_hw_v2.c ++wq->tail;
wq 438 drivers/infiniband/hw/mlx4/alias_GUID.c queue_delayed_work(dev->sriov.alias_guid.ports_guid[port_index].wq,
wq 571 drivers/infiniband/hw/mlx4/alias_GUID.c queue_delayed_work(dev->sriov.alias_guid.ports_guid[port - 1].wq,
wq 633 drivers/infiniband/hw/mlx4/alias_GUID.c queue_delayed_work(dev->sriov.alias_guid.ports_guid[port - 1].wq,
wq 790 drivers/infiniband/hw/mlx4/alias_GUID.c queue_delayed_work(dev->sriov.alias_guid.ports_guid[port].wq,
wq 826 drivers/infiniband/hw/mlx4/alias_GUID.c flush_workqueue(dev->sriov.alias_guid.ports_guid[i].wq);
wq 827 drivers/infiniband/hw/mlx4/alias_GUID.c destroy_workqueue(dev->sriov.alias_guid.ports_guid[i].wq);
wq 882 drivers/infiniband/hw/mlx4/alias_GUID.c dev->sriov.alias_guid.ports_guid[i].wq =
wq 884 drivers/infiniband/hw/mlx4/alias_GUID.c if (!dev->sriov.alias_guid.ports_guid[i].wq) {
wq 895 drivers/infiniband/hw/mlx4/alias_GUID.c destroy_workqueue(dev->sriov.alias_guid.ports_guid[i].wq);
wq 896 drivers/infiniband/hw/mlx4/alias_GUID.c dev->sriov.alias_guid.ports_guid[i].wq = NULL;
wq 615 drivers/infiniband/hw/mlx4/cq.c struct mlx4_ib_wq *wq;
wq 619 drivers/infiniband/hw/mlx4/cq.c wq = is_send ? &qp->sq : &qp->rq;
wq 620 drivers/infiniband/hw/mlx4/cq.c cur = wq->head - wq->tail;
wq 626 drivers/infiniband/hw/mlx4/cq.c wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
wq 629 drivers/infiniband/hw/mlx4/cq.c wq->tail++;
wq 667 drivers/infiniband/hw/mlx4/cq.c struct mlx4_ib_wq *wq;
wq 737 drivers/infiniband/hw/mlx4/cq.c wq = &(*cur_qp)->sq;
wq 740 drivers/infiniband/hw/mlx4/cq.c wq->tail += (u16) (wqe_ctr - (u16) wq->tail);
wq 742 drivers/infiniband/hw/mlx4/cq.c wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
wq 743 drivers/infiniband/hw/mlx4/cq.c ++wq->tail;
wq 755 drivers/infiniband/hw/mlx4/cq.c wq = &(*cur_qp)->rq;
wq 756 drivers/infiniband/hw/mlx4/cq.c tail = wq->tail & (wq->wqe_cnt - 1);
wq 757 drivers/infiniband/hw/mlx4/cq.c wc->wr_id = wq->wrid[tail];
wq 758 drivers/infiniband/hw/mlx4/cq.c ++wq->tail;
wq 1306 drivers/infiniband/hw/mlx4/mad.c queue_work(ctx->wq, &ctx->work);
wq 2048 drivers/infiniband/hw/mlx4/mad.c ctx->wq = to_mdev(ibdev)->sriov.demux[port - 1].wq;
wq 2059 drivers/infiniband/hw/mlx4/mad.c ctx->wq = NULL;
wq 2096 drivers/infiniband/hw/mlx4/mad.c flush_workqueue(ctx->wq);
wq 2193 drivers/infiniband/hw/mlx4/mad.c ctx->wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
wq 2194 drivers/infiniband/hw/mlx4/mad.c if (!ctx->wq) {
wq 2211 drivers/infiniband/hw/mlx4/mad.c destroy_workqueue(ctx->wq);
wq 2212 drivers/infiniband/hw/mlx4/mad.c ctx->wq = NULL;
wq 2228 drivers/infiniband/hw/mlx4/mad.c flush_workqueue(sqp_ctx->wq);
wq 2257 drivers/infiniband/hw/mlx4/mad.c flush_workqueue(ctx->wq);
wq 2264 drivers/infiniband/hw/mlx4/mad.c destroy_workqueue(ctx->wq);
wq 86 drivers/infiniband/hw/mlx4/main.c static struct workqueue_struct *wq;
wq 2908 drivers/infiniband/hw/mlx4/main.c flush_workqueue(wq);
wq 3022 drivers/infiniband/hw/mlx4/main.c flush_workqueue(wq);
wq 3235 drivers/infiniband/hw/mlx4/main.c queue_work(wq, &ew->work);
wq 3256 drivers/infiniband/hw/mlx4/main.c queue_work(wq, &ew->work);
wq 3303 drivers/infiniband/hw/mlx4/main.c queue_work(wq, &ew->work);
wq 3362 drivers/infiniband/hw/mlx4/main.c wq = alloc_ordered_workqueue("mlx4_ib", WQ_MEM_RECLAIM);
wq 3363 drivers/infiniband/hw/mlx4/main.c if (!wq)
wq 3380 drivers/infiniband/hw/mlx4/main.c destroy_workqueue(wq);
wq 3388 drivers/infiniband/hw/mlx4/main.c destroy_workqueue(wq);
wq 407 drivers/infiniband/hw/mlx4/mlx4_ib.h struct workqueue_struct *wq;
wq 461 drivers/infiniband/hw/mlx4/mlx4_ib.h struct workqueue_struct *wq;
wq 468 drivers/infiniband/hw/mlx4/mlx4_ib.h struct workqueue_struct *wq;
wq 909 drivers/infiniband/hw/mlx4/mlx4_ib.h void mlx4_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata);
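The hns_roce and mlx4 CQ-poll entries above all index a power-of-two software ring with a free-running tail: wrid[tail & (wqe_cnt - 1)], then ++tail. The counter never wraps to the ring size; the mask does the wrapping. A small sketch of that lookup (sizes and values are illustrative):

/* Free-running-counter ring lookup, as in the CQ-poll entries above. */
#include <assert.h>
#include <stdint.h>

#define WQE_CNT 8       /* must be a power of two for the mask trick */

int main(void)
{
        uint64_t wrid[WQE_CNT];
        uint32_t tail = 0;

        for (uint32_t i = 0; i < 20; i++) {
                wrid[i & (WQE_CNT - 1)] = 1000 + i;             /* post */
                assert(wrid[tail & (WQE_CNT - 1)] == 1000 + i); /* poll */
                ++tail;
        }
        assert(tail == 20);     /* the counter keeps counting past WQE_CNT */
        return 0;
}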
wq 910 drivers/infiniband/hw/mlx4/mlx4_ib.h int mlx4_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
wq 869 drivers/infiniband/hw/mlx4/qp.c struct mlx4_ib_create_wq wq;
wq 886 drivers/infiniband/hw/mlx4/qp.c if (ib_copy_from_udata(&wq, udata, copy_len)) {
wq 891 drivers/infiniband/hw/mlx4/qp.c if (wq.comp_mask || wq.reserved[0] || wq.reserved[1] ||
wq 892 drivers/infiniband/hw/mlx4/qp.c wq.reserved[2]) {
wq 898 drivers/infiniband/hw/mlx4/qp.c if (wq.log_range_size > ilog2(dev->dev->caps.max_rss_tbl_sz)) {
wq 904 drivers/infiniband/hw/mlx4/qp.c range_size = 1 << wq.log_range_size;
wq 919 drivers/infiniband/hw/mlx4/qp.c qp->umem = ib_umem_get(udata, wq.buf_addr, qp->buf_size, 0, 0);
wq 936 drivers/infiniband/hw/mlx4/qp.c err = mlx4_ib_db_map_user(udata, wq.db_addr, &qp->db);
wq 1394 drivers/infiniband/hw/mlx4/qp.c struct mlx4_ib_qp *wq = to_mqp((struct ib_qp *)ibwq);
wq 1396 drivers/infiniband/hw/mlx4/qp.c mutex_lock(&wq->mutex);
wq 1398 drivers/infiniband/hw/mlx4/qp.c wq->rss_usecnt--;
wq 1400 drivers/infiniband/hw/mlx4/qp.c mutex_unlock(&wq->mutex);
wq 2037 drivers/infiniband/hw/mlx4/qp.c struct mlx4_ib_qp *wq = to_mqp((struct ib_qp *)ibwq);
wq 2039 drivers/infiniband/hw/mlx4/qp.c mutex_lock(&wq->mutex);
wq 2047 drivers/infiniband/hw/mlx4/qp.c if ((wq->rss_usecnt > 0) && (wq->port != port_num)) {
wq 2049 drivers/infiniband/hw/mlx4/qp.c mutex_unlock(&wq->mutex);
wq 2052 drivers/infiniband/hw/mlx4/qp.c wq->port = port_num;
wq 2053 drivers/infiniband/hw/mlx4/qp.c if ((wq->rss_usecnt == 0) && (ibwq->state == IB_WQS_RDY)) {
wq 2056 drivers/infiniband/hw/mlx4/qp.c mutex_unlock(&wq->mutex);
wq 2060 drivers/infiniband/hw/mlx4/qp.c wq->rss_usecnt++;
wq 2062 drivers/infiniband/hw/mlx4/qp.c mutex_unlock(&wq->mutex);
wq 2070 drivers/infiniband/hw/mlx4/qp.c struct mlx4_ib_qp *wq = to_mqp((struct ib_qp *)ibwq);
wq 2072 drivers/infiniband/hw/mlx4/qp.c mutex_lock(&wq->mutex);
wq 2074 drivers/infiniband/hw/mlx4/qp.c if ((wq->rss_usecnt == 1) &&
wq 2080 drivers/infiniband/hw/mlx4/qp.c wq->rss_usecnt--;
wq 2082 drivers/infiniband/hw/mlx4/qp.c mutex_unlock(&wq->mutex);
wq 2096 drivers/infiniband/hw/mlx4/qp.c struct mlx4_ib_qp *wq = to_mqp((struct ib_qp *)ibwq);
wq 2098 drivers/infiniband/hw/mlx4/qp.c mutex_lock(&wq->mutex);
wq 2100 drivers/infiniband/hw/mlx4/qp.c if ((wq->rss_usecnt == 1) && (ibwq->state == IB_WQS_RDY))
wq 2104 drivers/infiniband/hw/mlx4/qp.c wq->rss_usecnt--;
wq 2106 drivers/infiniband/hw/mlx4/qp.c mutex_unlock(&wq->mutex);
wq 3289 drivers/infiniband/hw/mlx4/qp.c static int mlx4_wq_overflow(struct mlx4_ib_wq *wq, int nreq, struct ib_cq *ib_cq)
wq 3294 drivers/infiniband/hw/mlx4/qp.c cur = wq->head - wq->tail;
wq 3295 drivers/infiniband/hw/mlx4/qp.c if (likely(cur + nreq < wq->max_post))
wq 3300 drivers/infiniband/hw/mlx4/qp.c cur = wq->head - wq->tail;
wq 3303 drivers/infiniband/hw/mlx4/qp.c return cur + nreq >= wq->max_post;
wq 98 drivers/infiniband/hw/mlx5/cq.c static enum ib_wc_opcode get_umr_comp(struct mlx5_ib_wq *wq, int idx)
wq 100 drivers/infiniband/hw/mlx5/cq.c switch (wq->wr_data[idx]) {
wq 117 drivers/infiniband/hw/mlx5/cq.c struct mlx5_ib_wq *wq, int idx)
wq 155 drivers/infiniband/hw/mlx5/cq.c wc->opcode = get_umr_comp(wq, idx);
wq 171 drivers/infiniband/hw/mlx5/cq.c struct mlx5_ib_wq *wq;
wq 194 drivers/infiniband/hw/mlx5/cq.c wq = &qp->rq;
wq 195 drivers/infiniband/hw/mlx5/cq.c wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
wq 196 drivers/infiniband/hw/mlx5/cq.c ++wq->tail;
wq 389 drivers/infiniband/hw/mlx5/cq.c struct mlx5_ib_wq *wq;
wq 394 drivers/infiniband/hw/mlx5/cq.c wq = (is_send) ? &qp->sq : &qp->rq;
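mlx4_wq_overflow() above (qp.c:3289-3303, and its mlx5 and mthca twins elsewhere in this index) relies on head and tail being free-running unsigned counters: head - tail yields the occupancy even after either counter wraps, because unsigned subtraction is modular. A compilable sketch of that property:

/* Modular occupancy check in the style of mlx4_wq_overflow() above. */
#include <assert.h>
#include <stdint.h>

int main(void)
{
        uint32_t head = 0xfffffffe, tail = 0xfffffff0; /* both near wrap */
        uint32_t max_post = 64, nreq = 8;
        uint32_t cur = head - tail;                    /* 14 in flight */

        assert(cur == 14);
        assert(cur + nreq < max_post);                 /* room to post 8 */

        head += 32;     /* head wraps past zero... */
        cur = head - tail;
        assert(cur == 46);      /* ...but the difference stays correct */
        return 0;
}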
wq 395 drivers/infiniband/hw/mlx5/cq.c cur = wq->head - wq->tail;
wq 404 drivers/infiniband/hw/mlx5/cq.c idx = (is_send) ? wq->last_poll : wq->tail;
wq 405 drivers/infiniband/hw/mlx5/cq.c idx &= (wq->wqe_cnt - 1);
wq 406 drivers/infiniband/hw/mlx5/cq.c wc->wr_id = wq->wrid[idx];
wq 409 drivers/infiniband/hw/mlx5/cq.c wq->tail++;
wq 411 drivers/infiniband/hw/mlx5/cq.c wq->last_poll = wq->w_list[idx].next;
wq 447 drivers/infiniband/hw/mlx5/cq.c struct mlx5_ib_wq *wq;
wq 497 drivers/infiniband/hw/mlx5/cq.c wq = &(*cur_qp)->sq;
wq 499 drivers/infiniband/hw/mlx5/cq.c idx = wqe_ctr & (wq->wqe_cnt - 1);
wq 500 drivers/infiniband/hw/mlx5/cq.c handle_good_req(wc, cqe64, wq, idx);
wq 501 drivers/infiniband/hw/mlx5/cq.c handle_atomics(*cur_qp, cqe64, wq->last_poll, idx);
wq 502 drivers/infiniband/hw/mlx5/cq.c wc->wr_id = wq->wrid[idx];
wq 503 drivers/infiniband/hw/mlx5/cq.c wq->tail = wq->wqe_head[idx] + 1;
wq 525 drivers/infiniband/hw/mlx5/cq.c wq = &(*cur_qp)->sq;
wq 527 drivers/infiniband/hw/mlx5/cq.c idx = wqe_ctr & (wq->wqe_cnt - 1);
wq 528 drivers/infiniband/hw/mlx5/cq.c wc->wr_id = wq->wrid[idx];
wq 529 drivers/infiniband/hw/mlx5/cq.c wq->tail = wq->wqe_head[idx] + 1;
wq 539 drivers/infiniband/hw/mlx5/cq.c wq = &(*cur_qp)->rq;
wq 540 drivers/infiniband/hw/mlx5/cq.c wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
wq 541 drivers/infiniband/hw/mlx5/cq.c ++wq->tail;
wq 683 drivers/infiniband/hw/mlx5/devx.c void *rqc, *wq;
wq 686 drivers/infiniband/hw/mlx5/devx.c wq = MLX5_ADDR_OF(rqc, rqc, wq);
wq 687 drivers/infiniband/hw/mlx5/devx.c MLX5_SET(wq, wq, dbr_umem_valid, 1);
wq 688 drivers/infiniband/hw/mlx5/devx.c MLX5_SET(wq, wq, wq_umem_valid, 1);
wq 694 drivers/infiniband/hw/mlx5/devx.c void *sqc, *wq;
wq 697 drivers/infiniband/hw/mlx5/devx.c wq = MLX5_ADDR_OF(sqc, sqc, wq);
wq 698 drivers/infiniband/hw/mlx5/devx.c MLX5_SET(wq, wq, dbr_umem_valid, 1);
wq 699 drivers/infiniband/hw/mlx5/devx.c MLX5_SET(wq, wq, wq_umem_valid, 1);
wq 709 drivers/infiniband/hw/mlx5/devx.c void *rmpc, *wq;
wq 712 drivers/infiniband/hw/mlx5/devx.c wq = MLX5_ADDR_OF(rmpc, rmpc, wq);
wq 713 drivers/infiniband/hw/mlx5/devx.c MLX5_SET(wq, wq, dbr_umem_valid, 1);
wq 714 drivers/infiniband/hw/mlx5/devx.c MLX5_SET(wq, wq, wq_umem_valid, 1);
wq 720 drivers/infiniband/hw/mlx5/devx.c void *xrqc, *wq;
wq 723 drivers/infiniband/hw/mlx5/devx.c wq = MLX5_ADDR_OF(xrqc, xrqc, wq);
wq 724 drivers/infiniband/hw/mlx5/devx.c MLX5_SET(wq, wq, dbr_umem_valid, 1);
wq 725 drivers/infiniband/hw/mlx5/devx.c MLX5_SET(wq, wq, wq_umem_valid, 1);
wq 692 drivers/infiniband/hw/mlx5/mlx5_ib.h struct workqueue_struct *wq;
wq 942 drivers/infiniband/hw/mlx5/mlx5_ib.h struct workqueue_struct *wq;
wq 1232 drivers/infiniband/hw/mlx5/mlx5_ib.h void mlx5_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata);
wq 1233 drivers/infiniband/hw/mlx5/mlx5_ib.h int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
wq 371 drivers/infiniband/hw/mlx5/mr.c queue_delayed_work(cache->wq, &ent->dwork,
wq 376 drivers/infiniband/hw/mlx5/mr.c queue_delayed_work(cache->wq, &ent->dwork,
wq 379 drivers/infiniband/hw/mlx5/mr.c queue_work(cache->wq, &ent->work);
wq 399 drivers/infiniband/hw/mlx5/mr.c queue_work(cache->wq, &ent->work);
wq 401 drivers/infiniband/hw/mlx5/mr.c queue_delayed_work(cache->wq, &ent->dwork, 300 * HZ);
wq 452 drivers/infiniband/hw/mlx5/mr.c queue_work(cache->wq, &ent->work);
wq 487 drivers/infiniband/hw/mlx5/mr.c queue_work(cache->wq, &ent->work);
wq 492 drivers/infiniband/hw/mlx5/mr.c queue_work(cache->wq, &ent->work);
wq 519 drivers/infiniband/hw/mlx5/mr.c queue_work(cache->wq, &ent->work);
wq 532 drivers/infiniband/hw/mlx5/mr.c queue_work(cache->wq, &ent->work);
wq 614 drivers/infiniband/hw/mlx5/mr.c cache->wq = alloc_ordered_workqueue("mkey_cache", WQ_MEM_RECLAIM);
wq 615 drivers/infiniband/hw/mlx5/mr.c if (!cache->wq) {
wq 652 drivers/infiniband/hw/mlx5/mr.c queue_work(cache->wq, &ent->work);
wq 664 drivers/infiniband/hw/mlx5/mr.c if (!dev->cache.wq)
wq 668 drivers/infiniband/hw/mlx5/mr.c flush_workqueue(dev->cache.wq);
wq 676 drivers/infiniband/hw/mlx5/mr.c destroy_workqueue(dev->cache.wq);
wq 1108 drivers/infiniband/hw/mlx5/odp.c struct mlx5_ib_wq *wq = &qp->rq;
wq 1109 drivers/infiniband/hw/mlx5/odp.c int wqe_size = 1 << wq->wqe_shift;
wq 1440 drivers/infiniband/hw/mlx5/odp.c queue_work(eq->wq, &pfault->work);
wq 1507 drivers/infiniband/hw/mlx5/odp.c eq->wq = alloc_workqueue("mlx5_ib_page_fault",
wq 1510 drivers/infiniband/hw/mlx5/odp.c if (!eq->wq) {
wq 1536 drivers/infiniband/hw/mlx5/odp.c destroy_workqueue(eq->wq);
wq 1550 drivers/infiniband/hw/mlx5/odp.c destroy_workqueue(eq->wq);
wq 171 drivers/infiniband/hw/mlx5/qp.c struct mlx5_ib_wq *wq = &qp->sq;
wq 187 drivers/infiniband/hw/mlx5/qp.c wq->offset,
wq 188 drivers/infiniband/hw/mlx5/qp.c wq->wqe_cnt,
wq 189 drivers/infiniband/hw/mlx5/qp.c wq->wqe_shift,
wq 217 drivers/infiniband/hw/mlx5/qp.c wq->offset,
wq 218 drivers/infiniband/hw/mlx5/qp.c wq->wqe_cnt,
wq 219 drivers/infiniband/hw/mlx5/qp.c wq->wqe_shift,
wq 237 drivers/infiniband/hw/mlx5/qp.c struct mlx5_ib_wq *wq = &qp->rq;
wq 245 drivers/infiniband/hw/mlx5/qp.c wq->offset,
wq 246 drivers/infiniband/hw/mlx5/qp.c wq->wqe_cnt,
wq 247 drivers/infiniband/hw/mlx5/qp.c wq->wqe_shift,
wq 1229 drivers/infiniband/hw/mlx5/qp.c void *wq;
wq 1264 drivers/infiniband/hw/mlx5/qp.c wq = MLX5_ADDR_OF(sqc, sqc, wq);
wq 1265 drivers/infiniband/hw/mlx5/qp.c MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
wq 1266 drivers/infiniband/hw/mlx5/qp.c MLX5_SET(wq, wq, pd, MLX5_GET(qpc, qpc, pd));
wq 1267 drivers/infiniband/hw/mlx5/qp.c MLX5_SET(wq, wq, uar_page, MLX5_GET(qpc, qpc, uar_page));
wq 1268 drivers/infiniband/hw/mlx5/qp.c MLX5_SET64(wq, wq, dbr_addr, MLX5_GET64(qpc, qpc, dbr_addr));
wq 1269 drivers/infiniband/hw/mlx5/qp.c MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
wq 1270 drivers/infiniband/hw/mlx5/qp.c MLX5_SET(wq, wq, log_wq_sz, MLX5_GET(qpc, qpc, log_sq_size));
wq 1271 drivers/infiniband/hw/mlx5/qp.c MLX5_SET(wq, wq, log_wq_pg_sz, page_shift - MLX5_ADAPTER_PAGE_SHIFT);
wq 1272 drivers/infiniband/hw/mlx5/qp.c MLX5_SET(wq, wq, page_offset, offset);
wq 1274 drivers/infiniband/hw/mlx5/qp.c pas = (__be64 *)MLX5_ADDR_OF(wq, wq, pas);
wq 1325 drivers/infiniband/hw/mlx5/qp.c void *wq;
wq 1352 drivers/infiniband/hw/mlx5/qp.c wq = MLX5_ADDR_OF(rqc, rqc, wq);
wq 1353 drivers/infiniband/hw/mlx5/qp.c MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
wq 1355 drivers/infiniband/hw/mlx5/qp.c MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
wq 1356 drivers/infiniband/hw/mlx5/qp.c MLX5_SET(wq, wq, page_offset, MLX5_GET(qpc, qpc, page_offset));
wq 1357 drivers/infiniband/hw/mlx5/qp.c MLX5_SET(wq, wq, pd, MLX5_GET(qpc, qpc, pd));
wq 1358 drivers/infiniband/hw/mlx5/qp.c MLX5_SET64(wq, wq, dbr_addr, MLX5_GET64(qpc, qpc, dbr_addr));
wq 1359 drivers/infiniband/hw/mlx5/qp.c MLX5_SET(wq, wq, log_wq_stride, MLX5_GET(qpc, qpc, log_rq_stride) + 4);
wq 1360 drivers/infiniband/hw/mlx5/qp.c MLX5_SET(wq, wq, log_wq_pg_sz, MLX5_GET(qpc, qpc, log_page_size));
wq 1361 drivers/infiniband/hw/mlx5/qp.c MLX5_SET(wq, wq, log_wq_sz, MLX5_GET(qpc, qpc, log_rq_size));
wq 1363 drivers/infiniband/hw/mlx5/qp.c pas = (__be64 *)MLX5_ADDR_OF(wq, wq, pas);
wq 4070 drivers/infiniband/hw/mlx5/qp.c static int mlx5_wq_overflow(struct mlx5_ib_wq *wq, int nreq, struct ib_cq *ib_cq)
wq 4075 drivers/infiniband/hw/mlx5/qp.c cur = wq->head - wq->tail;
wq 4076 drivers/infiniband/hw/mlx5/qp.c if (likely(cur + nreq < wq->max_post))
wq 4081 drivers/infiniband/hw/mlx5/qp.c cur = wq->head - wq->tail;
wq 4084 drivers/infiniband/hw/mlx5/qp.c return cur + nreq >= wq->max_post;
wq 5883 drivers/infiniband/hw/mlx5/qp.c event.element.wq = &rwq->ibwq;
wq 5926 drivers/infiniband/hw/mlx5/qp.c void *wq;
wq 5945 drivers/infiniband/hw/mlx5/qp.c wq = MLX5_ADDR_OF(rqc, rqc, wq);
wq 5946 drivers/infiniband/hw/mlx5/qp.c MLX5_SET(wq, wq, wq_type,
wq 5955 drivers/infiniband/hw/mlx5/qp.c MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
wq 5958 drivers/infiniband/hw/mlx5/qp.c MLX5_SET(wq, wq, log_wq_stride, rwq->log_rq_stride);
wq 5960 drivers/infiniband/hw/mlx5/qp.c MLX5_SET(wq, wq, two_byte_shift_en, rwq->two_byte_shift_en);
wq 5961 drivers/infiniband/hw/mlx5/qp.c MLX5_SET(wq, wq, log_wqe_stride_size,
wq 5964 drivers/infiniband/hw/mlx5/qp.c MLX5_SET(wq, wq, log_wqe_num_of_strides, rwq->log_num_strides -
wq 5967 drivers/infiniband/hw/mlx5/qp.c MLX5_SET(wq, wq, log_wq_sz, rwq->log_rq_size);
wq 5968 drivers/infiniband/hw/mlx5/qp.c MLX5_SET(wq, wq, pd, to_mpd(pd)->pdn);
wq 5969 drivers/infiniband/hw/mlx5/qp.c MLX5_SET(wq, wq, page_offset, rwq->rq_page_offset);
wq 5970 drivers/infiniband/hw/mlx5/qp.c MLX5_SET(wq, wq, log_wq_pg_sz, rwq->log_page_size);
wq 5971 drivers/infiniband/hw/mlx5/qp.c MLX5_SET(wq, wq, wq_signature, rwq->wq_sig);
wq 5972 drivers/infiniband/hw/mlx5/qp.c MLX5_SET64(wq, wq, dbr_addr, rwq->db.dma);
wq 6000 drivers/infiniband/hw/mlx5/qp.c rq_pas0 = (__be64 *)MLX5_ADDR_OF(wq, wq, pas);
wq 6183 drivers/infiniband/hw/mlx5/qp.c void mlx5_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata)
wq 6185 drivers/infiniband/hw/mlx5/qp.c struct mlx5_ib_dev *dev = to_mdev(wq->device);
wq 6186 drivers/infiniband/hw/mlx5/qp.c struct mlx5_ib_rwq *rwq = to_mrwq(wq);
wq 6189 drivers/infiniband/hw/mlx5/qp.c destroy_user_rq(dev, wq->pd, rwq, udata);
wq 6282 drivers/infiniband/hw/mlx5/qp.c int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
wq 6285 drivers/infiniband/hw/mlx5/qp.c struct mlx5_ib_dev *dev = to_mdev(wq->device);
wq 6286 drivers/infiniband/hw/mlx5/qp.c struct mlx5_ib_rwq *rwq = to_mrwq(wq);
wq 6319 drivers/infiniband/hw/mlx5/qp.c wq_attr->curr_wq_state : wq->state;
wq 6327 drivers/infiniband/hw/mlx5/qp.c MLX5_SET(modify_rq_in, in, uid, to_mpd(wq->pd)->uid);
wq 27 drivers/infiniband/hw/mlx5/srq_cmd.c static void set_wq(void *wq, struct mlx5_srq_attr *in)
wq 29 drivers/infiniband/hw/mlx5/srq_cmd.c MLX5_SET(wq, wq, wq_signature, !!(in->flags
wq 31 drivers/infiniband/hw/mlx5/srq_cmd.c MLX5_SET(wq, wq, log_wq_pg_sz, in->log_page_size);
wq 32 drivers/infiniband/hw/mlx5/srq_cmd.c MLX5_SET(wq, wq, log_wq_stride, in->wqe_shift + 4);
wq 33 drivers/infiniband/hw/mlx5/srq_cmd.c MLX5_SET(wq, wq, log_wq_sz, in->log_size);
wq 34 drivers/infiniband/hw/mlx5/srq_cmd.c MLX5_SET(wq, wq, page_offset, in->page_offset);
wq 35 drivers/infiniband/hw/mlx5/srq_cmd.c MLX5_SET(wq, wq, lwm, in->lwm);
wq 36 drivers/infiniband/hw/mlx5/srq_cmd.c MLX5_SET(wq, wq, pd, in->pd);
wq 37 drivers/infiniband/hw/mlx5/srq_cmd.c MLX5_SET64(wq, wq, dbr_addr, in->db_record);
wq 55 drivers/infiniband/hw/mlx5/srq_cmd.c static void get_wq(void *wq, struct mlx5_srq_attr *in)
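The MLX5_SET()/MLX5_GET() entries above address named bit-fields inside an opaque firmware command buffer. A greatly simplified userspace analogue of that idea follows; the real macros derive offsets and widths from the generated layouts in mlx5_ifc.h and operate on big-endian 32-bit words, so everything here (the field table, offsets, helper names) is illustrative only:

/* Simplified (offset, width) bit-field accessors, sketching the
 * MLX5_SET/MLX5_GET idea; not the driver's actual encoding. */
#include <assert.h>
#include <stdint.h>
#include <string.h>

struct field { unsigned bit_off, width; };

static const struct field wq_log_wq_sz = { .bit_off = 64, .width = 5 };

static void set_field(uint8_t *base, struct field f, uint32_t val)
{
        for (unsigned i = 0; i < f.width; i++) {
                unsigned bit = f.bit_off + i;
                uint8_t mask = 1u << (bit & 7);

                if (val & (1u << (f.width - 1 - i)))    /* MSB first */
                        base[bit >> 3] |= mask;
                else
                        base[bit >> 3] &= ~mask;
        }
}

static uint32_t get_field(const uint8_t *base, struct field f)
{
        uint32_t val = 0;

        for (unsigned i = 0; i < f.width; i++)
                val = (val << 1) | ((base[(f.bit_off + i) >> 3] >>
                                     ((f.bit_off + i) & 7)) & 1);
        return val;
}

int main(void)
{
        uint8_t wq[32];

        memset(wq, 0, sizeof(wq));
        set_field(wq, wq_log_wq_sz, 10);        /* log_wq_sz = 10 */
        assert(get_field(wq, wq_log_wq_sz) == 10);
        return 0;
}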
wq 57 drivers/infiniband/hw/mlx5/srq_cmd.c if (MLX5_GET(wq, wq, wq_signature))
wq 59 drivers/infiniband/hw/mlx5/srq_cmd.c in->log_page_size = MLX5_GET(wq, wq, log_wq_pg_sz);
wq 60 drivers/infiniband/hw/mlx5/srq_cmd.c in->wqe_shift = MLX5_GET(wq, wq, log_wq_stride) - 4;
wq 61 drivers/infiniband/hw/mlx5/srq_cmd.c in->log_size = MLX5_GET(wq, wq, log_wq_sz);
wq 62 drivers/infiniband/hw/mlx5/srq_cmd.c in->page_offset = MLX5_GET(wq, wq, page_offset);
wq 63 drivers/infiniband/hw/mlx5/srq_cmd.c in->lwm = MLX5_GET(wq, wq, lwm);
wq 64 drivers/infiniband/hw/mlx5/srq_cmd.c in->pd = MLX5_GET(wq, wq, pd);
wq 65 drivers/infiniband/hw/mlx5/srq_cmd.c in->db_record = MLX5_GET64(wq, wq, dbr_addr);
wq 305 drivers/infiniband/hw/mlx5/srq_cmd.c void *wq;
wq 322 drivers/infiniband/hw/mlx5/srq_cmd.c wq = MLX5_ADDR_OF(rmpc, rmpc, wq);
wq 326 drivers/infiniband/hw/mlx5/srq_cmd.c set_wq(wq, in);
wq 327 drivers/infiniband/hw/mlx5/srq_cmd.c memcpy(MLX5_ADDR_OF(rmpc, rmpc, wq.pas), in->pas, pas_size);
wq 359 drivers/infiniband/hw/mlx5/srq_cmd.c void *wq;
wq 377 drivers/infiniband/hw/mlx5/srq_cmd.c wq = MLX5_ADDR_OF(rmpc, rmpc, wq);
wq 382 drivers/infiniband/hw/mlx5/srq_cmd.c MLX5_SET(wq, wq, lwm, lwm);
wq 422 drivers/infiniband/hw/mlx5/srq_cmd.c get_wq(MLX5_ADDR_OF(rmpc, rmpc, wq), out);
wq 438 drivers/infiniband/hw/mlx5/srq_cmd.c void *wq;
wq 450 drivers/infiniband/hw/mlx5/srq_cmd.c wq = MLX5_ADDR_OF(xrqc, xrqc, wq);
wq 452 drivers/infiniband/hw/mlx5/srq_cmd.c set_wq(wq, in);
wq 453 drivers/infiniband/hw/mlx5/srq_cmd.c memcpy(MLX5_ADDR_OF(xrqc, xrqc, wq.pas), in->pas, pas_size);
wq 527 drivers/infiniband/hw/mlx5/srq_cmd.c get_wq(MLX5_ADDR_OF(xrqc, xrqc, wq), out);
wq 484 drivers/infiniband/hw/mthca/mthca_cq.c struct mthca_wq *wq;
wq 534 drivers/infiniband/hw/mthca/mthca_cq.c wq = &(*cur_qp)->sq;
wq 536 drivers/infiniband/hw/mthca/mthca_cq.c >> wq->wqe_shift);
wq 542 drivers/infiniband/hw/mthca/mthca_cq.c wq = NULL;
wq 548 drivers/infiniband/hw/mthca/mthca_cq.c wq = &(*cur_qp)->rq;
wq 550 drivers/infiniband/hw/mthca/mthca_cq.c wqe_index = wqe >> wq->wqe_shift;
wq 557 drivers/infiniband/hw/mthca/mthca_cq.c wqe_index = wq->max - 1;
wq 561 drivers/infiniband/hw/mthca/mthca_cq.c if (wq) {
wq 562 drivers/infiniband/hw/mthca/mthca_cq.c if (wq->last_comp < wqe_index)
wq 563 drivers/infiniband/hw/mthca/mthca_cq.c wq->tail += wqe_index - wq->last_comp;
wq 565 drivers/infiniband/hw/mthca/mthca_cq.c wq->tail += wqe_index + wq->max - wq->last_comp;
wq 567 drivers/infiniband/hw/mthca/mthca_cq.c wq->last_comp = wqe_index;
wq 230 drivers/infiniband/hw/mthca/mthca_qp.c static void mthca_wq_reset(struct mthca_wq *wq)
wq 232 drivers/infiniband/hw/mthca/mthca_qp.c wq->next_ind = 0;
wq 233 drivers/infiniband/hw/mthca/mthca_qp.c wq->last_comp = wq->max - 1;
wq 234 drivers/infiniband/hw/mthca/mthca_qp.c wq->head = 0;
wq 235 drivers/infiniband/hw/mthca/mthca_qp.c wq->tail = 0;
wq 1566 drivers/infiniband/hw/mthca/mthca_qp.c static inline int mthca_wq_overflow(struct mthca_wq *wq, int nreq,
wq 1572 drivers/infiniband/hw/mthca/mthca_qp.c cur = wq->head - wq->tail;
wq 1573 drivers/infiniband/hw/mthca/mthca_qp.c if (likely(cur + nreq < wq->max))
wq 1578 drivers/infiniband/hw/mthca/mthca_qp.c cur = wq->head - wq->tail;
wq 1581 drivers/infiniband/hw/mthca/mthca_qp.c return cur + nreq >= wq->max;
wq 1784 drivers/infiniband/hw/ocrdma/ocrdma_sli.h } wq;
wq 2588 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c wqe_idx = (le32_to_cpu(cqe->wq.wqeidx) &
wq 2903 drivers/infiniband/hw/qedr/verbs.c static inline int qedr_wq_is_full(struct qedr_qp_hwq_info *wq)
wq 2905 drivers/infiniband/hw/qedr/verbs.c return (((wq->prod + 1) % wq->max_wr) == wq->cons);
wq 826 drivers/infiniband/sw/rdmavt/qp.c rq->wq = vmalloc_user(sizeof(struct rvt_rwq) + size);
wq 827 drivers/infiniband/sw/rdmavt/qp.c if (!rq->wq)
wq 833 drivers/infiniband/sw/rdmavt/qp.c rq->kwq->curr_wq = rq->wq->wq;
wq 840 drivers/infiniband/sw/rdmavt/qp.c rq->kwq->curr_wq = rq->kwq->wq;
wq 1233 drivers/infiniband/sw/rdmavt/qp.c if (!qp->r_rq.wq) {
wq 1246 drivers/infiniband/sw/rdmavt/qp.c qp->r_rq.wq);
wq 1373 drivers/infiniband/sw/rdmavt/qp.c struct rvt_rwq *wq = NULL;
wq 1379 drivers/infiniband/sw/rdmavt/qp.c wq = qp->r_rq.wq;
wq 1380 drivers/infiniband/sw/rdmavt/qp.c head = RDMA_READ_UAPI_ATOMIC(wq->head);
wq 1381 drivers/infiniband/sw/rdmavt/qp.c tail = RDMA_READ_UAPI_ATOMIC(wq->tail);
wq 1399 drivers/infiniband/sw/rdmavt/qp.c RDMA_WRITE_UAPI_ATOMIC(wq->tail, tail);
wq 1825 drivers/infiniband/sw/rdmavt/qp.c struct rvt_krwq *wq = qp->r_rq.kwq;
wq 1831 drivers/infiniband/sw/rdmavt/qp.c if (!(ib_rvt_state_ops[qp->state] & RVT_POST_RECV_OK) || !wq) {
wq 1847 drivers/infiniband/sw/rdmavt/qp.c next = wq->head + 1;
wq 1850 drivers/infiniband/sw/rdmavt/qp.c if (next == READ_ONCE(wq->tail)) {
wq 1865 drivers/infiniband/sw/rdmavt/qp.c wqe = rvt_get_rwqe_ptr(&qp->r_rq, wq->head);
wq 1877 drivers/infiniband/sw/rdmavt/qp.c smp_store_release(&wq->head, next);
wq 2247 drivers/infiniband/sw/rdmavt/qp.c struct rvt_krwq *wq;
wq 2261 drivers/infiniband/sw/rdmavt/qp.c wq = srq->rq.kwq;
wq 2262 drivers/infiniband/sw/rdmavt/qp.c next = wq->head + 1;
wq 2265 drivers/infiniband/sw/rdmavt/qp.c if (next == READ_ONCE(wq->tail)) {
wq 2271 drivers/infiniband/sw/rdmavt/qp.c wqe = rvt_get_rwqe_ptr(&srq->rq, wq->head);
wq 2280 drivers/infiniband/sw/rdmavt/qp.c smp_store_release(&wq->head, next);
wq 2390 drivers/infiniband/sw/rdmavt/qp.c head = RDMA_READ_UAPI_ATOMIC(rq->wq->head);
wq 2412 drivers/infiniband/sw/rdmavt/qp.c struct rvt_rwq *wq;
wq 2440 drivers/infiniband/sw/rdmavt/qp.c wq = rq->wq;
wq 2441 drivers/infiniband/sw/rdmavt/qp.c tail = RDMA_READ_UAPI_ATOMIC(wq->tail);
wq 2469 drivers/infiniband/sw/rdmavt/qp.c RDMA_WRITE_UAPI_ATOMIC(wq->tail, tail);
wq 114 drivers/infiniband/sw/rdmavt/rc.c head = RDMA_READ_UAPI_ATOMIC(qp->r_rq.wq->head);
wq 115 drivers/infiniband/sw/rdmavt/rc.c tail = RDMA_READ_UAPI_ATOMIC(qp->r_rq.wq->tail);
wq 113 drivers/infiniband/sw/rdmavt/srq.c srq->ip = rvt_create_mmap_info(dev, s, udata, srq->rq.wq);
wq 215 drivers/infiniband/sw/rdmavt/srq.c owq = srq->rq.wq;
wq 254 drivers/infiniband/sw/rdmavt/srq.c srq->rq.wq = tmp_rq.wq;
wq 255 drivers/infiniband/sw/rdmavt/srq.c RDMA_WRITE_UAPI_ATOMIC(tmp_rq.wq->head, n);
wq 256 drivers/infiniband/sw/rdmavt/srq.c RDMA_WRITE_UAPI_ATOMIC(tmp_rq.wq->tail, 0);
wq 274 drivers/infiniband/sw/rdmavt/srq.c rvt_update_mmap_info(dev, ip, s, tmp_rq.wq);
wq 351 drivers/infiniband/ulp/ipoib/ipoib.h struct workqueue_struct *wq;
wq 480 drivers/infiniband/ulp/ipoib/ipoib_cm.c queue_delayed_work(priv->wq,
wq 581 drivers/infiniband/ulp/ipoib/ipoib_cm.c queue_work(priv->wq, &priv->cm.rx_reap_task);
wq 608 drivers/infiniband/ulp/ipoib/ipoib_cm.c queue_work(priv->wq, &priv->cm.rx_reap_task);
wq 861 drivers/infiniband/ulp/ipoib/ipoib_cm.c queue_work(priv->wq, &priv->cm.reap_task);
wq 1292 drivers/infiniband/ulp/ipoib/ipoib_cm.c queue_work(priv->wq, &priv->cm.reap_task);
wq 1320 drivers/infiniband/ulp/ipoib/ipoib_cm.c queue_work(priv->wq, &priv->cm.start_task);
wq 1331 drivers/infiniband/ulp/ipoib/ipoib_cm.c queue_work(priv->wq, &priv->cm.reap_task);
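The rdmavt post-receive entries above (qp.c:1847-1877) show the single-producer publish step of a shared ring: the WQE slot is filled first, then the new head is published with smp_store_release() so a consumer never observes the index before the data. A userspace sketch using C11 atomics as a stand-in for the kernel primitives (the ring layout is illustrative):

/* Fill-then-publish, modelled on the rdmavt entries above. */
#include <assert.h>
#include <stdatomic.h>
#include <stdint.h>

#define SLOTS 8

static uint64_t slot[SLOTS];
static _Atomic unsigned int head, tail;

static int post(uint64_t wr_id)
{
        unsigned int h = atomic_load_explicit(&head, memory_order_relaxed);
        unsigned int next = (h + 1) % SLOTS;

        if (next == atomic_load_explicit(&tail, memory_order_acquire))
                return -1;      /* ring full (-ENOMEM in rdmavt) */
        slot[h] = wr_id;        /* fill the slot before publishing it */
        atomic_store_explicit(&head, next, memory_order_release);
        return 0;
}

int main(void)
{
        assert(post(42) == 0);
        assert(slot[0] == 42);
        return 0;
}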
wq 1331 drivers/infiniband/ulp/ipoib/ipoib_cm.c queue_work(priv->wq, &priv->cm.reap_task);
wq 1472 drivers/infiniband/ulp/ipoib/ipoib_cm.c queue_work(priv->wq, &priv->cm.skb_task);
wq 1505 drivers/infiniband/ulp/ipoib/ipoib_cm.c queue_delayed_work(priv->wq,
wq 430 drivers/infiniband/ulp/ipoib/ipoib_ib.c queue_work(priv->wq, &qp_work->work);
wq 702 drivers/infiniband/ulp/ipoib/ipoib_ib.c queue_delayed_work(priv->wq, &priv->ah_reap_task,
wq 711 drivers/infiniband/ulp/ipoib/ipoib_ib.c flush_workqueue(priv->wq);
wq 905 drivers/infiniband/ulp/ipoib/ipoib_ib.c queue_delayed_work(priv->wq, &priv->ah_reap_task,
wq 1232 drivers/infiniband/ulp/ipoib/ipoib_main.c queue_work(priv->wq, &priv->restart_task);
wq 1359 drivers/infiniband/ulp/ipoib/ipoib_main.c queue_delayed_work(priv->wq, &priv->neigh_reap_task,
wq 1536 drivers/infiniband/ulp/ipoib/ipoib_main.c queue_delayed_work(priv->wq, &priv->neigh_reap_task,
wq 1758 drivers/infiniband/ulp/ipoib/ipoib_main.c priv->wq = alloc_ordered_workqueue("ipoib_wq", WQ_MEM_RECLAIM);
wq 1759 drivers/infiniband/ulp/ipoib/ipoib_main.c if (!priv->wq) {
wq 1806 drivers/infiniband/ulp/ipoib/ipoib_main.c if (priv->wq) {
wq 1807 drivers/infiniband/ulp/ipoib/ipoib_main.c destroy_workqueue(priv->wq);
wq 1808 drivers/infiniband/ulp/ipoib/ipoib_main.c priv->wq = NULL;
wq 1981 drivers/infiniband/ulp/ipoib/ipoib_main.c if (priv->wq) {
wq 1982 drivers/infiniband/ulp/ipoib/ipoib_main.c flush_workqueue(priv->wq);
wq 1983 drivers/infiniband/ulp/ipoib/ipoib_main.c destroy_workqueue(priv->wq);
wq 1984 drivers/infiniband/ulp/ipoib/ipoib_main.c priv->wq = NULL;
wq 100 drivers/infiniband/ulp/ipoib/ipoib_multicast.c queue_delayed_work(priv->wq, &priv->mcast_task, 0);
wq 107 drivers/infiniband/ulp/ipoib/ipoib_multicast.c queue_delayed_work(priv->wq, &priv->mcast_task, HZ);
wq 109 drivers/infiniband/ulp/ipoib/ipoib_multicast.c queue_delayed_work(priv->wq, &priv->mcast_task, 0);
wq 401 drivers/infiniband/ulp/ipoib/ipoib_multicast.c queue_work(priv->wq, &priv->carrier_on_task);
wq 663 drivers/infiniband/ulp/ipoib/ipoib_multicast.c queue_delayed_work(priv->wq, &priv->mcast_task,
wq 50 drivers/iommu/amd_iommu_v2.c wait_queue_head_t wq; /* To wait for count == 0 */
wq 65 drivers/iommu/amd_iommu_v2.c wait_queue_head_t wq;
wq 150 drivers/iommu/amd_iommu_v2.c wake_up(&dev_state->wq);
wq 263 drivers/iommu/amd_iommu_v2.c wake_up(&pasid_state->wq);
wq 269 drivers/iommu/amd_iommu_v2.c wait_event(pasid_state->wq, !atomic_read(&pasid_state->count));
wq 632 drivers/iommu/amd_iommu_v2.c init_waitqueue_head(&pasid_state->wq);
wq 757 drivers/iommu/amd_iommu_v2.c init_waitqueue_head(&dev_state->wq);
wq 856 drivers/iommu/amd_iommu_v2.c wait_event(dev_state->wq, !atomic_read(&dev_state->count));
wq 1867 drivers/lightnvm/pblk-core.c struct workqueue_struct *wq)
wq 1878 drivers/lightnvm/pblk-core.c queue_work(wq, &line_ws->ws);
wq 802 drivers/lightnvm/pblk.h struct workqueue_struct *wq);
wq 146 drivers/md/bcache/closure.h struct workqueue_struct *wq;
wq 228 drivers/md/bcache/closure.h struct workqueue_struct *wq)
wq 232 drivers/md/bcache/closure.h cl->wq = wq;
wq 239 drivers/md/bcache/closure.h struct workqueue_struct *wq = cl->wq;
wq 246 drivers/md/bcache/closure.h if (wq) {
wq 248 drivers/md/bcache/closure.h BUG_ON(!queue_work(wq, &cl->work));
wq 371 drivers/md/bcache/closure.h struct workqueue_struct *wq,
wq 375 drivers/md/bcache/closure.h continue_at_nobarrier(cl, fn, wq);
wq 113 drivers/md/bcache/movinggc.c continue_at(cl, write_moving_finish, op->wq);
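The amd_iommu_v2.c entries above pair an atomic reference count with a wait queue head: teardown sleeps in wait_event(wq, !atomic_read(&count)) while the last put does wake_up(). A minimal sketch of the drain idiom (hypothetical object, not the driver's):

        #include <linux/atomic.h>
        #include <linux/wait.h>

        struct drained_obj {
                atomic_t count;
                wait_queue_head_t wq;   /* "to wait for count == 0" */
        };

        static void obj_put(struct drained_obj *obj)
        {
                if (atomic_dec_and_test(&obj->count))
                        wake_up(&obj->wq);      /* unblock teardown */
        }

        static void obj_drain(struct drained_obj *obj)
        {
                obj_put(obj);                   /* drop our own reference */
                wait_event(obj->wq, !atomic_read(&obj->count));
        }

There is no missed-wakeup race here: wait_event() tests the condition before sleeping, so a wake_up() that fires first is harmless.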
wq 123 drivers/md/bcache/movinggc.c continue_at(cl, write_moving, io->op.wq);
wq 158 drivers/md/bcache/movinggc.c io->op.wq = c->moving_gc_wq;
wq 94 drivers/md/bcache/request.c continue_at(cl, bch_data_insert_start, op->wq);
wq 148 drivers/md/bcache/request.c continue_at(cl, bch_data_insert_keys, op->wq);
wq 191 drivers/md/bcache/request.c set_closure_fn(cl, bch_data_insert_error, op->wq);
wq 225 drivers/md/bcache/request.c continue_at(cl, bch_data_insert_keys, op->wq);
wq 264 drivers/md/bcache/request.c continue_at(cl, bch_data_insert_keys, op->wq);
wq 294 drivers/md/bcache/request.c continue_at(cl, bch_data_insert_keys, op->wq);
wq 755 drivers/md/bcache/request.c s->iop.wq = bcache_wq;
wq 9 drivers/md/bcache/request.h struct workqueue_struct *wq;
wq 21 drivers/md/dm-bio-prison-v2.c struct workqueue_struct *wq;
wq 36 drivers/md/dm-bio-prison-v2.c struct dm_bio_prison_v2 *dm_bio_prison_create_v2(struct workqueue_struct *wq)
wq 44 drivers/md/dm-bio-prison-v2.c prison->wq = wq;
wq 200 drivers/md/dm-bio-prison-v2.c queue_work(prison->wq, cell->quiesce_continuation);
wq 279 drivers/md/dm-bio-prison-v2.c queue_work(prison->wq, continuation);
wq 56 drivers/md/dm-bio-prison-v2.h struct dm_bio_prison_v2 *dm_bio_prison_create_v2(struct workqueue_struct *wq);
wq 132 drivers/md/dm-cache-target.c static inline void queue_continuation(struct workqueue_struct *wq,
wq 135 drivers/md/dm-cache-target.c queue_work(wq, &k->ws);
wq 161 drivers/md/dm-cache-target.c struct workqueue_struct *wq;
wq 202 drivers/md/dm-cache-target.c queue_work(b->wq, ws);
wq 219 drivers/md/dm-cache-target.c struct workqueue_struct *wq)
wq 225 drivers/md/dm-cache-target.c b->wq = wq;
wq 236 drivers/md/dm-cache-target.c queue_work(b->wq, &b->commit_work);
wq 450 drivers/md/dm-cache-target.c struct workqueue_struct *wq;
wq 530 drivers/md/dm-cache-target.c queue_work(cache->wq, &cache->deferred_bio_worker);
wq 538 drivers/md/dm-cache-target.c queue_work(cache->wq, &cache->migration_worker);
wq 1183 drivers/md/dm-cache-target.c queue_continuation(mg->cache->wq, &mg->k);
wq 1225 drivers/md/dm-cache-target.c queue_continuation(cache->wq, &mg->k);
wq 1637 drivers/md/dm-cache-target.c queue_work(cache->wq, &mg->k.ws);
wq 1949 drivers/md/dm-cache-target.c queue_delayed_work(cache->wq, &cache->waker, COMMIT_PERIOD);
wq 1995 drivers/md/dm-cache-target.c if (cache->wq)
wq 1996 drivers/md/dm-cache-target.c destroy_workqueue(cache->wq);
wq 2607 drivers/md/dm-cache-target.c cache->wq = alloc_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM, 0);
wq 2608 drivers/md/dm-cache-target.c if (!cache->wq) {
wq 2616 drivers/md/dm-cache-target.c cache->prison = dm_bio_prison_create_v2(cache->wq);
wq 2649 drivers/md/dm-cache-target.c issue_op, cache, cache->wq);
wq 2871 drivers/md/dm-cache-target.c drain_workqueue(cache->wq);
wq 137 drivers/md/dm-clone-target.c struct workqueue_struct *wq;
wq 258 drivers/md/dm-clone-target.c queue_work(clone->wq, &clone->worker);
wq 1317 drivers/md/dm-clone-target.c queue_delayed_work(clone->wq, &clone->waker, COMMIT_PERIOD);
wq 1900 drivers/md/dm-clone-target.c clone->wq = alloc_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM, 0);
wq 1901 drivers/md/dm-clone-target.c if (!clone->wq) {
wq 1949 drivers/md/dm-clone-target.c destroy_workqueue(clone->wq);
wq 1980 drivers/md/dm-clone-target.c destroy_workqueue(clone->wq);
wq 2025 drivers/md/dm-clone-target.c flush_workqueue(clone->wq);
wq 94 drivers/md/dm-core.h struct workqueue_struct *wq;
wq 1150 drivers/md/dm-era-target.c struct workqueue_struct *wq;
wq 1205 drivers/md/dm-era-target.c queue_work(era->wq, &era->worker);
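The dm-cache, dm-clone and dm-era entries above trace the usual lifetime of a target-private workqueue: allocate it with WQ_MEM_RECLAIM at construction (these queues sit in the I/O path), queue_work() from hot paths, and drain or flush before destroy_workqueue() in the destructor. Roughly, with hypothetical fields:

        #include <linux/workqueue.h>

        struct my_target {
                struct workqueue_struct *wq;
                struct work_struct worker;
        };

        static int my_target_create(struct my_target *t)
        {
                /* WQ_MEM_RECLAIM: this queue may run under memory pressure. */
                t->wq = alloc_workqueue("my-target", WQ_MEM_RECLAIM, 0);
                if (!t->wq)
                        return -ENOMEM;
                return 0;
        }

        static void my_target_destroy(struct my_target *t)
        {
                if (t->wq) {
                        drain_workqueue(t->wq);  /* let queued work finish */
                        destroy_workqueue(t->wq);
                        t->wq = NULL;
                }
        }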
wq 1372 drivers/md/dm-era-target.c flush_workqueue(era->wq);
wq 1395 drivers/md/dm-era-target.c if (era->wq)
wq 1396 drivers/md/dm-era-target.c destroy_workqueue(era->wq);
wq 1498 drivers/md/dm-era-target.c era->wq = alloc_ordered_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM);
wq 1499 drivers/md/dm-era-target.c if (!era->wq) {
wq 251 drivers/md/dm-thin.c struct workqueue_struct *wq;
wq 438 drivers/md/dm-thin.c queue_work(pool->wq, &pool->worker);
wq 2434 drivers/md/dm-thin.c queue_delayed_work(pool->wq, &pool->waker, COMMIT_PERIOD);
wq 2476 drivers/md/dm-thin.c queue_work(pool->wq, &pw->worker);
wq 2606 drivers/md/dm-thin.c queue_delayed_work(pool->wq, &pool->no_space_timeout, no_space_timeout);
wq 2934 drivers/md/dm-thin.c if (pool->wq)
wq 2935 drivers/md/dm-thin.c destroy_workqueue(pool->wq);
wq 2999 drivers/md/dm-thin.c pool->wq = alloc_ordered_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM);
wq 3000 drivers/md/dm-thin.c if (!pool->wq) {
wq 3069 drivers/md/dm-thin.c destroy_workqueue(pool->wq);
wq 3689 drivers/md/dm-thin.c flush_workqueue(pool->wq);
wq 19 drivers/md/dm-zoned-reclaim.c struct workqueue_struct *wq;
wq 463 drivers/md/dm-zoned-reclaim.c mod_delayed_work(zrc->wq, &zrc->work, DMZ_IDLE_PERIOD);
wq 527 drivers/md/dm-zoned-reclaim.c zrc->wq = alloc_ordered_workqueue("dmz_rwq_%s", WQ_MEM_RECLAIM,
wq 529 drivers/md/dm-zoned-reclaim.c if (!zrc->wq) {
wq 535 drivers/md/dm-zoned-reclaim.c queue_delayed_work(zrc->wq, &zrc->work, 0);
wq 552 drivers/md/dm-zoned-reclaim.c destroy_workqueue(zrc->wq);
wq 570 drivers/md/dm-zoned-reclaim.c queue_delayed_work(zrc->wq, &zrc->work, DMZ_IDLE_PERIOD);
wq 587 drivers/md/dm-zoned-reclaim.c mod_delayed_work(zrc->wq, &zrc->work, 0);
wq 696 drivers/md/dm.c queue_work(md->wq, &md->work);
wq 1882 drivers/md/dm.c if (md->wq)
wq 1883 drivers/md/dm.c destroy_workqueue(md->wq);
wq 2000 drivers/md/dm.c md->wq = alloc_workqueue("kdmflush", WQ_MEM_RECLAIM, 0);
wq 2001 drivers/md/dm.c if (!md->wq)
wq 2490 drivers/md/dm.c queue_work(md->wq, &md->work);
wq 2640 drivers/md/dm.c flush_workqueue(md->wq);
wq 2866 drivers/md/dm.c flush_workqueue(md->wq);
wq 8487 drivers/md/md.c DEFINE_WAIT(wq);
wq 8502 drivers/md/md.c prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE);
wq 8515 drivers/md/md.c finish_wait(&resync_wait, &wq);
wq 8518 drivers/md/md.c finish_wait(&resync_wait, &wq);
wq 309 drivers/media/i2c/msp3400-driver.c wake_up_interruptible(&state->wq);
wq 316 drivers/media/i2c/msp3400-driver.c add_wait_queue(&state->wq, &wait);
wq 327 drivers/media/i2c/msp3400-driver.c remove_wait_queue(&state->wq, &wait);
wq 714 drivers/media/i2c/msp3400-driver.c init_waitqueue_head(&state->wq);
wq 110 drivers/media/i2c/msp3400-driver.h wait_queue_head_t wq;
wq 50 drivers/media/i2c/saa7110.c wait_queue_head_t wq;
wq 186 drivers/media/i2c/saa7110.c prepare_to_wait(&decoder->wq, &wait, TASK_UNINTERRUPTIBLE);
wq 188 drivers/media/i2c/saa7110.c finish_wait(&decoder->wq, &wait);
wq 221 drivers/media/i2c/saa7110.c prepare_to_wait(&decoder->wq, &wait, TASK_UNINTERRUPTIBLE);
wq 223 drivers/media/i2c/saa7110.c finish_wait(&decoder->wq, &wait);
wq 402 drivers/media/i2c/saa7110.c init_waitqueue_head(&decoder->wq);
wq 731 drivers/media/pci/ddbridge/ddbridge-core.c output->dma->wq,
wq 761 drivers/media/pci/ddbridge/ddbridge-core.c input->dma->wq,
wq 782 drivers/media/pci/ddbridge/ddbridge-core.c poll_wait(file, &input->dma->wq, wait);
wq 783 drivers/media/pci/ddbridge/ddbridge-core.c poll_wait(file, &output->dma->wq, wait);
wq 2192 drivers/media/pci/ddbridge/ddbridge-core.c wake_up(&dma->wq);
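The md.c entries above (lines 8487-8518) use the open-coded sleep primitive underneath wait_event(): DEFINE_WAIT(), then a loop of prepare_to_wait() with TASK_INTERRUPTIBLE, a condition re-check, schedule(), and finally finish_wait(). The canonical shape, with a placeholder condition:

        #include <linux/sched/signal.h>
        #include <linux/wait.h>

        static DECLARE_WAIT_QUEUE_HEAD(my_wait);

        static void wait_for(bool (*cond)(void))
        {
                DEFINE_WAIT(w);

                for (;;) {
                        prepare_to_wait(&my_wait, &w, TASK_INTERRUPTIBLE);
                        if (cond() || signal_pending(current))
                                break;
                        schedule();     /* until wake_up(&my_wait) */
                }
                finish_wait(&my_wait, &w);
        }

prepare_to_wait() sets the task state before the condition check, which is what closes the race against a waker firing between the check and schedule().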
wq 2218 drivers/media/pci/ddbridge/ddbridge-core.c wake_up(&dma->wq);
wq 2259 drivers/media/pci/ddbridge/ddbridge-core.c init_waitqueue_head(&dma->wq);
wq 155 drivers/media/pci/ddbridge/ddbridge.h wait_queue_head_t wq;
wq 325 drivers/media/pci/ddbridge/ddbridge.h struct workqueue_struct *wq;
wq 345 drivers/media/pci/dm1105/dm1105.c struct workqueue_struct *wq;
wq 716 drivers/media/pci/dm1105/dm1105.c queue_work(dev->wq, &dev->work);
wq 1129 drivers/media/pci/dm1105/dm1105.c dev->wq = create_singlethread_workqueue(dev->wqn);
wq 1130 drivers/media/pci/dm1105/dm1105.c if (!dev->wq) {
wq 1143 drivers/media/pci/dm1105/dm1105.c destroy_workqueue(dev->wq);
wq 82 drivers/media/pci/netup_unidvb/netup_unidvb.h wait_queue_head_t wq;
wq 118 drivers/media/pci/netup_unidvb/netup_unidvb.h struct workqueue_struct *wq;
wq 241 drivers/media/pci/netup_unidvb/netup_unidvb_core.c queue_work(dma->ndev->wq, &dma->work);
wq 811 drivers/media/pci/netup_unidvb/netup_unidvb_core.c ndev->wq = create_singlethread_workqueue(NETUP_UNIDVB_NAME);
wq 812 drivers/media/pci/netup_unidvb/netup_unidvb_core.c if (!ndev->wq) {
wq 963 drivers/media/pci/netup_unidvb/netup_unidvb_core.c destroy_workqueue(ndev->wq);
wq 1000 drivers/media/pci/netup_unidvb/netup_unidvb_core.c destroy_workqueue(ndev->wq);
wq 115 drivers/media/pci/netup_unidvb/netup_unidvb_i2c.c wake_up(&i2c->wq);
wq 222 drivers/media/pci/netup_unidvb/netup_unidvb_i2c.c if (wait_event_timeout(i2c->wq,
wq 308 drivers/media/pci/netup_unidvb/netup_unidvb_i2c.c init_waitqueue_head(&i2c->wq);
wq 104 drivers/media/platform/mtk-vcodec/vdec_vpu_if.c init_waitqueue_head(&vpu->wq);
wq 33 drivers/media/platform/mtk-vcodec/vdec_vpu_if.h wait_queue_head_t wq;
wq 122 drivers/media/platform/mtk-vpu/mtk_vpu.c struct workqueue_struct *wq;
wq 141 drivers/media/platform/mtk-vpu/mtk_vpu.c wait_queue_head_t wq;
wq 579 drivers/media/platform/mtk-vpu/mtk_vpu.c ret = wait_event_interruptible_timeout(run->wq,
wq 612 drivers/media/platform/mtk-vpu/mtk_vpu.c wake_up_interruptible(&vpu->run.wq);
wq 755 drivers/media/platform/mtk-vpu/mtk_vpu.c queue_work(vpu->wdt.wq, &vpu->wdt.ws);
wq 809 drivers/media/platform/mtk-vpu/mtk_vpu.c vpu->wdt.wq = create_singlethread_workqueue("vpu_wdt");
wq 810 drivers/media/platform/mtk-vpu/mtk_vpu.c if (!vpu->wdt.wq) {
wq 872 drivers/media/platform/mtk-vpu/mtk_vpu.c init_waitqueue_head(&vpu->run.wq);
wq 910 drivers/media/platform/mtk-vpu/mtk_vpu.c destroy_workqueue(vpu->wdt.wq);
wq 930 drivers/media/platform/mtk-vpu/mtk_vpu.c if (vpu->wdt.wq) {
wq 931 drivers/media/platform/mtk-vpu/mtk_vpu.c flush_workqueue(vpu->wdt.wq);
wq 932 drivers/media/platform/mtk-vpu/mtk_vpu.c destroy_workqueue(vpu->wdt.wq);
wq 279 drivers/media/platform/vsp1/vsp1_pipe.c init_waitqueue_head(&pipe->wq);
wq 337 drivers/media/platform/vsp1/vsp1_pipe.c ret = wait_event_timeout(pipe->wq, vsp1_pipeline_stopped(pipe),
wq 117 drivers/media/platform/vsp1/vsp1_pipe.h wait_queue_head_t wq;
wq 461 drivers/media/platform/vsp1/vsp1_video.c wake_up(&pipe->wq);
wq 1202 drivers/media/platform/vsp1/vsp1_video.c ret = wait_event_timeout(pipe->wq, vsp1_pipeline_stopped(pipe),
wq 1095 drivers/media/usb/cx231xx/cx231xx-core.c init_waitqueue_head(&dma_q->wq);
wq 1229 drivers/media/usb/cx231xx/cx231xx-core.c init_waitqueue_head(&dma_q->wq);
wq 457 drivers/media/usb/cx231xx/cx231xx-vbi.c init_waitqueue_head(&dma_q->wq);
wq 242 drivers/media/usb/cx231xx/cx231xx.h wait_queue_head_t wq;
wq 1050 drivers/media/usb/em28xx/em28xx-core.c init_waitqueue_head(&dma_q->wq);
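Several entries above (netup_unidvb_i2c.c, mtk_vpu.c, vsp1_pipe.c) follow the interrupt-completion handshake: the IRQ handler records the result and calls wake_up(), while the submitting thread blocks in wait_event_timeout() on the same head. In outline, with an invented device struct:

        #include <linux/interrupt.h>
        #include <linux/jiffies.h>
        #include <linux/wait.h>

        struct my_dev {
                wait_queue_head_t wq;
                bool done;
        };

        static irqreturn_t my_irq(int irq, void *arg)
        {
                struct my_dev *dev = arg;

                dev->done = true;
                wake_up(&dev->wq);
                return IRQ_HANDLED;
        }

        static int my_xfer(struct my_dev *dev)
        {
                dev->done = false;
                /* ...start the hardware transaction... */
                if (!wait_event_timeout(dev->wq, dev->done,
                                        msecs_to_jiffies(200)))
                        return -ETIMEDOUT;      /* 0 means timed out */
                return 0;
        }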
wq 1051 drivers/media/usb/em28xx/em28xx-core.c init_waitqueue_head(&vbi_dma_q->wq);
wq 297 drivers/media/usb/em28xx/em28xx.h wait_queue_head_t wq;
wq 1507 drivers/media/usb/gspca/gspca.c init_waitqueue_head(&gspca_dev->wq);
wq 199 drivers/media/usb/gspca/gspca.h wait_queue_head_t wq; /* wait queue */
wq 46 drivers/media/usb/siano/smsusb.c struct work_struct wq;
wq 72 drivers/media/usb/siano/smsusb.c struct smsusb_urb_t *surb = container_of(work, struct smsusb_urb_t, wq);
wq 146 drivers/media/usb/siano/smsusb.c INIT_WORK(&surb->wq, do_submit_urb);
wq 147 drivers/media/usb/siano/smsusb.c schedule_work(&surb->wq);
wq 650 drivers/media/usb/tm6000/tm6000-video.c init_waitqueue_head(&dma_q->wq);
wq 85 drivers/media/usb/tm6000/tm6000.h wait_queue_head_t wq;
wq 2091 drivers/message/fusion/mptbase.c struct workqueue_struct *wq;
wq 2097 drivers/message/fusion/mptbase.c wq = ioc->reset_work_q;
wq 2101 drivers/message/fusion/mptbase.c destroy_workqueue(wq);
wq 2104 drivers/message/fusion/mptbase.c wq = ioc->fw_event_q;
wq 2107 drivers/message/fusion/mptbase.c destroy_workqueue(wq);
wq 84 drivers/mfd/dln2.c wait_queue_head_t wq;
wq 386 drivers/mfd/dln2.c ret = wait_event_interruptible(dln2->mod_rx_slots[handle].wq,
wq 424 drivers/mfd/dln2.c wake_up_interruptible(&rxs->wq);
wq 760 drivers/mfd/dln2.c init_waitqueue_head(&dln2->mod_rx_slots[i].wq);
wq 455 drivers/misc/cxl/api.c wake_up_all(&ctx->wq);
wq 64 drivers/misc/cxl/context.c init_waitqueue_head(&ctx->wq);
wq 291 drivers/misc/cxl/context.c wake_up_all(&ctx->wq);
wq 557 drivers/misc/cxl/cxl.h wait_queue_head_t wq;
wq 109 drivers/misc/cxl/fault.c wake_up_all(&ctx->wq);
wq 371 drivers/misc/cxl/file.c poll_wait(file, &ctx->wq, poll);
wq 443 drivers/misc/cxl/file.c prepare_to_wait(&ctx->wq, &wait, TASK_INTERRUPTIBLE);
wq 469 drivers/misc/cxl/file.c finish_wait(&ctx->wq, &wait);
wq 517 drivers/misc/cxl/file.c finish_wait(&ctx->wq, &wait);
wq 71 drivers/misc/cxl/irq.c wake_up_all(&ctx->wq);
wq 157 drivers/misc/cxl/irq.c wake_up_all(&ctx->wq);
wq 218 drivers/misc/cxl/irq.c wake_up_all(&ctx->wq);
wq 1289 drivers/misc/mic/scif/scif_api.c static inline void _scif_poll_wait(struct file *f, wait_queue_head_t *wq,
wq 1300 drivers/misc/mic/scif/scif_api.c poll_wait(f, wq, p);
wq 807 drivers/mmc/core/core.c add_wait_queue(&host->wq, &wait);
wq 826 drivers/mmc/core/core.c wake_up(&host->wq);
wq 828 drivers/mmc/core/core.c remove_wait_queue(&host->wq, &wait);
wq 859 drivers/mmc/core/core.c wake_up(&host->wq);
wq 430 drivers/mmc/core/host.c init_waitqueue_head(&host->wq);
wq 585 drivers/mtd/chips/cfi_cmdset_0001.c init_waitqueue_head(&(cfi->chips[i].wq));
wq 784 drivers/mtd/chips/cfi_cmdset_0001.c init_waitqueue_head(&chip->wq);
wq 914 drivers/mtd/chips/cfi_cmdset_0001.c add_wait_queue(&chip->wq, &wait);
wq 917 drivers/mtd/chips/cfi_cmdset_0001.c remove_wait_queue(&chip->wq, &wait);
wq 998 drivers/mtd/chips/cfi_cmdset_0001.c add_wait_queue(&chip->wq, &wait);
wq 1001 drivers/mtd/chips/cfi_cmdset_0001.c remove_wait_queue(&chip->wq, &wait);
wq 1038 drivers/mtd/chips/cfi_cmdset_0001.c wake_up(&chip->wq);
wq 1052 drivers/mtd/chips/cfi_cmdset_0001.c wake_up(&chip->wq);
wq 1087 drivers/mtd/chips/cfi_cmdset_0001.c wake_up(&chip->wq);
wq 1218 drivers/mtd/chips/cfi_cmdset_0001.c add_wait_queue(&chip->wq, &wait);
wq 1221 drivers/mtd/chips/cfi_cmdset_0001.c remove_wait_queue(&chip->wq, &wait);
wq 1294 drivers/mtd/chips/cfi_cmdset_0001.c add_wait_queue(&chip->wq, &wait);
wq 1297 drivers/mtd/chips/cfi_cmdset_0001.c remove_wait_queue(&chip->wq, &wait);
wq 2062 drivers/mtd/chips/cfi_cmdset_0001.c wake_up(&chip->wq);
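The smsusb.c entries above embed a work_struct directly in the per-URB object (the field is even named wq) and recover the container in the handler via container_of(), the standard way to hand context to a work function without a separate allocation. Sketch with hypothetical names:

        #include <linux/workqueue.h>

        struct my_urb {
                struct work_struct wq;  /* named as in smsusb.c */
                int payload;
        };

        static void my_submit(struct work_struct *work)
        {
                struct my_urb *u = container_of(work, struct my_urb, wq);

                /* process u->payload in sleepable process context */
        }

        static void my_defer(struct my_urb *u)
        {
                INIT_WORK(&u->wq, my_submit);
                schedule_work(&u->wq);  /* system workqueue, as smsusb does */
        }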
wq 2569 drivers/mtd/chips/cfi_cmdset_0001.c wake_up(&chip->wq);
wq 2618 drivers/mtd/chips/cfi_cmdset_0001.c wake_up(&chip->wq);
wq 735 drivers/mtd/chips/cfi_cmdset_0002.c init_waitqueue_head(&(cfi->chips[i].wq));
wq 975 drivers/mtd/chips/cfi_cmdset_0002.c add_wait_queue(&chip->wq, &wait);
wq 978 drivers/mtd/chips/cfi_cmdset_0002.c remove_wait_queue(&chip->wq, &wait);
wq 1010 drivers/mtd/chips/cfi_cmdset_0002.c wake_up(&chip->wq);
wq 1125 drivers/mtd/chips/cfi_cmdset_0002.c add_wait_queue(&chip->wq, &wait);
wq 1128 drivers/mtd/chips/cfi_cmdset_0002.c remove_wait_queue(&chip->wq, &wait);
wq 1324 drivers/mtd/chips/cfi_cmdset_0002.c add_wait_queue(&chip->wq, &wait);
wq 1329 drivers/mtd/chips/cfi_cmdset_0002.c remove_wait_queue(&chip->wq, &wait);
wq 1342 drivers/mtd/chips/cfi_cmdset_0002.c wake_up(&chip->wq);
wq 1685 drivers/mtd/chips/cfi_cmdset_0002.c add_wait_queue(&chip->wq, &wait);
wq 1688 drivers/mtd/chips/cfi_cmdset_0002.c remove_wait_queue(&chip->wq, &wait);
wq 1846 drivers/mtd/chips/cfi_cmdset_0002.c add_wait_queue(&cfi->chips[chipnum].wq, &wait);
wq 1851 drivers/mtd/chips/cfi_cmdset_0002.c remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
wq 1917 drivers/mtd/chips/cfi_cmdset_0002.c add_wait_queue(&cfi->chips[chipnum].wq, &wait);
wq 1922 drivers/mtd/chips/cfi_cmdset_0002.c remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
wq 1965 drivers/mtd/chips/cfi_cmdset_0002.c add_wait_queue(&chip->wq, &wait);
wq 1968 drivers/mtd/chips/cfi_cmdset_0002.c remove_wait_queue(&chip->wq, &wait);
wq 2464 drivers/mtd/chips/cfi_cmdset_0002.c add_wait_queue(&chip->wq, &wait);
wq 2467 drivers/mtd/chips/cfi_cmdset_0002.c remove_wait_queue(&chip->wq, &wait);
wq 2563 drivers/mtd/chips/cfi_cmdset_0002.c add_wait_queue(&chip->wq, &wait);
wq 2566 drivers/mtd/chips/cfi_cmdset_0002.c remove_wait_queue(&chip->wq, &wait);
wq 2947 drivers/mtd/chips/cfi_cmdset_0002.c add_wait_queue(&chip->wq, &wait);
wq 2953 drivers/mtd/chips/cfi_cmdset_0002.c remove_wait_queue(&chip->wq, &wait);
wq 2968 drivers/mtd/chips/cfi_cmdset_0002.c wake_up(&chip->wq);
wq 3019 drivers/mtd/chips/cfi_cmdset_0002.c wake_up(&chip->wq);
wq 3045 drivers/mtd/chips/cfi_cmdset_0002.c wake_up(&chip->wq);
wq 159 drivers/mtd/chips/cfi_cmdset_0020.c init_waitqueue_head(&(cfi->chips[i].wq));
wq 298 drivers/mtd/chips/cfi_cmdset_0020.c wake_up(&chip->wq);
wq 354 drivers/mtd/chips/cfi_cmdset_0020.c add_wait_queue(&chip->wq, &wait);
wq 357 drivers/mtd/chips/cfi_cmdset_0020.c remove_wait_queue(&chip->wq, &wait);
wq 379 drivers/mtd/chips/cfi_cmdset_0020.c wake_up(&chip->wq);
wq 488 drivers/mtd/chips/cfi_cmdset_0020.c add_wait_queue(&chip->wq, &wait);
wq 491 drivers/mtd/chips/cfi_cmdset_0020.c remove_wait_queue(&chip->wq, &wait);
wq 545 drivers/mtd/chips/cfi_cmdset_0020.c add_wait_queue(&chip->wq, &wait);
wq 548 drivers/mtd/chips/cfi_cmdset_0020.c remove_wait_queue(&chip->wq, &wait);
wq 598 drivers/mtd/chips/cfi_cmdset_0020.c wake_up(&chip->wq);
wq 602 drivers/mtd/chips/cfi_cmdset_0020.c wake_up(&chip->wq);
wq 782 drivers/mtd/chips/cfi_cmdset_0020.c add_wait_queue(&chip->wq, &wait);
wq 785 drivers/mtd/chips/cfi_cmdset_0020.c remove_wait_queue(&chip->wq, &wait);
wq 811 drivers/mtd/chips/cfi_cmdset_0020.c add_wait_queue(&chip->wq, &wait);
wq 814 drivers/mtd/chips/cfi_cmdset_0020.c remove_wait_queue(&chip->wq, &wait);
wq 888 drivers/mtd/chips/cfi_cmdset_0020.c wake_up(&chip->wq);
wq 1009 drivers/mtd/chips/cfi_cmdset_0020.c add_wait_queue(&chip->wq, &wait);
wq 1013 drivers/mtd/chips/cfi_cmdset_0020.c remove_wait_queue(&chip->wq, &wait);
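The cfi_cmdset entries above repeat one idiom dozens of times: a task that finds the flash chip busy enqueues itself, sets its task state, drops the chip lock, schedules, then dequeues and retries. A hedged outline of that loop (the real drivers hold a spinlock inside struct flchip; a mutex and a bare flag stand in here):

        #include <linux/mutex.h>
        #include <linux/sched.h>
        #include <linux/wait.h>

        static DECLARE_WAIT_QUEUE_HEAD(chip_wq);
        static DEFINE_MUTEX(chip_lock);
        static bool chip_idle;

        static void wait_for_chip(void)
        {
                DECLARE_WAITQUEUE(wait, current);

                mutex_lock(&chip_lock);
                while (!chip_idle) {
                        /* Same order as the cfi code: set state, enqueue,
                         * unlock, sleep. The waker flips chip_idle under
                         * the lock and then calls wake_up(&chip_wq). */
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        add_wait_queue(&chip_wq, &wait);
                        mutex_unlock(&chip_lock);
                        schedule();
                        remove_wait_queue(&chip_wq, &wait);
                        mutex_lock(&chip_lock);
                }
                mutex_unlock(&chip_lock);
        }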
wq 1028 drivers/mtd/chips/cfi_cmdset_0020.c wake_up(&chip->wq);
wq 1080 drivers/mtd/chips/cfi_cmdset_0020.c add_wait_queue(&chip->wq, &wait);
wq 1083 drivers/mtd/chips/cfi_cmdset_0020.c remove_wait_queue(&chip->wq, &wait);
wq 1126 drivers/mtd/chips/cfi_cmdset_0020.c wake_up(&chip->wq);
wq 1227 drivers/mtd/chips/cfi_cmdset_0020.c add_wait_queue(&chip->wq, &wait);
wq 1230 drivers/mtd/chips/cfi_cmdset_0020.c remove_wait_queue(&chip->wq, &wait);
wq 1273 drivers/mtd/chips/cfi_cmdset_0020.c wake_up(&chip->wq);
wq 1363 drivers/mtd/chips/cfi_cmdset_0020.c wake_up(&chip->wq);
wq 1389 drivers/mtd/chips/cfi_cmdset_0020.c wake_up(&chip->wq);
wq 155 drivers/mtd/chips/gen_probe.c init_waitqueue_head(&pchip->wq);
wq 87 drivers/mtd/lpddr/lpddr_cmds.c init_waitqueue_head(&chip->wq);
wq 145 drivers/mtd/lpddr/lpddr_cmds.c add_wait_queue(&chip->wq, &wait);
wq 148 drivers/mtd/lpddr/lpddr_cmds.c remove_wait_queue(&chip->wq, &wait);
wq 244 drivers/mtd/lpddr/lpddr_cmds.c add_wait_queue(&chip->wq, &wait);
wq 247 drivers/mtd/lpddr/lpddr_cmds.c remove_wait_queue(&chip->wq, &wait);
wq 312 drivers/mtd/lpddr/lpddr_cmds.c add_wait_queue(&chip->wq, &wait);
wq 315 drivers/mtd/lpddr/lpddr_cmds.c remove_wait_queue(&chip->wq, &wait);
wq 338 drivers/mtd/lpddr/lpddr_cmds.c wake_up(&chip->wq);
wq 352 drivers/mtd/lpddr/lpddr_cmds.c wake_up(&chip->wq);
wq 373 drivers/mtd/lpddr/lpddr_cmds.c wake_up(&chip->wq);
wq 1016 drivers/mtd/nand/onenand/onenand_base.c add_wait_queue(&this->wq, &wait);
wq 1019 drivers/mtd/nand/onenand/onenand_base.c remove_wait_queue(&this->wq, &wait);
wq 1040 drivers/mtd/nand/onenand/onenand_base.c wake_up(&this->wq);
wq 3894 drivers/mtd/nand/onenand/onenand_base.c init_waitqueue_head(&this->wq);
wq 85 drivers/mtd/ubi/block.c struct workqueue_struct *wq;
wq 323 drivers/mtd/ubi/block.c queue_work(dev->wq, &pdu->work);
wq 448 drivers/mtd/ubi/block.c dev->wq = alloc_workqueue("%s", 0, 0, gd->disk_name);
wq 449 drivers/mtd/ubi/block.c if (!dev->wq) {
wq 484 drivers/mtd/ubi/block.c destroy_workqueue(dev->wq);
wq 2363 drivers/net/bonding/bond_3ad.c queue_delayed_work(bond->wq, &bond->ad_work, ad_delta_in_ticks);
wq 1604 drivers/net/bonding/bond_alb.c queue_delayed_work(bond->wq, &bond->alb_work, alb_delta_in_ticks);
wq 540 drivers/net/bonding/bond_main.c queue_delayed_work(bond->wq, &bond->mcast_work, 1);
wq 547 drivers/net/bonding/bond_main.c queue_delayed_work(bond->wq, &bond->mcast_work, HZ/5);
wq 915 drivers/net/bonding/bond_main.c queue_delayed_work(bond->wq, &bond->mcast_work, 1);
wq 1355 drivers/net/bonding/bond_main.c queue_delayed_work(slave->bond->wq, &slave->notify_work, 1);
wq 1361 drivers/net/bonding/bond_main.c queue_delayed_work(slave->bond->wq, &slave->notify_work, 0);
wq 2332 drivers/net/bonding/bond_main.c queue_delayed_work(bond->wq, &bond->mii_work, delay);
wq 2754 drivers/net/bonding/bond_main.c queue_delayed_work(bond->wq, &bond->arp_work,
wq 3027 drivers/net/bonding/bond_main.c queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks);
wq 3373 drivers/net/bonding/bond_main.c queue_delayed_work(bond->wq, &bond->alb_work, 0);
wq 3377 drivers/net/bonding/bond_main.c queue_delayed_work(bond->wq, &bond->mii_work, 0);
wq 3380 drivers/net/bonding/bond_main.c queue_delayed_work(bond->wq, &bond->arp_work, 0);
wq 3385 drivers/net/bonding/bond_main.c queue_delayed_work(bond->wq, &bond->ad_work, 0);
wq 3975 drivers/net/bonding/bond_main.c queue_delayed_work(bond->wq, &bond->slave_arr_work, delay);
wq 4328 drivers/net/bonding/bond_main.c if (bond->wq)
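The bonding entries above all feed delayed work to the ordered bond->wq; each monitor re-arms itself from its own handler, the usual shape for a periodic poller that can also be kicked immediately with a delay of 0. A sketch with invented names:

        #include <linux/workqueue.h>

        struct monitor {
                struct workqueue_struct *wq;
                struct delayed_work work;
                bool running;
        };

        static void monitor_fn(struct work_struct *work)
        {
                struct monitor *m = container_of(to_delayed_work(work),
                                                 struct monitor, work);

                /* ...poll link state, update slaves... */

                if (m->running)         /* re-arm for the next period */
                        queue_delayed_work(m->wq, &m->work, HZ / 10);
        }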
wq 4329 drivers/net/bonding/bond_main.c destroy_workqueue(bond->wq);
wq 4805 drivers/net/bonding/bond_main.c bond->wq = alloc_ordered_workqueue(bond_dev->name, WQ_MEM_RECLAIM);
wq 4806 drivers/net/bonding/bond_main.c if (!bond->wq)
wq 872 drivers/net/bonding/bond_options.c queue_delayed_work(bond->wq, &bond->mii_work, 0);
wq 976 drivers/net/bonding/bond_options.c queue_delayed_work(bond->wq, &bond->arp_work, 0);
wq 78 drivers/net/caif/caif_hsi.c queue_work(cfhsi->wq, &cfhsi->wake_down_work);
wq 983 drivers/net/caif/caif_hsi.c queue_work(cfhsi->wq, &cfhsi->wake_up_work);
wq 1102 drivers/net/caif/caif_hsi.c queue_work(cfhsi->wq, &cfhsi->wake_up_work);
wq 1199 drivers/net/caif/caif_hsi.c cfhsi->wq = alloc_ordered_workqueue(cfhsi->ndev->name, WQ_MEM_RECLAIM);
wq 1200 drivers/net/caif/caif_hsi.c if (!cfhsi->wq) {
wq 1240 drivers/net/caif/caif_hsi.c destroy_workqueue(cfhsi->wq);
wq 1268 drivers/net/caif/caif_hsi.c destroy_workqueue(cfhsi->wq);
wq 641 drivers/net/caif/caif_spi.c cfspi->wq = create_singlethread_workqueue(dev->name);
wq 642 drivers/net/caif/caif_spi.c if (!cfspi->wq) {
wq 665 drivers/net/caif/caif_spi.c queue_work(cfspi->wq, &cfspi->work);
wq 692 drivers/net/caif/caif_spi.c destroy_workqueue(cfspi->wq);
wq 158 drivers/net/can/spi/hi311x.c struct workqueue_struct *wq;
wq 381 drivers/net/can/spi/hi311x.c queue_work(priv->wq, &priv->tx_work);
wq 398 drivers/net/can/spi/hi311x.c queue_work(priv->wq, &priv->restart_work);
wq 548 drivers/net/can/spi/hi311x.c destroy_workqueue(priv->wq);
wq 549 drivers/net/can/spi/hi311x.c priv->wq = NULL;
wq 766 drivers/net/can/spi/hi311x.c priv->wq = alloc_workqueue("hi3110_wq", WQ_FREEZABLE | WQ_MEM_RECLAIM,
wq 768 drivers/net/can/spi/hi311x.c if (!priv->wq) {
wq 794 drivers/net/can/spi/hi311x.c destroy_workqueue(priv->wq);
wq 1008 drivers/net/can/spi/hi311x.c queue_work(priv->wq, &priv->restart_work);
wq 215 drivers/net/can/spi/mcp251x.c struct workqueue_struct *wq;
wq 476 drivers/net/can/spi/mcp251x.c queue_work(priv->wq, &priv->tx_work);
wq 493 drivers/net/can/spi/mcp251x.c queue_work(priv->wq, &priv->restart_work);
wq 643 drivers/net/can/spi/mcp251x.c destroy_workqueue(priv->wq);
wq 644 drivers/net/can/spi/mcp251x.c priv->wq = NULL;
wq 907 drivers/net/can/spi/mcp251x.c priv->wq = alloc_workqueue("mcp251x_wq", WQ_FREEZABLE | WQ_MEM_RECLAIM,
wq 909 drivers/net/can/spi/mcp251x.c if (!priv->wq) {
wq 934 drivers/net/can/spi/mcp251x.c destroy_workqueue(priv->wq);
wq 1161 drivers/net/can/spi/mcp251x.c queue_work(priv->wq, &priv->restart_work);
wq 2717 drivers/net/ethernet/broadcom/genet/bcmgenet.c wake_up(&priv->wq);
wq 3537 drivers/net/ethernet/broadcom/genet/bcmgenet.c init_waitqueue_head(&priv->wq);
wq 632 drivers/net/ethernet/broadcom/genet/bcmgenet.h wait_queue_head_t wq;
wq 411 drivers/net/ethernet/broadcom/genet/bcmmii.c wait_event_timeout(priv->wq,
wq 433 drivers/net/ethernet/cavium/liquidio/lio_core.c struct cavium_wq *wq = &lio->rxq_status_wq[droq->q_no];
wq 435 drivers/net/ethernet/cavium/liquidio/lio_core.c queue_delayed_work(wq->wq, &wq->wk.work,
wq 458 drivers/net/ethernet/cavium/liquidio/lio_core.c struct cavium_wq *wq;
wq 463 drivers/net/ethernet/cavium/liquidio/lio_core.c wq = &lio->rxq_status_wq[q_no];
wq 464 drivers/net/ethernet/cavium/liquidio/lio_core.c wq->wq = alloc_workqueue("rxq-oom-status",
wq 466 drivers/net/ethernet/cavium/liquidio/lio_core.c if (!wq->wq) {
wq 471 drivers/net/ethernet/cavium/liquidio/lio_core.c INIT_DELAYED_WORK(&wq->wk.work,
wq 473 drivers/net/ethernet/cavium/liquidio/lio_core.c wq->wk.ctxptr = lio;
wq 474 drivers/net/ethernet/cavium/liquidio/lio_core.c wq->wk.ctxul = q_no;
wq 484 drivers/net/ethernet/cavium/liquidio/lio_core.c struct cavium_wq *wq;
wq 488 drivers/net/ethernet/cavium/liquidio/lio_core.c wq = &lio->rxq_status_wq[q_no];
wq 489 drivers/net/ethernet/cavium/liquidio/lio_core.c if (wq->wq) {
wq 490 drivers/net/ethernet/cavium/liquidio/lio_core.c cancel_delayed_work_sync(&wq->wk.work);
wq 491 drivers/net/ethernet/cavium/liquidio/lio_core.c flush_workqueue(wq->wq);
wq 492 drivers/net/ethernet/cavium/liquidio/lio_core.c destroy_workqueue(wq->wq);
wq 493 drivers/net/ethernet/cavium/liquidio/lio_core.c wq->wq = NULL;
wq 564 drivers/net/ethernet/cavium/liquidio/lio_main.c lio->link_status_wq.wq = alloc_workqueue("link-status",
wq 566 drivers/net/ethernet/cavium/liquidio/lio_main.c if (!lio->link_status_wq.wq) {
wq 581 drivers/net/ethernet/cavium/liquidio/lio_main.c if (lio->link_status_wq.wq) {
wq 583 drivers/net/ethernet/cavium/liquidio/lio_main.c destroy_workqueue(lio->link_status_wq.wq);
wq 629 drivers/net/ethernet/cavium/liquidio/lio_main.c queue_delayed_work(lio->link_status_wq.wq,
wq 682 drivers/net/ethernet/cavium/liquidio/lio_main.c queue_delayed_work(lio->sync_octeon_time_wq.wq,
wq 698 drivers/net/ethernet/cavium/liquidio/lio_main.c lio->sync_octeon_time_wq.wq =
wq 700 drivers/net/ethernet/cavium/liquidio/lio_main.c if (!lio->sync_octeon_time_wq.wq) {
wq 707 drivers/net/ethernet/cavium/liquidio/lio_main.c queue_delayed_work(lio->sync_octeon_time_wq.wq,
wq 725 drivers/net/ethernet/cavium/liquidio/lio_main.c if (time_wq->wq) {
wq 727 drivers/net/ethernet/cavium/liquidio/lio_main.c destroy_workqueue(time_wq->wq);
wq 1771 drivers/net/ethernet/cavium/liquidio/lio_main.c queue_delayed_work(lio->txq_status_wq.wq,
wq 1784 drivers/net/ethernet/cavium/liquidio/lio_main.c lio->txq_status_wq.wq = alloc_workqueue("txq-status",
wq 1786 drivers/net/ethernet/cavium/liquidio/lio_main.c if (!lio->txq_status_wq.wq) {
wq 1793 drivers/net/ethernet/cavium/liquidio/lio_main.c queue_delayed_work(lio->txq_status_wq.wq,
wq 1802 drivers/net/ethernet/cavium/liquidio/lio_main.c if (lio->txq_status_wq.wq) {
wq 1804 drivers/net/ethernet/cavium/liquidio/lio_main.c destroy_workqueue(lio->txq_status_wq.wq);
wq 308 drivers/net/ethernet/cavium/liquidio/lio_vf_main.c lio->link_status_wq.wq = alloc_workqueue("link-status",
wq 310 drivers/net/ethernet/cavium/liquidio/lio_vf_main.c if (!lio->link_status_wq.wq) {
wq 325 drivers/net/ethernet/cavium/liquidio/lio_vf_main.c if (lio->link_status_wq.wq) {
wq 327 drivers/net/ethernet/cavium/liquidio/lio_vf_main.c destroy_workqueue(lio->link_status_wq.wq);
wq 371 drivers/net/ethernet/cavium/liquidio/lio_vf_main.c queue_delayed_work(lio->link_status_wq.wq,
wq 310 drivers/net/ethernet/cavium/liquidio/octeon_device.h struct workqueue_struct *wq;
wq 147 drivers/net/ethernet/cavium/liquidio/request_manager.c oct->check_db_wq[iq_no].wq = alloc_workqueue("check_iq_db",
wq 150 drivers/net/ethernet/cavium/liquidio/request_manager.c if (!oct->check_db_wq[iq_no].wq) {
wq 164 drivers/net/ethernet/cavium/liquidio/request_manager.c queue_delayed_work(db_wq->wq, &db_wq->wk.work, msecs_to_jiffies(1));
wq 175 drivers/net/ethernet/cavium/liquidio/request_manager.c destroy_workqueue(oct->check_db_wq[iq_no].wq);
wq 442 drivers/net/ethernet/cavium/liquidio/request_manager.c queue_work(cwq->wq, &cwq->wk.work.work);
wq 540 drivers/net/ethernet/cavium/liquidio/request_manager.c queue_delayed_work(db_wq->wq, &db_wq->wk.work, msecs_to_jiffies(delay));
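The lio_core.c entries above (lines 489-493) give the safe teardown order for delayed work on a private queue: cancel_delayed_work_sync() so the work cannot requeue itself, flush_workqueue() for anything already queued, then destroy_workqueue() and NULL the pointer. As a sketch over a hypothetical wrapper shaped like liquidio's struct cavium_wq:

        static void my_wq_teardown(struct my_cavium_wq *w)  /* hypothetical type */
        {
                if (w->wq) {
                        cancel_delayed_work_sync(&w->wk.work);
                        flush_workqueue(w->wq);
                        destroy_workqueue(w->wq);
                        w->wq = NULL;   /* makes teardown idempotent */
                }
        }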
wq 42 drivers/net/ethernet/cavium/liquidio/response_manager.c oct->dma_comp_wq.wq = alloc_workqueue("dma-comp", WQ_MEM_RECLAIM, 0);
wq 43 drivers/net/ethernet/cavium/liquidio/response_manager.c if (!oct->dma_comp_wq.wq) {
wq 59 drivers/net/ethernet/cavium/liquidio/response_manager.c destroy_workqueue(oct->dma_comp_wq.wq);
wq 233 drivers/net/ethernet/cavium/liquidio/response_manager.c queue_delayed_work(cwq->wq, &cwq->wk.work, msecs_to_jiffies(1));
wq 177 drivers/net/ethernet/cisco/enic/enic.h ____cacheline_aligned struct vnic_wq wq[ENIC_WQ_MAX];
wq 238 drivers/net/ethernet/cisco/enic/enic.h static inline unsigned int enic_cq_wq(struct enic *enic, unsigned int wq)
wq 240 drivers/net/ethernet/cisco/enic/enic.h return enic->rq_count + wq;
wq 265 drivers/net/ethernet/cisco/enic/enic.h unsigned int wq)
wq 267 drivers/net/ethernet/cisco/enic/enic.h return enic->cq[enic_cq_wq(enic, wq)].interrupt_offset;
wq 359 drivers/net/ethernet/cisco/enic/enic_main.c static void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
wq 361 drivers/net/ethernet/cisco/enic/enic_main.c struct enic *enic = vnic_dev_priv(wq->vdev);
wq 374 drivers/net/ethernet/cisco/enic/enic_main.c static void enic_wq_free_buf(struct vnic_wq *wq,
wq 377 drivers/net/ethernet/cisco/enic/enic_main.c enic_free_wq_buf(wq, buf);
wq 387 drivers/net/ethernet/cisco/enic/enic_main.c vnic_wq_service(&enic->wq[q_number], cq_desc,
wq 392 drivers/net/ethernet/cisco/enic/enic_main.c vnic_wq_desc_avail(&enic->wq[q_number]) >=
wq 408 drivers/net/ethernet/cisco/enic/enic_main.c error_status = vnic_wq_error_status(&enic->wq[i]);
wq 579 drivers/net/ethernet/cisco/enic/enic_main.c static int enic_queue_wq_skb_cont(struct enic *enic, struct vnic_wq *wq,
wq 594 drivers/net/ethernet/cisco/enic/enic_main.c enic_queue_wq_desc_cont(wq, skb, dma_addr, skb_frag_size(frag),
wq 602 drivers/net/ethernet/cisco/enic/enic_main.c static int enic_queue_wq_skb_vlan(struct enic *enic, struct vnic_wq *wq,
wq 622 drivers/net/ethernet/cisco/enic/enic_main.c enic_queue_wq_desc(wq, skb, dma_addr, head_len, vlan_tag_insert,
wq 626 drivers/net/ethernet/cisco/enic/enic_main.c err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
wq 631 drivers/net/ethernet/cisco/enic/enic_main.c static int enic_queue_wq_skb_csum_l4(struct enic *enic, struct vnic_wq *wq,
wq 653 drivers/net/ethernet/cisco/enic/enic_main.c enic_queue_wq_desc_csum_l4(wq, skb, dma_addr, head_len, csum_offset,
wq 658 drivers/net/ethernet/cisco/enic/enic_main.c err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
wq 704 drivers/net/ethernet/cisco/enic/enic_main.c static int enic_queue_wq_skb_tso(struct enic *enic, struct vnic_wq *wq,
wq 736 drivers/net/ethernet/cisco/enic/enic_main.c enic_queue_wq_desc_tso(wq, skb, dma_addr, len, mss, hdr_len,
wq 762 drivers/net/ethernet/cisco/enic/enic_main.c enic_queue_wq_desc_cont(wq, skb, dma_addr, len,
wq 774 drivers/net/ethernet/cisco/enic/enic_main.c static inline int enic_queue_wq_skb_encap(struct enic *enic, struct vnic_wq *wq,
wq 796 drivers/net/ethernet/cisco/enic/enic_main.c enic_queue_wq_desc_ex(wq, skb, dma_addr, head_len, mss_or_csum, 0,
wq 801 drivers/net/ethernet/cisco/enic/enic_main.c err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
wq 807 drivers/net/ethernet/cisco/enic/enic_main.c struct vnic_wq *wq, struct sk_buff *skb)
wq 825 drivers/net/ethernet/cisco/enic/enic_main.c err = enic_queue_wq_skb_tso(enic, wq, skb, mss,
wq 829 drivers/net/ethernet/cisco/enic/enic_main.c err = enic_queue_wq_skb_encap(enic, wq, skb, vlan_tag_insert,
wq 832 drivers/net/ethernet/cisco/enic/enic_main.c err = enic_queue_wq_skb_csum_l4(enic, wq, skb, vlan_tag_insert,
wq 835 drivers/net/ethernet/cisco/enic/enic_main.c err = enic_queue_wq_skb_vlan(enic, wq, skb, vlan_tag_insert,
wq 840 drivers/net/ethernet/cisco/enic/enic_main.c buf = wq->to_use->prev;
wq 844 drivers/net/ethernet/cisco/enic/enic_main.c while (!buf->os_buf && (buf->next != wq->to_clean)) {
wq 845 drivers/net/ethernet/cisco/enic/enic_main.c enic_free_wq_buf(wq, buf);
wq 846 drivers/net/ethernet/cisco/enic/enic_main.c wq->ring.desc_avail++;
wq 849 drivers/net/ethernet/cisco/enic/enic_main.c wq->to_use = buf->next;
wq 859 drivers/net/ethernet/cisco/enic/enic_main.c struct vnic_wq *wq;
wq 869 drivers/net/ethernet/cisco/enic/enic_main.c wq = &enic->wq[txq_map];
wq 886 drivers/net/ethernet/cisco/enic/enic_main.c if (vnic_wq_desc_avail(wq) <
wq 895 drivers/net/ethernet/cisco/enic/enic_main.c enic_queue_wq_skb(enic, wq, skb);
wq 897 drivers/net/ethernet/cisco/enic/enic_main.c if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)
wq 901 drivers/net/ethernet/cisco/enic/enic_main.c vnic_wq_doorbell(wq);
wq 1639 drivers/net/ethernet/cisco/enic/enic_main.c struct vnic_wq *wq = &enic->wq[wq_index];
wq 1646 drivers/net/ethernet/cisco/enic/enic_main.c wq_irq = wq->index;
wq 1788 drivers/net/ethernet/cisco/enic/enic_main.c int wq = enic_cq_wq(enic, i);
wq 1795 drivers/net/ethernet/cisco/enic/enic_main.c enic->msix[intr].devid = &enic->napi[wq];
wq 1954 drivers/net/ethernet/cisco/enic/enic_main.c vnic_wq_enable(&enic->wq[i]);
wq 2025 drivers/net/ethernet/cisco/enic/enic_main.c err = vnic_wq_disable(&enic->wq[i]);
wq 2040 drivers/net/ethernet/cisco/enic/enic_main.c vnic_wq_clean(&enic->wq[i], enic_free_wq_buf);
wq 192 drivers/net/ethernet/cisco/enic/enic_res.c vnic_wq_free(&enic->wq[i]);
wq 257 drivers/net/ethernet/cisco/enic/enic_res.c vnic_wq_init(&enic->wq[i],
wq 338 drivers/net/ethernet/cisco/enic/enic_res.c err = vnic_wq_alloc(enic->vdev, &enic->wq[i], i,
wq 43 drivers/net/ethernet/cisco/enic/enic_res.h static inline void enic_queue_wq_desc_ex(struct vnic_wq *wq,
wq 49 drivers/net/ethernet/cisco/enic/enic_res.h struct wq_enet_desc *desc = vnic_wq_next_desc(wq);
wq 65 drivers/net/ethernet/cisco/enic/enic_res.h vnic_wq_post(wq, os_buf, dma_addr, len, sop, eop, desc_skip_cnt,
wq 69 drivers/net/ethernet/cisco/enic/enic_res.h static inline void enic_queue_wq_desc_cont(struct vnic_wq *wq,
wq 73 drivers/net/ethernet/cisco/enic/enic_res.h enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,
wq 78 drivers/net/ethernet/cisco/enic/enic_res.h static inline void enic_queue_wq_desc(struct vnic_wq *wq, void *os_buf,
wq 82 drivers/net/ethernet/cisco/enic/enic_res.h enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,
wq 88 drivers/net/ethernet/cisco/enic/enic_res.h static inline void enic_queue_wq_desc_csum(struct vnic_wq *wq,
wq 93 drivers/net/ethernet/cisco/enic/enic_res.h enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,
wq 100 drivers/net/ethernet/cisco/enic/enic_res.h static inline void enic_queue_wq_desc_csum_l4(struct vnic_wq *wq,
wq 105 drivers/net/ethernet/cisco/enic/enic_res.h enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,
wq 111 drivers/net/ethernet/cisco/enic/enic_res.h static inline void enic_queue_wq_desc_tso(struct vnic_wq *wq,
wq 116 drivers/net/ethernet/cisco/enic/enic_res.h enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,
wq 397 drivers/net/ethernet/cisco/enic/vnic_dev.c err = enic_wq_devcmd2_alloc(vdev, &vdev->devcmd2->wq, DEVCMD2_RING_SIZE,
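The enic_main.c entries above show the netdev TX flow-control contract around the ring: stop the queue when fewer descriptors remain than a worst-case skb needs (MAX_SKB_FRAGS plus split overhead, ENIC_DESC_MAX_SPLITS in enic), and ring the doorbell once per posted packet. In outline, with hypothetical ring helpers standing in for the vnic_wq calls:

        #include <linux/netdevice.h>
        #include <linux/skbuff.h>

        static netdev_tx_t my_xmit(struct sk_buff *skb, struct net_device *dev)
        {
                struct my_ring *ring = my_ring_for(dev, skb);   /* hypothetical */

                if (ring_desc_avail(ring) < skb_shinfo(skb)->nr_frags + 2) {
                        netif_stop_queue(dev);  /* completion path rewakes */
                        return NETDEV_TX_BUSY;
                }

                post_skb_descs(ring, skb);      /* hypothetical: map + fill */

                /* Stop early if the next worst-case skb could not fit. */
                if (ring_desc_avail(ring) < MAX_SKB_FRAGS + 2)
                        netif_stop_queue(dev);

                ring_doorbell(ring);            /* one MMIO write per packet */
                return NETDEV_TX_OK;
        }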
wq 402 drivers/net/ethernet/cisco/enic/vnic_dev.c fetch_index = ioread32(&vdev->devcmd2->wq.ctrl->fetch_index);
wq 409 drivers/net/ethernet/cisco/enic/vnic_dev.c enic_wq_init_start(&vdev->devcmd2->wq, 0, fetch_index, fetch_index, 0,
wq 412 drivers/net/ethernet/cisco/enic/vnic_dev.c vnic_wq_enable(&vdev->devcmd2->wq);
wq 420 drivers/net/ethernet/cisco/enic/vnic_dev.c vdev->devcmd2->cmd_ring = vdev->devcmd2->wq.ring.descs;
wq 421 drivers/net/ethernet/cisco/enic/vnic_dev.c vdev->devcmd2->wq_ctrl = vdev->devcmd2->wq.ctrl;
wq 437 drivers/net/ethernet/cisco/enic/vnic_dev.c vnic_wq_disable(&vdev->devcmd2->wq);
wq 439 drivers/net/ethernet/cisco/enic/vnic_dev.c vnic_wq_free(&vdev->devcmd2->wq);
wq 450 drivers/net/ethernet/cisco/enic/vnic_dev.c vnic_wq_disable(&vdev->devcmd2->wq);
wq 451 drivers/net/ethernet/cisco/enic/vnic_dev.c vnic_wq_free(&vdev->devcmd2->wq);
wq 31 drivers/net/ethernet/cisco/enic/vnic_wq.c static int vnic_wq_alloc_bufs(struct vnic_wq *wq)
wq 34 drivers/net/ethernet/cisco/enic/vnic_wq.c unsigned int i, j, count = wq->ring.desc_count;
wq 38 drivers/net/ethernet/cisco/enic/vnic_wq.c wq->bufs[i] = kzalloc(VNIC_WQ_BUF_BLK_SZ(count), GFP_KERNEL);
wq 39 drivers/net/ethernet/cisco/enic/vnic_wq.c if (!wq->bufs[i])
wq 44 drivers/net/ethernet/cisco/enic/vnic_wq.c buf = wq->bufs[i];
wq 47 drivers/net/ethernet/cisco/enic/vnic_wq.c buf->desc = (u8 *)wq->ring.descs +
wq 48 drivers/net/ethernet/cisco/enic/vnic_wq.c wq->ring.desc_size * buf->index;
wq 50 drivers/net/ethernet/cisco/enic/vnic_wq.c buf->next = wq->bufs[0];
wq 54 drivers/net/ethernet/cisco/enic/vnic_wq.c buf->next = wq->bufs[i + 1];
wq 64 drivers/net/ethernet/cisco/enic/vnic_wq.c wq->to_use = wq->to_clean = wq->bufs[0];
wq 69 drivers/net/ethernet/cisco/enic/vnic_wq.c void vnic_wq_free(struct vnic_wq *wq)
wq 74 drivers/net/ethernet/cisco/enic/vnic_wq.c vdev = wq->vdev;
wq 76 drivers/net/ethernet/cisco/enic/vnic_wq.c vnic_dev_free_desc_ring(vdev, &wq->ring);
wq 79 drivers/net/ethernet/cisco/enic/vnic_wq.c if (wq->bufs[i]) {
wq 80 drivers/net/ethernet/cisco/enic/vnic_wq.c kfree(wq->bufs[i]);
wq 81 drivers/net/ethernet/cisco/enic/vnic_wq.c wq->bufs[i] = NULL;
wq 85 drivers/net/ethernet/cisco/enic/vnic_wq.c wq->ctrl = NULL;
wq 88 drivers/net/ethernet/cisco/enic/vnic_wq.c int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index,
wq 93 drivers/net/ethernet/cisco/enic/vnic_wq.c wq->index = index;
wq 94 drivers/net/ethernet/cisco/enic/vnic_wq.c wq->vdev = vdev;
wq 96 drivers/net/ethernet/cisco/enic/vnic_wq.c wq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_WQ, index);
wq 97 drivers/net/ethernet/cisco/enic/vnic_wq.c if (!wq->ctrl) {
wq 102 drivers/net/ethernet/cisco/enic/vnic_wq.c vnic_wq_disable(wq);
wq 104 drivers/net/ethernet/cisco/enic/vnic_wq.c err = vnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, desc_size);
wq 108 drivers/net/ethernet/cisco/enic/vnic_wq.c err = vnic_wq_alloc_bufs(wq);
wq 110 drivers/net/ethernet/cisco/enic/vnic_wq.c vnic_wq_free(wq);
wq 117 drivers/net/ethernet/cisco/enic/vnic_wq.c int enic_wq_devcmd2_alloc(struct vnic_dev *vdev, struct vnic_wq *wq,
wq 122 drivers/net/ethernet/cisco/enic/vnic_wq.c wq->index = 0;
wq 123 drivers/net/ethernet/cisco/enic/vnic_wq.c wq->vdev = vdev;
wq 125 drivers/net/ethernet/cisco/enic/vnic_wq.c wq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD2, 0);
wq 126 drivers/net/ethernet/cisco/enic/vnic_wq.c if (!wq->ctrl)
wq 128 drivers/net/ethernet/cisco/enic/vnic_wq.c vnic_wq_disable(wq);
wq 129 drivers/net/ethernet/cisco/enic/vnic_wq.c err = vnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, desc_size);
wq 134 drivers/net/ethernet/cisco/enic/vnic_wq.c void enic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index,
wq 140 drivers/net/ethernet/cisco/enic/vnic_wq.c unsigned int count = wq->ring.desc_count;
wq 142 drivers/net/ethernet/cisco/enic/vnic_wq.c paddr = (u64)wq->ring.base_addr | VNIC_PADDR_TARGET;
wq 143 drivers/net/ethernet/cisco/enic/vnic_wq.c writeq(paddr, &wq->ctrl->ring_base);
wq 144 drivers/net/ethernet/cisco/enic/vnic_wq.c iowrite32(count, &wq->ctrl->ring_size);
wq 145 drivers/net/ethernet/cisco/enic/vnic_wq.c iowrite32(fetch_index, &wq->ctrl->fetch_index);
wq 146 drivers/net/ethernet/cisco/enic/vnic_wq.c iowrite32(posted_index, &wq->ctrl->posted_index);
wq 147 drivers/net/ethernet/cisco/enic/vnic_wq.c iowrite32(cq_index, &wq->ctrl->cq_index);
wq 148 drivers/net/ethernet/cisco/enic/vnic_wq.c iowrite32(error_interrupt_enable, &wq->ctrl->error_interrupt_enable);
wq 149 drivers/net/ethernet/cisco/enic/vnic_wq.c iowrite32(error_interrupt_offset, &wq->ctrl->error_interrupt_offset);
wq 150 drivers/net/ethernet/cisco/enic/vnic_wq.c iowrite32(0, &wq->ctrl->error_status);
wq 152 drivers/net/ethernet/cisco/enic/vnic_wq.c wq->to_use = wq->to_clean =
wq 153 drivers/net/ethernet/cisco/enic/vnic_wq.c &wq->bufs[fetch_index / VNIC_WQ_BUF_BLK_ENTRIES(count)]
wq 157 drivers/net/ethernet/cisco/enic/vnic_wq.c void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index,
wq 161 drivers/net/ethernet/cisco/enic/vnic_wq.c enic_wq_init_start(wq, cq_index, 0, 0,
wq 166 drivers/net/ethernet/cisco/enic/vnic_wq.c unsigned int vnic_wq_error_status(struct vnic_wq *wq)
wq 168 drivers/net/ethernet/cisco/enic/vnic_wq.c return ioread32(&wq->ctrl->error_status);
wq 171 drivers/net/ethernet/cisco/enic/vnic_wq.c void vnic_wq_enable(struct vnic_wq *wq)
wq 173 drivers/net/ethernet/cisco/enic/vnic_wq.c iowrite32(1, &wq->ctrl->enable);
wq 176 drivers/net/ethernet/cisco/enic/vnic_wq.c int vnic_wq_disable(struct vnic_wq *wq)
wq 179 drivers/net/ethernet/cisco/enic/vnic_wq.c struct vnic_dev *vdev = wq->vdev;
wq 181 drivers/net/ethernet/cisco/enic/vnic_wq.c iowrite32(0, &wq->ctrl->enable);
wq 185 drivers/net/ethernet/cisco/enic/vnic_wq.c if (!(ioread32(&wq->ctrl->running)))
wq 190 drivers/net/ethernet/cisco/enic/vnic_wq.c vdev_neterr(vdev, "Failed to disable WQ[%d]\n", wq->index);
wq 195 drivers/net/ethernet/cisco/enic/vnic_wq.c void vnic_wq_clean(struct vnic_wq *wq,
wq 196 drivers/net/ethernet/cisco/enic/vnic_wq.c void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf))
wq 200 drivers/net/ethernet/cisco/enic/vnic_wq.c buf = wq->to_clean;
wq 202 drivers/net/ethernet/cisco/enic/vnic_wq.c while (vnic_wq_desc_used(wq) > 0) {
wq 204 drivers/net/ethernet/cisco/enic/vnic_wq.c (*buf_clean)(wq, buf);
wq 206 drivers/net/ethernet/cisco/enic/vnic_wq.c buf = wq->to_clean = buf->next;
wq 207 drivers/net/ethernet/cisco/enic/vnic_wq.c wq->ring.desc_avail++;
wq 210 drivers/net/ethernet/cisco/enic/vnic_wq.c wq->to_use = wq->to_clean = wq->bufs[0];
wq 212 drivers/net/ethernet/cisco/enic/vnic_wq.c iowrite32(0, &wq->ctrl->fetch_index);
wq 213 drivers/net/ethernet/cisco/enic/vnic_wq.c iowrite32(0, &wq->ctrl->posted_index);
wq 214 drivers/net/ethernet/cisco/enic/vnic_wq.c iowrite32(0, &wq->ctrl->error_status);
wq 216 drivers/net/ethernet/cisco/enic/vnic_wq.c vnic_dev_clear_desc_ring(&wq->ring);
wq 99 drivers/net/ethernet/cisco/enic/vnic_wq.h struct vnic_wq wq;
wq 103 drivers/net/ethernet/cisco/enic/vnic_wq.h static inline unsigned int vnic_wq_desc_avail(struct vnic_wq *wq)
wq 106 drivers/net/ethernet/cisco/enic/vnic_wq.h return wq->ring.desc_avail;
wq 109 drivers/net/ethernet/cisco/enic/vnic_wq.h static inline unsigned int vnic_wq_desc_used(struct vnic_wq *wq)
wq 112 drivers/net/ethernet/cisco/enic/vnic_wq.h return wq->ring.desc_count - wq->ring.desc_avail - 1;
wq 115 drivers/net/ethernet/cisco/enic/vnic_wq.h static inline void *vnic_wq_next_desc(struct vnic_wq *wq)
wq 117 drivers/net/ethernet/cisco/enic/vnic_wq.h return wq->to_use->desc;
wq 120 drivers/net/ethernet/cisco/enic/vnic_wq.h static inline void vnic_wq_doorbell(struct vnic_wq *wq)
wq 128 drivers/net/ethernet/cisco/enic/vnic_wq.h iowrite32(wq->to_use->index, &wq->ctrl->posted_index);
wq 131 drivers/net/ethernet/cisco/enic/vnic_wq.h static inline void vnic_wq_post(struct vnic_wq *wq,
wq 137 drivers/net/ethernet/cisco/enic/vnic_wq.h struct vnic_wq_buf *buf = wq->to_use;
wq 149 drivers/net/ethernet/cisco/enic/vnic_wq.h wq->to_use = buf;
wq 151 drivers/net/ethernet/cisco/enic/vnic_wq.h wq->ring.desc_avail -= desc_skip_cnt;
wq 154 drivers/net/ethernet/cisco/enic/vnic_wq.h static inline void vnic_wq_service(struct vnic_wq *wq,
wq 156 drivers/net/ethernet/cisco/enic/vnic_wq.h void (*buf_service)(struct vnic_wq *wq,
wq 162 drivers/net/ethernet/cisco/enic/vnic_wq.h buf = wq->to_clean;
wq 165 drivers/net/ethernet/cisco/enic/vnic_wq.h (*buf_service)(wq, cq_desc, buf, opaque);
wq 167 drivers/net/ethernet/cisco/enic/vnic_wq.h wq->ring.desc_avail++;
wq 169 drivers/net/ethernet/cisco/enic/vnic_wq.h wq->to_clean = buf->next;
wq 174 drivers/net/ethernet/cisco/enic/vnic_wq.h buf = wq->to_clean;
wq 178 drivers/net/ethernet/cisco/enic/vnic_wq.h void vnic_wq_free(struct vnic_wq *wq);
wq 179 drivers/net/ethernet/cisco/enic/vnic_wq.h int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index,
wq 181 drivers/net/ethernet/cisco/enic/vnic_wq.h void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index,
wq 184 drivers/net/ethernet/cisco/enic/vnic_wq.h unsigned int vnic_wq_error_status(struct vnic_wq *wq);
wq 185 drivers/net/ethernet/cisco/enic/vnic_wq.h void vnic_wq_enable(struct vnic_wq *wq);
wq 186 drivers/net/ethernet/cisco/enic/vnic_wq.h int vnic_wq_disable(struct vnic_wq *wq);
wq 187 drivers/net/ethernet/cisco/enic/vnic_wq.h void vnic_wq_clean(struct vnic_wq *wq,
wq 188 drivers/net/ethernet/cisco/enic/vnic_wq.h void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf));
wq 189 drivers/net/ethernet/cisco/enic/vnic_wq.h int enic_wq_devcmd2_alloc(struct vnic_dev *vdev, struct vnic_wq *wq,
wq 191 drivers/net/ethernet/cisco/enic/vnic_wq.h void enic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index,
wq 623 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c fq->wq = 1;
wq 627 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c fq->wq = 5;
wq 631 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c fq->wq = 6;
wq 637 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c fq->wq = 6;
wq 641 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c fq->wq = 2;
wq 645 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c fq->wq = 1;
wq 649 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c fq->wq = 0;
wq 1011 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c qm_fqd_set_destwq(&initfq.fqd, dpaa_fq->channel, dpaa_fq->wq);
wq 71 drivers/net/ethernet/freescale/dpaa/dpaa_eth.h u8 wq;
wq 348 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c struct hinic_wq *wq = cmdq->wq;
wq 356 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c hw_wqe = hinic_get_wqe(wq, WQE_LCMD_SIZE, &curr_prod_idx);
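The vnic_wq.c and vnic_wq.h entries above keep per-descriptor metadata in a circularly linked list of buffer structs: posting advances to_use, completion service advances to_clean, and desc_avail measures the gap (note vnic_wq_desc_used() reserves one slot, hence desc_count - desc_avail - 1). A reduced model of the two cursors, omitting the hardware descriptor side:

        struct buf {
                struct buf *next;       /* circular: last links back to first */
                void *os_buf;
        };

        struct ring {
                unsigned int desc_avail;        /* starts at desc_count - 1 */
                struct buf *to_use, *to_clean;  /* chase each other around  */
        };

        static void ring_post(struct ring *r, void *data)
        {
                r->to_use->os_buf = data;
                r->to_use = r->to_use->next;
                r->desc_avail--;
        }

        static void *ring_complete(struct ring *r)
        {
                void *data = r->to_clean->os_buf;

                r->to_clean->os_buf = NULL;
                r->to_clean = r->to_clean->next;
                r->desc_avail++;
                return data;
        }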
wq 366 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c num_wqebbs = ALIGN(WQE_LCMD_SIZE, wq->wqebb_size) / wq->wqebb_size;
wq 368 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c if (next_prod_idx >= wq->q_depth) {
wq 370 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c next_prod_idx -= wq->q_depth;
wq 426 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c struct hinic_wq *wq = cmdq->wq;
wq 434 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c hw_wqe = hinic_get_wqe(wq, WQE_SCMD_SIZE, &curr_prod_idx);
wq 444 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c num_wqebbs = ALIGN(WQE_SCMD_SIZE, wq->wqebb_size) / wq->wqebb_size;
wq 446 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c if (next_prod_idx >= wq->q_depth) {
wq 448 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c next_prod_idx -= wq->q_depth;
wq 584 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c hinic_put_wqe(cmdq->wq, WQE_SCMD_SIZE);
wq 634 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c hinic_put_wqe(cmdq->wq, WQE_LCMD_SIZE);
wq 655 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c while ((hw_wqe = hinic_read_wqe(cmdq->wq, WQE_SCMD_SIZE, &ci))) {
wq 671 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c hw_wqe = hinic_read_wqe(cmdq->wq, WQE_LCMD_SIZE, &ci);
wq 703 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c struct hinic_wq *wq = cmdq->wq;
wq 706 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c wq_first_page_paddr = be64_to_cpu(*wq->block_vaddr);
wq 708 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c pfn = CMDQ_PFN(wq_first_page_paddr, wq->wq_page_size);
wq 720 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c pfn = CMDQ_PFN(cmdq_first_block_paddr, wq->wq_page_size);
wq 724 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c HINIC_CMDQ_CTXT_BLOCK_INFO_SET(atomic_read(&wq->cons_idx), CI);
wq 739 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c static int init_cmdq(struct hinic_cmdq *cmdq, struct hinic_wq *wq,
wq 744 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c cmdq->wq = wq;
wq 750 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c cmdq->done = vzalloc(array_size(sizeof(*cmdq->done), wq->q_depth));
wq 755 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c wq->q_depth));
wq 133 drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h struct hinic_wq *wq;
wq 61 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c #define SQ_MASKED_IDX(sq, idx) ((idx) & (sq)->wq->mask)
wq 62 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c #define RQ_MASKED_IDX(rq, idx) ((idx) & (rq)->wq->mask)
wq 98 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c struct hinic_wq *wq;
wq 100 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c wq = sq->wq;
wq 101 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c ci_start = atomic_read(&wq->cons_idx);
wq 102 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c pi_start = atomic_read(&wq->prod_idx);
wq 105 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c wq_page_addr = be64_to_cpu(*wq->block_vaddr);
wq 111 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c wq_block_pfn = HINIC_WQ_BLOCK_PFN(wq->block_paddr);
wq 155 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c struct hinic_wq *wq;
wq 157 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c wq = rq->wq;
wq 158 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c ci_start = atomic_read(&wq->cons_idx);
wq 159 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c pi_start = atomic_read(&wq->prod_idx);
wq 162 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c wq_page_addr = be64_to_cpu(*wq->block_vaddr);
wq 168 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c wq_block_pfn = HINIC_WQ_BLOCK_PFN(wq->block_paddr);
wq 216 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c struct hinic_wq *wq = sq->wq;
wq 219 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c skb_arr_size = wq->q_depth * sizeof(*sq->saved_skb);
wq 244 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c struct hinic_wq *wq = rq->wq;
wq 247 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c skb_arr_size = wq->q_depth * sizeof(*rq->saved_skb);
wq 277 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c struct hinic_wq *wq, struct msix_entry *entry,
wq 283 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c sq->wq = wq;
wq 316 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c struct hinic_wq *wq = rq->wq;
wq 319 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c cqe_size = wq->q_depth * sizeof(*rq->cqe);
wq 324 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c cqe_dma_size = wq->q_depth * sizeof(*rq->cqe_dma);
wq 329 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c for (i = 0; i < wq->q_depth; i++) {
wq 359 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c struct hinic_wq *wq = rq->wq;
wq 362 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c for (i = 0; i < wq->q_depth; i++)
wq 380 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c struct hinic_wq *wq, struct msix_entry *entry)
wq 388 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c rq->wq = wq;
wq 453 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c struct hinic_wq *wq = sq->wq;
wq 455 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c return atomic_read(&wq->delta) - 1;
wq 466 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c struct hinic_wq *wq = rq->wq;
wq 468 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c return atomic_read(&wq->delta) - 1;
wq 637 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c struct hinic_wq *wq = sq->wq;
wq 640 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c prod_idx += ALIGN(wqe_size, wq->wqebb_size) / wq->wqebb_size;
wq 658 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c struct hinic_hw_wqe *hw_wqe = hinic_get_wqe(sq->wq, wqe_size,
wq 674 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c hinic_return_wqe(sq->wq, wqe_size);
wq 696 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c hinic_write_wqe(sq->wq, hw_wqe, wqe_size);
wq 720 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c hw_wqe = hinic_read_wqe(sq->wq, sizeof(*ctrl), cons_idx);
wq 733 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c *wqe_size = ALIGN(*wqe_size, sq->wq->wqebb_size);
wq 753 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c hw_wqe = hinic_read_wqe(sq->wq, wqe_size, cons_idx);
wq 766 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c hinic_put_wqe(sq->wq, wqe_size);
wq 797 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c struct hinic_hw_wqe *hw_wqe = hinic_get_wqe(rq->wq, wqe_size,
wq 823 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c hinic_write_wqe(rq->wq, hw_wqe, sizeof(*rq_wqe));
wq 844 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c hw_wqe = hinic_read_wqe(rq->wq, wqe_size, cons_idx);
wq 875 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c struct hinic_wq *wq = rq->wq;
wq 879 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c wqe_size = ALIGN(wqe_size, wq->wqebb_size);
wq 880 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c num_wqebbs = wqe_size / wq->wqebb_size;
wq 886 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c hw_wqe = hinic_read_wqe_direct(wq, *cons_idx);
wq 910 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c hinic_put_wqe(rq->wq, wqe_size);
wq 51 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h #define HINIC_MIN_TX_WQE_SIZE(wq) \
wq 52 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h ALIGN(HINIC_SQ_WQE_SIZE(1), (wq)->wqebb_size)
wq 55 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h (HINIC_MIN_TX_WQE_SIZE((sq)->wq) / (sq)->wq->wqebb_size)
wq 79 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h struct hinic_wq *wq;
wq 95 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h struct hinic_wq *wq;
wq 130 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h struct hinic_wq *wq, struct msix_entry *entry, void *ci_addr,
wq 136 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h struct hinic_wq *wq, struct msix_entry *entry);
wq 34 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c #define WQ_SIZE(wq) ((wq)->q_depth * (wq)->wqebb_size)
wq 44 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c #define WQ_BASE_VADDR(wqs, wq) \
wq 45 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c ((void *)((wqs)->page_vaddr[(wq)->page_idx]) \
wq 46 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c + (wq)->block_idx * WQ_BLOCK_SIZE)
wq 48 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c #define WQ_BASE_PADDR(wqs, wq) \
wq 49 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c ((wqs)->page_paddr[(wq)->page_idx] \
wq 50 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c + (wq)->block_idx * WQ_BLOCK_SIZE)
wq 52 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c #define WQ_BASE_ADDR(wqs, wq) \
wq 53 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c ((void *)((wqs)->shadow_page_vaddr[(wq)->page_idx]) \
wq 54 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c + (wq)->block_idx * WQ_BLOCK_SIZE)
wq 56 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c #define CMDQ_BASE_VADDR(cmdq_pages, wq) \
wq 58 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c + (wq)->block_idx * CMDQ_BLOCK_SIZE)
wq 60 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c #define CMDQ_BASE_PADDR(cmdq_pages, wq) \
wq 62 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c + (wq)->block_idx * CMDQ_BLOCK_SIZE)
wq 64 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c #define CMDQ_BASE_ADDR(cmdq_pages, wq) \
wq 66 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c + (wq)->block_idx * CMDQ_BLOCK_SIZE)
wq 68 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c #define WQ_PAGE_ADDR(wq, idx) \
wq 69 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c ((wq)->shadow_block_vaddr[WQE_PAGE_NUM(wq, idx)])
wq 71 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c #define MASKED_WQE_IDX(wq, idx) ((idx) & (wq)->mask)
wq 77 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c #define WQE_SHADOW_PAGE(wq, wqe) \
wq 78 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c (((unsigned long)(wqe) - (unsigned long)(wq)->shadow_wqe) \
wq 79 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c / (wq)->max_wqe_size)
wq 81 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c static inline int WQE_PAGE_OFF(struct hinic_wq *wq, u16 idx)
wq 83 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c return (((idx) & ((wq)->num_wqebbs_per_page - 1))
wq 84 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c << (wq)->wqebb_size_shift);
wq 87 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c static inline int WQE_PAGE_NUM(struct hinic_wq *wq, u16 idx)
wq 89 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c return (((idx) >> ((wq)->wqebbs_per_page_shift))
wq 90 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c & ((wq)->num_q_pages - 1));
wq 377 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c static int alloc_wqes_shadow(struct hinic_wq *wq)
wq 379 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c struct hinic_hwif *hwif = wq->hwif;
wq 383 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c size = wq->num_q_pages * wq->max_wqe_size;
wq 384 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c wq->shadow_wqe = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
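The hinic_hw_wq.c macros above decompose a WQEBB index with power-of-two masks and shifts: MASKED_WQE_IDX() wraps it into the ring, WQE_PAGE_NUM() picks the queue page, and WQE_PAGE_OFF() gives the byte offset inside that page. A worked example under assumed sizes (64-byte WQEBBs, 4096-byte pages, q_depth 256, hence 64 WQEBBs per page and 4 pages; all shifts would be derived via ilog2() as in hinic_wq_allocate()):

        u16 idx  = 300 & 255;             /* MASKED_WQE_IDX: 300 wraps to 44     */
        int page = (idx >> 6) & (4 - 1);  /* WQE_PAGE_NUM:   44 >> 6 -> page 0   */
        int off  = (idx & 63) << 6;       /* WQE_PAGE_OFF:   44 * 64 -> byte 2816 */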
drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c if (!wq->shadow_wqe) wq 388 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c size = wq->num_q_pages * sizeof(wq->prod_idx); wq 389 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c wq->shadow_idx = devm_kzalloc(&pdev->dev, size, GFP_KERNEL); wq 390 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c if (!wq->shadow_idx) wq 396 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c devm_kfree(&pdev->dev, wq->shadow_wqe); wq 404 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c static void free_wqes_shadow(struct hinic_wq *wq) wq 406 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c struct hinic_hwif *hwif = wq->hwif; wq 409 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c devm_kfree(&pdev->dev, wq->shadow_idx); wq 410 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c devm_kfree(&pdev->dev, wq->shadow_wqe); wq 419 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c static void free_wq_pages(struct hinic_wq *wq, struct hinic_hwif *hwif, wq 426 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c void **vaddr = &wq->shadow_block_vaddr[i]; wq 427 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c u64 *paddr = &wq->block_vaddr[i]; wq 431 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c dma_free_coherent(&pdev->dev, wq->wq_page_size, *vaddr, wq 435 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c free_wqes_shadow(wq); wq 446 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c static int alloc_wq_pages(struct hinic_wq *wq, struct hinic_hwif *hwif, wq 452 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c num_q_pages = ALIGN(WQ_SIZE(wq), wq->wq_page_size) / wq->wq_page_size; wq 463 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c wq->num_q_pages = num_q_pages; wq 465 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c err = alloc_wqes_shadow(wq); wq 472 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c void **vaddr = &wq->shadow_block_vaddr[i]; wq 473 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c u64 *paddr = &wq->block_vaddr[i]; wq 476 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c *vaddr = dma_alloc_coherent(&pdev->dev, wq->wq_page_size, wq 490 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c free_wq_pages(wq, hwif, i); wq 505 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c int hinic_wq_allocate(struct hinic_wqs *wqs, struct hinic_wq *wq, wq 539 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c wq->hwif = hwif; wq 541 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c err = wqs_next_block(wqs, &wq->page_idx, &wq->block_idx); wq 547 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c wq->wqebb_size = wqebb_size; wq 548 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c wq->wq_page_size = wq_page_size; wq 549 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c wq->q_depth = q_depth; wq 550 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c wq->max_wqe_size = max_wqe_size; wq 551 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c wq->num_wqebbs_per_page = num_wqebbs_per_page; wq 552 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c wq->wqebbs_per_page_shift = ilog2(num_wqebbs_per_page); wq 553 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c wq->wqebb_size_shift = wqebb_size_shift; wq 554 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c wq->block_vaddr = WQ_BASE_VADDR(wqs, wq); wq 555 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c wq->shadow_block_vaddr = WQ_BASE_ADDR(wqs, wq); wq 556 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c wq->block_paddr = WQ_BASE_PADDR(wqs, wq); wq 558 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c err = alloc_wq_pages(wq, wqs->hwif, WQ_MAX_PAGES); wq 564 
drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c atomic_set(&wq->cons_idx, 0); wq 565 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c atomic_set(&wq->prod_idx, 0); wq 566 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c atomic_set(&wq->delta, q_depth); wq 567 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c wq->mask = q_depth - 1; wq 572 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c wqs_return_block(wqs, wq->page_idx, wq->block_idx); wq 581 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c void hinic_wq_free(struct hinic_wqs *wqs, struct hinic_wq *wq) wq 583 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c free_wq_pages(wq, wqs->hwif, wq->num_q_pages); wq 585 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c wqs_return_block(wqs, wq->page_idx, wq->block_idx); wq 602 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c struct hinic_wq *wq, struct hinic_hwif *hwif, wq 646 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c wq[i].hwif = hwif; wq 647 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c wq[i].page_idx = 0; wq 648 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c wq[i].block_idx = i; wq 650 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c wq[i].wqebb_size = wqebb_size; wq 651 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c wq[i].wq_page_size = wq_page_size; wq 652 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c wq[i].q_depth = q_depth; wq 653 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c wq[i].max_wqe_size = max_wqe_size; wq 654 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c wq[i].num_wqebbs_per_page = num_wqebbs_per_page; wq 655 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c wq[i].wqebbs_per_page_shift = num_wqebbs_per_page_shift; wq 656 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c wq[i].wqebb_size_shift = wqebb_size_shift; wq 657 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c wq[i].block_vaddr = CMDQ_BASE_VADDR(cmdq_pages, &wq[i]); wq 658 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c wq[i].shadow_block_vaddr = CMDQ_BASE_ADDR(cmdq_pages, &wq[i]); wq 659 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c wq[i].block_paddr = CMDQ_BASE_PADDR(cmdq_pages, &wq[i]); wq 661 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c err = alloc_wq_pages(&wq[i], cmdq_pages->hwif, wq 668 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c atomic_set(&wq[i].cons_idx, 0); wq 669 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c atomic_set(&wq[i].prod_idx, 0); wq 670 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c atomic_set(&wq[i].delta, q_depth); wq 671 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c wq[i].mask = q_depth - 1; wq 678 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c free_wq_pages(&wq[j], cmdq_pages->hwif, wq[j].num_q_pages); wq 691 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c struct hinic_wq *wq, int cmdq_blocks) wq 696 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c free_wq_pages(&wq[i], cmdq_pages->hwif, wq[i].num_q_pages); wq 701 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c static void copy_wqe_to_shadow(struct hinic_wq *wq, void *shadow_addr, wq 708 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c idx = MASKED_WQE_IDX(wq, idx); wq 709 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c wqebb_addr = WQ_PAGE_ADDR(wq, idx) + wq 710 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c WQE_PAGE_OFF(wq, idx); wq 712 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c memcpy(shadow_addr, wqebb_addr, wq->wqebb_size); wq 714 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c shadow_addr += wq->wqebb_size; wq 718 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c static void copy_wqe_from_shadow(struct hinic_wq *wq, void 
*shadow_addr, wq 725 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c idx = MASKED_WQE_IDX(wq, idx); wq 726 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c wqebb_addr = WQ_PAGE_ADDR(wq, idx) + wq 727 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c WQE_PAGE_OFF(wq, idx); wq 729 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c memcpy(wqebb_addr, shadow_addr, wq->wqebb_size); wq 730 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c shadow_addr += wq->wqebb_size; wq 742 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c struct hinic_hw_wqe *hinic_get_wqe(struct hinic_wq *wq, unsigned int wqe_size, wq 748 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c *prod_idx = MASKED_WQE_IDX(wq, atomic_read(&wq->prod_idx)); wq 750 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c num_wqebbs = ALIGN(wqe_size, wq->wqebb_size) >> wq->wqebb_size_shift; wq 752 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c if (atomic_sub_return(num_wqebbs, &wq->delta) <= 0) { wq 753 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c atomic_add(num_wqebbs, &wq->delta); wq 757 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c end_prod_idx = atomic_add_return(num_wqebbs, &wq->prod_idx); wq 759 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c end_prod_idx = MASKED_WQE_IDX(wq, end_prod_idx); wq 761 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c curr_prod_idx = MASKED_WQE_IDX(wq, curr_prod_idx); wq 764 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c end_prod_idx = MASKED_WQE_IDX(wq, end_prod_idx - 1); wq 766 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c curr_pg = WQE_PAGE_NUM(wq, curr_prod_idx); wq 767 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c end_pg = WQE_PAGE_NUM(wq, end_prod_idx); wq 772 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c void *shadow_addr = &wq->shadow_wqe[curr_pg * wq->max_wqe_size]; wq 774 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c copy_wqe_to_shadow(wq, shadow_addr, num_wqebbs, *prod_idx); wq 776 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c wq->shadow_idx[curr_pg] = *prod_idx; wq 780 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c return WQ_PAGE_ADDR(wq, *prod_idx) + WQE_PAGE_OFF(wq, *prod_idx); wq 788 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c void hinic_return_wqe(struct hinic_wq *wq, unsigned int wqe_size) wq 790 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c int num_wqebbs = ALIGN(wqe_size, wq->wqebb_size) / wq->wqebb_size; wq 792 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c atomic_sub(num_wqebbs, &wq->prod_idx); wq 794 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c atomic_add(num_wqebbs, &wq->delta); wq 802 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c void hinic_put_wqe(struct hinic_wq *wq, unsigned int wqe_size) wq 804 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c int num_wqebbs = ALIGN(wqe_size, wq->wqebb_size) wq 805 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c >> wq->wqebb_size_shift; wq 807 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c atomic_add(num_wqebbs, &wq->cons_idx); wq 809 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c atomic_add(num_wqebbs, &wq->delta); wq 820 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c struct hinic_hw_wqe *hinic_read_wqe(struct hinic_wq *wq, unsigned int wqe_size, wq 823 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c int num_wqebbs = ALIGN(wqe_size, wq->wqebb_size) wq 824 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c >> wq->wqebb_size_shift; wq 828 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c if ((atomic_read(&wq->delta) + num_wqebbs) > wq->q_depth) wq 831 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c curr_cons_idx = atomic_read(&wq->cons_idx);
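The hinic_get_wqe()/hinic_return_wqe()/hinic_put_wqe() entries above amount to a reservation protocol: wq->delta counts free WQEBBs, a producer reserves by subtracting from delta and advancing prod_idx, and the completion path gives slots back by bumping cons_idx and delta. A minimal sketch of the producer half, reusing the driver's own macros from hinic_hw_wq.c; the function name reserve_wqebbs is illustrative, not the driver's:

/* sketch only: the delta/prod_idx handshake visible in hinic_get_wqe() */
static void *reserve_wqebbs(struct hinic_wq *wq, unsigned int wqe_size,
                            u16 *prod_idx)
{
        int num_wqebbs = ALIGN(wqe_size, wq->wqebb_size) >> wq->wqebb_size_shift;

        if (atomic_sub_return(num_wqebbs, &wq->delta) <= 0) {
                atomic_add(num_wqebbs, &wq->delta);     /* roll back: ring full */
                return NULL;
        }

        /* claim the slots, then mask the free-running counter into a ring index */
        *prod_idx = MASKED_WQE_IDX(wq, atomic_add_return(num_wqebbs, &wq->prod_idx) - num_wqebbs);
        return WQ_PAGE_ADDR(wq, *prod_idx) + WQE_PAGE_OFF(wq, *prod_idx);
}

hinic_return_wqe() is the exact inverse (subtract from prod_idx, add back to delta), which is why the TX path below can cheaply un-reserve a WQE it decided not to use.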
wq 833 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c curr_cons_idx = MASKED_WQE_IDX(wq, curr_cons_idx); wq 834 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c end_cons_idx = MASKED_WQE_IDX(wq, curr_cons_idx + num_wqebbs - 1); wq 836 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c curr_pg = WQE_PAGE_NUM(wq, curr_cons_idx); wq 837 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c end_pg = WQE_PAGE_NUM(wq, end_cons_idx); wq 842 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c void *shadow_addr = &wq->shadow_wqe[curr_pg * wq->max_wqe_size]; wq 844 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c copy_wqe_to_shadow(wq, shadow_addr, num_wqebbs, *cons_idx); wq 848 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c return WQ_PAGE_ADDR(wq, *cons_idx) + WQE_PAGE_OFF(wq, *cons_idx); wq 858 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c struct hinic_hw_wqe *hinic_read_wqe_direct(struct hinic_wq *wq, u16 cons_idx) wq 860 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c return WQ_PAGE_ADDR(wq, cons_idx) + WQE_PAGE_OFF(wq, cons_idx); wq 870 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c static inline bool wqe_shadow(struct hinic_wq *wq, struct hinic_hw_wqe *wqe) wq 872 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c size_t wqe_shadow_size = wq->num_q_pages * wq->max_wqe_size; wq 874 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c return WQE_IN_RANGE(wqe, wq->shadow_wqe, wq 875 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c &wq->shadow_wqe[wqe_shadow_size]); wq 884 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c void hinic_write_wqe(struct hinic_wq *wq, struct hinic_hw_wqe *wqe, wq 891 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c if (wqe_shadow(wq, wqe)) { wq 892 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c curr_pg = WQE_SHADOW_PAGE(wq, wqe); wq 894 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c prod_idx = wq->shadow_idx[curr_pg]; wq 895 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c num_wqebbs = ALIGN(wqe_size, wq->wqebb_size) / wq->wqebb_size; wq 896 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c shadow_addr = &wq->shadow_wqe[curr_pg * wq->max_wqe_size]; wq 898 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c copy_wqe_from_shadow(wq, shadow_addr, num_wqebbs, prod_idx); wq 78 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h struct hinic_wq *wq, struct hinic_hwif *hwif, wq 83 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h struct hinic_wq *wq, int cmdq_blocks); wq 90 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h int hinic_wq_allocate(struct hinic_wqs *wqs, struct hinic_wq *wq, wq 94 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h void hinic_wq_free(struct hinic_wqs *wqs, struct hinic_wq *wq); wq 96 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h struct hinic_hw_wqe *hinic_get_wqe(struct hinic_wq *wq, unsigned int wqe_size, wq 99 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h void hinic_return_wqe(struct hinic_wq *wq, unsigned int wqe_size); wq 101 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h void hinic_put_wqe(struct hinic_wq *wq, unsigned int wqe_size); wq 103 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h struct hinic_hw_wqe *hinic_read_wqe(struct hinic_wq *wq, unsigned int wqe_size, wq 106 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h struct hinic_hw_wqe *hinic_read_wqe_direct(struct hinic_wq *wq, u16 cons_idx); wq 108 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h void hinic_write_wqe(struct hinic_wq *wq, struct hinic_hw_wqe *wqe, wq 255 drivers/net/ethernet/huawei/hinic/hinic_rx.c while ((hw_wqe = hinic_read_wqe(rq->wq, HINIC_RQ_WQE_SIZE, &ci))) { wq 261 drivers/net/ethernet/huawei/hinic/hinic_rx.c hinic_put_wqe(rq->wq, HINIC_RQ_WQE_SIZE);
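All of the masking and paging math above (MASKED_WQE_IDX, WQE_PAGE_NUM, WQE_PAGE_OFF) relies on q_depth and the per-page WQEBB count being powers of two, so wrap-around and page selection reduce to an AND and a shift. A standalone worked example with assumed sizes (64 B WQEBBs, 4 KB pages, q_depth 4096), compilable in user space for illustration only:

#include <stdio.h>

int main(void)
{
        unsigned int q_depth = 4096;            /* WQEBBs in the ring   */
        unsigned int wqebb_size_shift = 6;      /* 64 B WQEBBs          */
        unsigned int wqebbs_per_page_shift = 6; /* 4096 B / 64 B = 64   */
        unsigned int num_q_pages = q_depth >> wqebbs_per_page_shift;
        unsigned int idx = 4100;                /* free-running counter */

        unsigned int masked = idx & (q_depth - 1);                                     /* MASKED_WQE_IDX */
        unsigned int page = (masked >> wqebbs_per_page_shift) & (num_q_pages - 1);     /* WQE_PAGE_NUM   */
        unsigned int off = (masked & ((1u << wqebbs_per_page_shift) - 1)) << wqebb_size_shift; /* WQE_PAGE_OFF */

        /* prints: idx 4100 -> masked 4, page 0, byte offset 256 */
        printf("idx %u -> masked %u, page %u, byte offset %u\n", idx, masked, page, off);
        return 0;
}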
wq 614 drivers/net/ethernet/huawei/hinic/hinic_tx.c struct hinic_wq *wq = sq->wq; wq 623 drivers/net/ethernet/huawei/hinic/hinic_tx.c hw_ci = HW_CONS_IDX(sq) & wq->mask; wq 630 drivers/net/ethernet/huawei/hinic/hinic_tx.c (((hw_ci - sw_ci) & wq->mask) * wq->wqebb_size < wqe_size)) wq 636 drivers/net/ethernet/huawei/hinic/hinic_tx.c if (wqe_size > wq->wqebb_size) { wq 1048 drivers/net/ethernet/mellanox/mlx5/core/cmd.c } else if (!queue_work(cmd->wq, &ent->work)) { wq 1995 drivers/net/ethernet/mellanox/mlx5/core/cmd.c cmd->wq = create_singlethread_workqueue(cmd->wq_name); wq 1996 drivers/net/ethernet/mellanox/mlx5/core/cmd.c if (!cmd->wq) { wq 2024 drivers/net/ethernet/mellanox/mlx5/core/cmd.c destroy_workqueue(cmd->wq); wq 320 drivers/net/ethernet/mellanox/mlx5/core/en.h struct mlx5_cqwq wq; wq 400 drivers/net/ethernet/mellanox/mlx5/core/en.h struct mlx5_wq_cyc wq; wq 524 drivers/net/ethernet/mellanox/mlx5/core/en.h struct mlx5_wq_cyc wq; wq 559 drivers/net/ethernet/mellanox/mlx5/core/en.h struct mlx5_wq_cyc wq; wq 633 drivers/net/ethernet/mellanox/mlx5/core/en.h struct mlx5_wq_cyc wq; wq 640 drivers/net/ethernet/mellanox/mlx5/core/en.h struct mlx5_wq_ll wq; wq 844 drivers/net/ethernet/mellanox/mlx5/core/en.h struct workqueue_struct *wq; wq 925 drivers/net/ethernet/mellanox/mlx5/core/en.h return mlx5_wq_ll_get_size(&rq->mpwqe.wq); wq 927 drivers/net/ethernet/mellanox/mlx5/core/en.h return mlx5_wq_cyc_get_size(&rq->wqe.wq); wq 935 drivers/net/ethernet/mellanox/mlx5/core/en.h return rq->mpwqe.wq.cur_sz; wq 937 drivers/net/ethernet/mellanox/mlx5/core/en.h return rq->wqe.wq.cur_sz; wq 197 drivers/net/ethernet/mellanox/mlx5/core/en/fs.h struct workqueue_struct *wq; wq 77 drivers/net/ethernet/mellanox/mlx5/core/en/health.c cq_sz = mlx5_cqwq_get_size(&cq->wq); wq 78 drivers/net/ethernet/mellanox/mlx5/core/en/health.c cq_log_stride = mlx5_cqwq_get_log_stride_size(&cq->wq); wq 85 drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c queue_delayed_work(priv->wq, &sagent->work, sagent->delay); wq 113 drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c queue_delayed_work(priv->wq, &sagent->work, sagent->delay); wq 65 drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c queue_work(priv->wq, &priv->monitor_counters_work); wq 149 drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c queue_work(priv->wq, &priv->update_stats_work); wq 87 drivers/net/ethernet/mellanox/mlx5/core/en/params.c #define MLX5_MAX_MPWQE_LOG_WQE_STRIDE_SZ ((BIT(__mlx5_bit_sz(wq, log_wqe_stride_size)) - 1) + \ wq 16 drivers/net/ethernet/mellanox/mlx5/core/en/params.h struct mlx5_wq_param wq; wq 22 drivers/net/ethernet/mellanox/mlx5/core/en/params.h struct mlx5_wq_param wq; wq 28 drivers/net/ethernet/mellanox/mlx5/core/en/params.h struct mlx5_wq_param wq; wq 258 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c rq->mpwqe.wq.head : mlx5_wq_cyc_get_head(&rq->wqe.wq); wq 221 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c sq_sz = mlx5_wq_cyc_get_size(&generic_sq->wq); wq 31 drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h mlx5e_wqc_has_room_for(struct mlx5_wq_cyc *wq, u16 cc, u16 pc, u16 n) wq 33 drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h return (mlx5_wq_cyc_ctr2ix(wq, cc - pc) >= n) || (cc == pc); wq 39 drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h struct mlx5_wq_cyc *wq = &sq->wq; wq 42 drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h *pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); wq 43
drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h wqe = mlx5_wq_cyc_get_wqe(wq, *pi); wq 50 drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h mlx5e_post_nop(struct mlx5_wq_cyc *wq, u32 sqn, u16 *pc) wq 52 drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h u16 pi = mlx5_wq_cyc_ctr2ix(wq, *pc); wq 53 drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi); wq 67 drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h mlx5e_post_nop_fence(struct mlx5_wq_cyc *wq, u32 sqn, u16 *pc) wq 69 drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h u16 pi = mlx5_wq_cyc_ctr2ix(wq, *pc); wq 70 drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi); wq 85 drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h mlx5e_fill_sq_frag_edge(struct mlx5e_txqsq *sq, struct mlx5_wq_cyc *wq, wq 96 drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h mlx5e_post_nop(wq, sq->sqn, &sq->pc); wq 102 drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h mlx5e_notify_hw(struct mlx5_wq_cyc *wq, u16 pc, void __iomem *uar_map, wq 109 drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h *wq->db = cpu_to_be32(pc); wq 147 drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h mlx5_cq_arm(mcq, MLX5_CQ_DB_REQ_NOT, mcq->uar->map, cq->wq.cc); wq 185 drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h mlx5_wq_ll_reset(&rq->mpwqe.wq); wq 188 drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h mlx5_wq_cyc_reset(&rq->wqe.wq); wq 185 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c struct mlx5_wq_cyc *wq = &sq->wq; wq 188 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); wq 189 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c contig_wqebbs = mlx5_wq_cyc_get_contig_wqebbs(wq, pi); wq 192 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c mlx5e_fill_xdpsq_frag_edge(sq, wq, pi, contig_wqebbs); wq 207 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c struct mlx5_wq_cyc *wq = &sq->wq; wq 211 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); wq 236 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, wq 289 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, 1))) { wq 304 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c struct mlx5_wq_cyc *wq = &sq->wq; wq 305 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); wq 306 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi); wq 400 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c cqe = mlx5_cqwq_get_cqe(&cq->wq); wq 414 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c mlx5_cqwq_pop(&cq->wq); wq 428 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc); wq 435 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c } while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq))); wq 442 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c mlx5_cqwq_update_db_record(&cq->wq); wq 459 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->cc); wq 106 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, sq->doorbell_cseg); wq 141 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h mlx5e_fill_xdpsq_frag_edge(struct mlx5e_xdpsq *sq, struct mlx5_wq_cyc *wq, wq 151 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h mlx5e_post_nop(wq, sq->sqn, &sq->pc); wq 192 
drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h struct mlx5_wq_cyc *wq = &sq->wq; wq 195 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h *pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); wq 196 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h wqe = mlx5_wq_cyc_get_wqe(wq, *pi); wq 42 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c void *wq = MLX5_ADDR_OF(sqc, sqc, wq); wq 46 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c MLX5_SET(wq, wq, log_wq_sz, log_wq_size); wq 55 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c u16 pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc); wq 62 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c nopwqe = mlx5e_post_nop(&sq->wq, sq->sqn, &sq->pc); wq 388 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c flush_workqueue(sa_entry->ipsec->wq); wq 419 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c ipsec->wq = alloc_ordered_workqueue("mlx5e_ipsec: %s", 0, wq 421 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c if (!ipsec->wq) { wq 436 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c drain_workqueue(ipsec->wq); wq 437 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c destroy_workqueue(ipsec->wq); wq 502 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c WARN_ON(!queue_work(sa_entry->ipsec->wq, &modify_work->work)); wq 86 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h struct workqueue_struct *wq; wq 166 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c struct mlx5_wq_cyc *wq = &sq->wq; wq 169 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); wq 170 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi); wq 173 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room); wq 331 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c struct mlx5_wq_cyc *wq = &sq->wq; wq 332 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); wq 336 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c mlx5e_post_nop_fence(wq, sq->sqn, &sq->pc); wq 346 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c struct mlx5_wq_cyc *wq = &sq->wq; wq 380 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); wq 381 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi); wq 384 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room); wq 164 drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c destroy_workqueue(priv->fs.arfs.wq); wq 354 drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c priv->fs.arfs.wq = create_singlethread_workqueue("mlx5e_arfs"); wq 355 drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c if (!priv->fs.arfs.wq) wq 709 drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c queue_work(priv->fs.arfs.wq, &arfs_rule->arfs_work); wq 198 drivers/net/ethernet/mellanox/mlx5/core/en_main.c queue_work(priv->wq, &priv->update_stats_work); wq 212 drivers/net/ethernet/mellanox/mlx5/core/en_main.c queue_work(priv->wq, &priv->update_carrier_work); wq 254 drivers/net/ethernet/mellanox/mlx5/core/en_main.c int wq_sz = mlx5_wq_ll_get_size(&rq->mpwqe.wq); wq 303 drivers/net/ethernet/mellanox/mlx5/core/en_main.c u64 num_mtts = MLX5E_REQUIRED_MTTS(mlx5_wq_ll_get_size(&rq->mpwqe.wq)); wq 321 drivers/net/ethernet/mellanox/mlx5/core/en_main.c for (i = 0; i < 
mlx5_wq_cyc_get_size(&rq->wqe.wq); i++) { wq 383 drivers/net/ethernet/mellanox/mlx5/core/en_main.c void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq); wq 391 drivers/net/ethernet/mellanox/mlx5/core/en_main.c rqp->wq.db_numa_node = cpu_to_node(c->cpu); wq 432 drivers/net/ethernet/mellanox/mlx5/core/en_main.c err = mlx5_wq_ll_create(mdev, &rqp->wq, rqc_wq, &rq->mpwqe.wq, wq 437 drivers/net/ethernet/mellanox/mlx5/core/en_main.c rq->mpwqe.wq.db = &rq->mpwqe.wq.db[MLX5_RCV_DBR]; wq 439 drivers/net/ethernet/mellanox/mlx5/core/en_main.c wq_sz = mlx5_wq_ll_get_size(&rq->mpwqe.wq); wq 485 drivers/net/ethernet/mellanox/mlx5/core/en_main.c err = mlx5_wq_cyc_create(mdev, &param->wq, rqc_wq, &rq->wqe.wq, wq 490 drivers/net/ethernet/mellanox/mlx5/core/en_main.c rq->wqe.wq.db = &rq->wqe.wq.db[MLX5_RCV_DBR]; wq 492 drivers/net/ethernet/mellanox/mlx5/core/en_main.c wq_sz = mlx5_wq_cyc_get_size(&rq->wqe.wq); wq 575 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mlx5_wq_ll_get_wqe(&rq->mpwqe.wq, i); wq 585 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mlx5_wq_cyc_get_wqe(&rq->wqe.wq, i); wq 681 drivers/net/ethernet/mellanox/mlx5/core/en_main.c void *wq; wq 692 drivers/net/ethernet/mellanox/mlx5/core/en_main.c wq = MLX5_ADDR_OF(rqc, rqc, wq); wq 698 drivers/net/ethernet/mellanox/mlx5/core/en_main.c MLX5_SET(wq, wq, log_wq_pg_sz, rq->wq_ctrl.buf.page_shift - wq 700 drivers/net/ethernet/mellanox/mlx5/core/en_main.c MLX5_SET64(wq, wq, dbr_addr, rq->wq_ctrl.db.dma); wq 703 drivers/net/ethernet/mellanox/mlx5/core/en_main.c (__be64 *)MLX5_ADDR_OF(wq, wq, pas)); wq 829 drivers/net/ethernet/mellanox/mlx5/core/en_main.c struct mlx5_wq_ll *wq; wq 836 drivers/net/ethernet/mellanox/mlx5/core/en_main.c wq = &rq->mpwqe.wq; wq 837 drivers/net/ethernet/mellanox/mlx5/core/en_main.c head = wq->head; wq 842 drivers/net/ethernet/mellanox/mlx5/core/en_main.c head = mlx5_wq_ll_get_wqe_next_ix(wq, head); wq 845 drivers/net/ethernet/mellanox/mlx5/core/en_main.c rq->mpwqe.actual_wq_head = wq->head; wq 856 drivers/net/ethernet/mellanox/mlx5/core/en_main.c struct mlx5_wq_ll *wq = &rq->mpwqe.wq; wq 860 drivers/net/ethernet/mellanox/mlx5/core/en_main.c while (!mlx5_wq_ll_is_empty(wq)) { wq 863 drivers/net/ethernet/mellanox/mlx5/core/en_main.c wqe_ix_be = *wq->tail_next; wq 865 drivers/net/ethernet/mellanox/mlx5/core/en_main.c wqe = mlx5_wq_ll_get_wqe(wq, wqe_ix); wq 867 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mlx5_wq_ll_pop(wq, wqe_ix_be, wq 871 drivers/net/ethernet/mellanox/mlx5/core/en_main.c struct mlx5_wq_cyc *wq = &rq->wqe.wq; wq 873 drivers/net/ethernet/mellanox/mlx5/core/en_main.c while (!mlx5_wq_cyc_is_empty(wq)) { wq 874 drivers/net/ethernet/mellanox/mlx5/core/en_main.c wqe_ix = mlx5_wq_cyc_get_tail(wq); wq 876 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mlx5_wq_cyc_pop(wq); wq 954 drivers/net/ethernet/mellanox/mlx5/core/en_main.c int wq_sz = mlx5_wq_cyc_get_size(&sq->wq); wq 971 drivers/net/ethernet/mellanox/mlx5/core/en_main.c int wq_sz = mlx5_wq_cyc_get_size(&sq->wq); wq 995 drivers/net/ethernet/mellanox/mlx5/core/en_main.c void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq); wq 997 drivers/net/ethernet/mellanox/mlx5/core/en_main.c struct mlx5_wq_cyc *wq = &sq->wq; wq 1014 drivers/net/ethernet/mellanox/mlx5/core/en_main.c param->wq.db_numa_node = cpu_to_node(c->cpu); wq 1015 drivers/net/ethernet/mellanox/mlx5/core/en_main.c err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl); wq 1018 drivers/net/ethernet/mellanox/mlx5/core/en_main.c wq->db = &wq->db[MLX5_SND_DBR]; wq 1045 drivers/net/ethernet/mellanox/mlx5/core/en_main.c int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
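The mlx5e_alloc_rq()/mlx5e_alloc_*sq() entries above repeat one creation pattern: pick a NUMA node for the doorbell, create the WQ from the firmware "wq" context, then point wq->db at the correct half of the shared doorbell record. A condensed sketch for a send queue, with error unwinding trimmed; sketch_create_sq_wq is an illustrative name, not the driver's:

static int sketch_create_sq_wq(struct mlx5_core_dev *mdev,
                               struct mlx5e_sq_param *param,
                               struct mlx5_wq_cyc *wq,
                               struct mlx5_wq_ctrl *wq_ctrl, int cpu)
{
        void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq);
        int err;

        param->wq.db_numa_node = cpu_to_node(cpu);
        err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, wq_ctrl);
        if (err)
                return err;

        /* the doorbell record holds both counters; SQs take the send half,
         * RQs take MLX5_RCV_DBR as in the RQ entries above */
        wq->db = &wq->db[MLX5_SND_DBR];
        return 0;
}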
wq 1068 drivers/net/ethernet/mellanox/mlx5/core/en_main.c void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq); wq 1070 drivers/net/ethernet/mellanox/mlx5/core/en_main.c struct mlx5_wq_cyc *wq = &sq->wq; wq 1076 drivers/net/ethernet/mellanox/mlx5/core/en_main.c param->wq.db_numa_node = cpu_to_node(c->cpu); wq 1077 drivers/net/ethernet/mellanox/mlx5/core/en_main.c err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl); wq 1080 drivers/net/ethernet/mellanox/mlx5/core/en_main.c wq->db = &wq->db[MLX5_SND_DBR]; wq 1110 drivers/net/ethernet/mellanox/mlx5/core/en_main.c int wq_sz = mlx5_wq_cyc_get_size(&sq->wq); wq 1137 drivers/net/ethernet/mellanox/mlx5/core/en_main.c void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq); wq 1139 drivers/net/ethernet/mellanox/mlx5/core/en_main.c struct mlx5_wq_cyc *wq = &sq->wq; wq 1168 drivers/net/ethernet/mellanox/mlx5/core/en_main.c param->wq.db_numa_node = cpu_to_node(c->cpu); wq 1169 drivers/net/ethernet/mellanox/mlx5/core/en_main.c err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl); wq 1172 drivers/net/ethernet/mellanox/mlx5/core/en_main.c wq->db = &wq->db[MLX5_SND_DBR]; wq 1210 drivers/net/ethernet/mellanox/mlx5/core/en_main.c void *wq; wq 1221 drivers/net/ethernet/mellanox/mlx5/core/en_main.c wq = MLX5_ADDR_OF(sqc, sqc, wq); wq 1234 drivers/net/ethernet/mellanox/mlx5/core/en_main.c MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC); wq 1235 drivers/net/ethernet/mellanox/mlx5/core/en_main.c MLX5_SET(wq, wq, uar_page, mdev->mlx5e_res.bfreg.index); wq 1236 drivers/net/ethernet/mellanox/mlx5/core/en_main.c MLX5_SET(wq, wq, log_wq_pg_sz, csp->wq_ctrl->buf.page_shift - wq 1238 drivers/net/ethernet/mellanox/mlx5/core/en_main.c MLX5_SET64(wq, wq, dbr_addr, csp->wq_ctrl->db.dma); wq 1241 drivers/net/ethernet/mellanox/mlx5/core/en_main.c (__be64 *)MLX5_ADDR_OF(wq, wq, pas)); wq 1366 drivers/net/ethernet/mellanox/mlx5/core/en_main.c struct mlx5_wq_cyc *wq = &sq->wq; wq 1375 drivers/net/ethernet/mellanox/mlx5/core/en_main.c if (mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, 1)) { wq 1376 drivers/net/ethernet/mellanox/mlx5/core/en_main.c u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); wq 1384 drivers/net/ethernet/mellanox/mlx5/core/en_main.c nop = mlx5e_post_nop(wq, sq->sqn, &sq->pc); wq 1385 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &nop->ctrl); wq 1494 drivers/net/ethernet/mellanox/mlx5/core/en_main.c for (i = 0; i < mlx5_wq_cyc_get_size(&sq->wq); i++) { wq 1496 drivers/net/ethernet/mellanox/mlx5/core/en_main.c struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(&sq->wq, i); wq 1547 drivers/net/ethernet/mellanox/mlx5/core/en_main.c err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq, wq 1562 drivers/net/ethernet/mellanox/mlx5/core/en_main.c for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) { wq 1563 drivers/net/ethernet/mellanox/mlx5/core/en_main.c struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i); wq 1580 drivers/net/ethernet/mellanox/mlx5/core/en_main.c param->wq.buf_numa_node = cpu_to_node(c->cpu); wq 1581 drivers/net/ethernet/mellanox/mlx5/core/en_main.c param->wq.db_numa_node = cpu_to_node(c->cpu); wq 2143 drivers/net/ethernet/mellanox/mlx5/core/en_main.c void *wq = MLX5_ADDR_OF(rqc, rqc, wq); wq 2145 drivers/net/ethernet/mellanox/mlx5/core/en_main.c return MLX5_GET(wq, wq, log_wq_sz); wq 2155 drivers/net/ethernet/mellanox/mlx5/core/en_main.c void *wq = MLX5_ADDR_OF(rqc, rqc, wq); wq 2160 drivers/net/ethernet/mellanox/mlx5/core/en_main.c MLX5_SET(wq, wq, log_wqe_num_of_strides,
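Before any of those create calls, the firmware WQ context is filled field by field with MLX5_SET()/MLX5_SET64(), as the mlx5e_build_*_param() entries here show. A condensed excerpt for the cyclic-RQ case; the values are taken from the listing, and the surrounding function and MPWQE branch are omitted:

void *wq = MLX5_ADDR_OF(rqc, rqc, wq);

MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
MLX5_SET(wq, wq, log_wq_sz, params->log_rq_mtu_frames); /* ring size, log2 */
MLX5_SET(wq, wq, pd, mdev->mlx5e_res.pdn);
MLX5_SET64(wq, wq, dbr_addr, rq->wq_ctrl.db.dma);       /* doorbell record DMA address */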
wq 2163 drivers/net/ethernet/mellanox/mlx5/core/en_main.c MLX5_SET(wq, wq, log_wqe_stride_size, wq 2166 drivers/net/ethernet/mellanox/mlx5/core/en_main.c MLX5_SET(wq, wq, log_wq_sz, mlx5e_mpwqe_get_log_rq_size(params, xsk)); wq 2169 drivers/net/ethernet/mellanox/mlx5/core/en_main.c MLX5_SET(wq, wq, log_wq_sz, params->log_rq_mtu_frames); wq 2174 drivers/net/ethernet/mellanox/mlx5/core/en_main.c MLX5_SET(wq, wq, wq_type, params->rq_wq_type); wq 2175 drivers/net/ethernet/mellanox/mlx5/core/en_main.c MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN); wq 2176 drivers/net/ethernet/mellanox/mlx5/core/en_main.c MLX5_SET(wq, wq, log_wq_stride, wq 2178 drivers/net/ethernet/mellanox/mlx5/core/en_main.c MLX5_SET(wq, wq, pd, mdev->mlx5e_res.pdn); wq 2183 drivers/net/ethernet/mellanox/mlx5/core/en_main.c param->wq.buf_numa_node = dev_to_node(mdev->device); wq 2191 drivers/net/ethernet/mellanox/mlx5/core/en_main.c void *wq = MLX5_ADDR_OF(rqc, rqc, wq); wq 2193 drivers/net/ethernet/mellanox/mlx5/core/en_main.c MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC); wq 2194 drivers/net/ethernet/mellanox/mlx5/core/en_main.c MLX5_SET(wq, wq, log_wq_stride, wq 2198 drivers/net/ethernet/mellanox/mlx5/core/en_main.c param->wq.buf_numa_node = dev_to_node(mdev->device); wq 2205 drivers/net/ethernet/mellanox/mlx5/core/en_main.c void *wq = MLX5_ADDR_OF(sqc, sqc, wq); wq 2207 drivers/net/ethernet/mellanox/mlx5/core/en_main.c MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB)); wq 2208 drivers/net/ethernet/mellanox/mlx5/core/en_main.c MLX5_SET(wq, wq, pd, priv->mdev->mlx5e_res.pdn); wq 2210 drivers/net/ethernet/mellanox/mlx5/core/en_main.c param->wq.buf_numa_node = dev_to_node(priv->mdev->device); wq 2218 drivers/net/ethernet/mellanox/mlx5/core/en_main.c void *wq = MLX5_ADDR_OF(sqc, sqc, wq); wq 2224 drivers/net/ethernet/mellanox/mlx5/core/en_main.c MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size); wq 2296 drivers/net/ethernet/mellanox/mlx5/core/en_main.c void *wq = MLX5_ADDR_OF(sqc, sqc, wq); wq 2300 drivers/net/ethernet/mellanox/mlx5/core/en_main.c MLX5_SET(wq, wq, log_wq_sz, log_wq_size); wq 2309 drivers/net/ethernet/mellanox/mlx5/core/en_main.c void *wq = MLX5_ADDR_OF(sqc, sqc, wq); wq 2312 drivers/net/ethernet/mellanox/mlx5/core/en_main.c MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size); wq 3122 drivers/net/ethernet/mellanox/mlx5/core/en_main.c void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq); wq 3125 drivers/net/ethernet/mellanox/mlx5/core/en_main.c param->wq.db_numa_node = param->wq.buf_numa_node; wq 3127 drivers/net/ethernet/mellanox/mlx5/core/en_main.c err = mlx5_wq_cyc_create(mdev, &param->wq, rqc_wq, &rq->wqe.wq, wq 3144 drivers/net/ethernet/mellanox/mlx5/core/en_main.c param->wq.buf_numa_node = dev_to_node(mdev->device); wq 3145 drivers/net/ethernet/mellanox/mlx5/core/en_main.c param->wq.db_numa_node = dev_to_node(mdev->device); wq 3628 drivers/net/ethernet/mellanox/mlx5/core/en_main.c queue_work(priv->wq, &priv->set_rx_mode_work); wq 3643 drivers/net/ethernet/mellanox/mlx5/core/en_main.c queue_work(priv->wq, &priv->set_rx_mode_work); wq 4236 drivers/net/ethernet/mellanox/mlx5/core/en_main.c queue_work(priv->wq, &vxlan_work->work); wq 4380 drivers/net/ethernet/mellanox/mlx5/core/en_main.c queue_work(priv->wq, &priv->tx_timeout_work); wq 5193 drivers/net/ethernet/mellanox/mlx5/core/en_main.c queue_work(priv->wq, &priv->set_rx_mode_work); wq 5217 drivers/net/ethernet/mellanox/mlx5/core/en_main.c queue_work(priv->wq,
&priv->set_rx_mode_work); wq 5274 drivers/net/ethernet/mellanox/mlx5/core/en_main.c priv->wq = create_singlethread_workqueue("mlx5e"); wq 5275 drivers/net/ethernet/mellanox/mlx5/core/en_main.c if (!priv->wq) wq 5290 drivers/net/ethernet/mellanox/mlx5/core/en_main.c destroy_workqueue(priv->wq); wq 5370 drivers/net/ethernet/mellanox/mlx5/core/en_main.c flush_workqueue(priv->wq); wq 895 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c if (!queue_work(priv->wq, &nhe->neigh_update_work)) { wq 1018 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c flush_workqueue(priv->wq); /* flush neigh update works */ wq 1704 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c queue_work(priv->wq, &priv->update_carrier_work); wq 1716 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c queue_work(priv->wq, &rpriv->uplink_priv.reoffload_flows_work); wq 58 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c static inline void mlx5e_read_cqe_slot(struct mlx5_cqwq *wq, wq 61 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c u32 ci = mlx5_cqwq_ctr2ix(wq, cqcc); wq 63 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c memcpy(data, mlx5_cqwq_get_wqe(wq, ci), sizeof(struct mlx5_cqe64)); wq 67 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c struct mlx5_cqwq *wq, wq 73 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c mlx5e_read_cqe_slot(wq, cqcc, title); wq 79 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c static inline void mlx5e_read_mini_arr_slot(struct mlx5_cqwq *wq, wq 83 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c mlx5e_read_cqe_slot(wq, cqcc, cqd->mini_arr); wq 87 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c static inline void mlx5e_cqes_update_owner(struct mlx5_cqwq *wq, int n) wq 89 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c u32 cqcc = wq->cc; wq 90 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c u8 op_own = mlx5_cqwq_get_ctr_wrap_cnt(wq, cqcc) & 1; wq 91 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c u32 ci = mlx5_cqwq_ctr2ix(wq, cqcc); wq 92 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c u32 wq_sz = mlx5_cqwq_get_size(wq); wq 96 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(wq, ci); wq 104 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(wq, ci); wq 112 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c struct mlx5_cqwq *wq, wq 122 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c title->op_own |= 0x01 & (cqcc >> wq->fbc.log_sz); wq 129 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c mlx5_wq_cyc_ctr2ix(&rq->wqe.wq, cqd->wqe_counter + 1); wq 133 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c struct mlx5_cqwq *wq, wq 138 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c mlx5e_decompress_cqe(rq, wq, cqcc); wq 144 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c struct mlx5_cqwq *wq, wq 149 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c u32 cqcc = wq->cc + update_owner_only; wq 158 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c mlx5e_read_mini_arr_slot(wq, cqd, cqcc); wq 160 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c mlx5e_decompress_cqe_no_hash(rq, wq, cqcc); wq 163 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c mlx5e_cqes_update_owner(wq, cqcc - wq->cc); wq 164 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c wq->cc = cqcc; wq 172 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c struct mlx5_cqwq *wq, wq 176 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c u32 cc = wq->cc; wq 178 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c mlx5e_read_title_slot(rq, wq, cc); wq 179 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c 
mlx5e_read_mini_arr_slot(wq, cqd, cc + 1); wq 180 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c mlx5e_decompress_cqe(rq, wq, cc); wq 184 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c return mlx5e_decompress_cqes_cont(rq, wq, 1, budget_rem) - 1; wq 379 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c struct mlx5_wq_cyc *wq = &rq->wqe.wq; wq 391 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c struct mlx5e_rx_wqe_cyc *wqe = mlx5_wq_cyc_get_wqe(wq, ix + i); wq 455 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c struct mlx5_wq_ll *wq = &rq->mpwqe.wq; wq 458 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c u16 next_wqe_index = mlx5_wq_ll_get_wqe_next_ix(wq, wq->head); wq 460 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c mlx5_wq_ll_push(wq, next_wqe_index); wq 466 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c mlx5_wq_ll_update_db_record(wq); wq 470 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c struct mlx5_wq_cyc *wq, wq 481 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c mlx5e_post_nop(wq, sq->sqn, &sq->pc); wq 490 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c struct mlx5_wq_cyc *wq = &sq->wq; wq 503 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); wq 504 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi); wq 506 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c mlx5e_fill_icosq_frag_edge(sq, wq, pi, contig_wqebbs_room); wq 507 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); wq 510 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c umr_wqe = mlx5_wq_cyc_get_wqe(wq, pi); wq 558 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c struct mlx5_wq_cyc *wq = &rq->wqe.wq; wq 567 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c if (mlx5_wq_cyc_missing(wq) < wqe_bulk) wq 571 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c u16 head = mlx5_wq_cyc_get_head(wq); wq 579 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c mlx5_wq_cyc_push_n(wq, wqe_bulk); wq 580 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c } while (mlx5_wq_cyc_missing(wq) >= wqe_bulk); wq 585 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c mlx5_wq_cyc_update_db_record(wq); wq 600 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c cqe = mlx5_cqwq_get_cqe(&cq->wq); wq 614 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c mlx5_cqwq_pop(&cq->wq); wq 622 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c queue_work(cq->channel->priv->wq, &sq->recover_work); wq 631 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc); wq 644 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c } while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq))); wq 648 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c mlx5_cqwq_update_db_record(&cq->wq); wq 656 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c struct mlx5_wq_ll *wq = &rq->mpwqe.wq; wq 671 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c missing = mlx5_wq_ll_missing(wq) - rq->mpwqe.umr_in_progress; wq 687 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c head = mlx5_wq_ll_get_wqe_next_ix(wq, head); wq 692 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, sq->doorbell_cseg); wq 1143 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c queue_work(rq->channel->priv->wq, &rq->recover_work); wq 1148 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c struct mlx5_wq_cyc *wq = &rq->wqe.wq; wq 1154 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c ci = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter)); wq 1185 
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c mlx5_wq_cyc_pop(wq); wq 1195 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c struct mlx5_wq_cyc *wq = &rq->wqe.wq; wq 1201 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c ci = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter)); wq 1232 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c mlx5_wq_cyc_pop(wq); wq 1338 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c struct mlx5_wq_ll *wq; wq 1374 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c wq = &rq->mpwqe.wq; wq 1375 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c wqe = mlx5_wq_ll_get_wqe(wq, wqe_id); wq 1377 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c mlx5_wq_ll_pop(wq, cqe->wqe_id, &wqe->next.next_wqe_index); wq 1383 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c struct mlx5_cqwq *cqwq = &cq->wq; wq 1514 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c struct mlx5_wq_cyc *wq = &rq->wqe.wq; wq 1520 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c ci = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter)); wq 1545 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c mlx5_wq_cyc_pop(wq); wq 1554 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c struct mlx5_wq_cyc *wq = &rq->wqe.wq; wq 1560 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c ci = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter)); wq 1585 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c mlx5_wq_cyc_pop(wq); wq 243 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c struct mlx5_wq_cyc *wq = &sq->wq; wq 258 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c if (unlikely(!mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, sq->stop_room))) { wq 266 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c mlx5e_notify_hw(wq, sq->pc, sq->uar_map, cseg); wq 272 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c struct mlx5_wq_cyc *wq = &sq->wq; wq 319 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi); wq 327 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room); wq 406 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c struct mlx5_cqwq *wq = &sq->cq.wq; wq 409 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c ci = mlx5_cqwq_ctr2ix(wq, wq->cc - 1); wq 435 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c cqe = mlx5_cqwq_get_cqe(&cq->wq); wq 457 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c mlx5_cqwq_pop(&cq->wq); wq 466 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c queue_work(cq->channel->priv->wq, wq 480 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc); wq 513 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c } while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq))); wq 517 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c mlx5_cqwq_update_db_record(&cq->wq); wq 528 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, sq->stop_room) && wq 549 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc); wq 592 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c struct mlx5_wq_cyc *wq = &sq->wq; wq 640 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); wq 641 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi); wq 643 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room); wq 644 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); wq 76 
drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c struct mlx5_wq_cyc *wq = &sq->wq; wq 78 drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); wq 82 drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c nopwqe = mlx5e_post_nop(wq, sq->sqn, &sq->pc); wq 83 drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &nopwqe->ctrl); wq 56 drivers/net/ethernet/mellanox/mlx5/core/events.c struct workqueue_struct *wq; wq 311 drivers/net/ethernet/mellanox/mlx5/core/events.c queue_work(events->wq, &events->pcie_core_work); wq 348 drivers/net/ethernet/mellanox/mlx5/core/events.c events->wq = create_singlethread_workqueue("mlx5_events"); wq 349 drivers/net/ethernet/mellanox/mlx5/core/events.c if (!events->wq) { wq 360 drivers/net/ethernet/mellanox/mlx5/core/events.c destroy_workqueue(dev->priv.events->wq); wq 383 drivers/net/ethernet/mellanox/mlx5/core/events.c flush_workqueue(events->wq); wq 116 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c data = mlx5_wq_cyc_get_wqe(&conn->qp.wq.rq, ix); wq 126 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c *conn->qp.wq.rq.db = cpu_to_be32(conn->qp.rq.pc & 0xffff); wq 135 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c *conn->qp.wq.sq.db = cpu_to_be32(conn->qp.sq.pc); wq 151 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c ctrl = mlx5_wq_cyc_get_wqe(&conn->qp.wq.sq, ix); wq 362 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c conn->fdev->conn_res.uar->map, conn->cq.wq.cc); wq 388 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c cqe = mlx5_cqwq_get_cqe(&conn->cq.wq); wq 393 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c mlx5_cqwq_pop(&conn->cq.wq); wq 395 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c mlx5_cqwq_update_db_record(&conn->cq.wq); wq 402 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c mlx5_fpga_dbg(conn->fdev, "Re-arming CQ with cc# %u\n", conn->cq.wq.cc); wq 448 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c err = mlx5_cqwq_create(mdev, &wqp, temp_cqc, &conn->cq.wq, wq 453 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c for (i = 0; i < mlx5_cqwq_get_size(&conn->cq.wq); i++) { wq 454 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c cqe = mlx5_cqwq_get_wqe(&conn->cq.wq, i); wq 529 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c return mlx5_wq_qp_create(mdev, &wqp, qpc, &conn->qp.wq, wq 56 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.h struct mlx5_cqwq wq; wq 66 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.h struct mlx5_wq_qp wq; wq 242 drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c queue_delayed_work(fc_stats->wq, &fc_stats->work, wq 331 drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c mod_delayed_work(fc_stats->wq, &fc_stats->work, 0); wq 357 drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c mod_delayed_work(fc_stats->wq, &fc_stats->work, 0); wq 383 drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c fc_stats->wq = create_singlethread_workqueue("mlx5_fc"); wq 384 drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c if (!fc_stats->wq) wq 406 drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c destroy_workqueue(dev->priv.fc_stats.wq); wq 407 drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c dev->priv.fc_stats.wq = NULL; wq 454 drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c queue_delayed_work(fc_stats->wq, dwork, delay); wq 694 drivers/net/ethernet/mellanox/mlx5/core/health.c queue_work(health->wq, &health->fatal_report_work); wq 732 drivers/net/ethernet/mellanox/mlx5/core/health.c 
queue_work(health->wq, &health->report_work); wq 738 drivers/net/ethernet/mellanox/mlx5/core/health.c queue_work(health->wq, &health->report_work); wq 788 drivers/net/ethernet/mellanox/mlx5/core/health.c flush_workqueue(health->wq); wq 795 drivers/net/ethernet/mellanox/mlx5/core/health.c destroy_workqueue(health->wq); wq 813 drivers/net/ethernet/mellanox/mlx5/core/health.c health->wq = create_singlethread_workqueue(name); wq 815 drivers/net/ethernet/mellanox/mlx5/core/health.c if (!health->wq) wq 117 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h struct mlx5_wq_cyc *wq = &sq->wq; wq 119 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h *wqe = mlx5_wq_cyc_get_wqe(wq, pi); wq 350 drivers/net/ethernet/mellanox/mlx5/core/lag.c queue_delayed_work(ldev->wq, &ldev->bond_work, delay); wq 502 drivers/net/ethernet/mellanox/mlx5/core/lag.c ldev->wq = create_singlethread_workqueue("mlx5_lag"); wq 503 drivers/net/ethernet/mellanox/mlx5/core/lag.c if (!ldev->wq) { wq 515 drivers/net/ethernet/mellanox/mlx5/core/lag.c destroy_workqueue(ldev->wq); wq 39 drivers/net/ethernet/mellanox/mlx5/core/lag.h struct workqueue_struct *wq; wq 96 drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c flush_workqueue(ldev->wq); wq 300 drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c queue_work(ldev->wq, &fib_work->work); wq 78 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c cqe64 = mlx5_cqwq_get_cqe(&dr_cq->wq); wq 82 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c mlx5_cqwq_pop(&dr_cq->wq); wq 84 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c mlx5_cqwq_update_db_record(&dr_cq->wq); wq 136 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c err = mlx5_wq_qp_create(mdev, &wqp, temp_qpc, &dr_qp->wq, wq 216 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c *dr_qp->wq.sq.db = cpu_to_be32(dr_qp->sq.pc & 0xfffff); wq 239 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c wq_ctrl = mlx5_wq_cyc_get_wqe(&dr_qp->wq.sq, idx); wq 724 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c err = mlx5_cqwq_create(mdev, &wqp, temp_cqc, &cq->wq, wq 729 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) { wq 730 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c cqe = mlx5_cqwq_get_wqe(&cq->wq, i); wq 972 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h struct mlx5_wq_qp wq; wq 994 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h struct mlx5_cqwq wq; wq 309 drivers/net/ethernet/mellanox/mlx5/core/transobj.c void *rqc, *wq; wq 312 drivers/net/ethernet/mellanox/mlx5/core/transobj.c wq = MLX5_ADDR_OF(rqc, rqc, wq); wq 318 drivers/net/ethernet/mellanox/mlx5/core/transobj.c MLX5_SET(wq, wq, log_hairpin_data_sz, params->log_data_size); wq 319 drivers/net/ethernet/mellanox/mlx5/core/transobj.c MLX5_SET(wq, wq, log_hairpin_num_packets, params->log_num_packets); wq 328 drivers/net/ethernet/mellanox/mlx5/core/transobj.c void *sqc, *wq; wq 331 drivers/net/ethernet/mellanox/mlx5/core/transobj.c wq = MLX5_ADDR_OF(sqc, sqc, wq); wq 336 drivers/net/ethernet/mellanox/mlx5/core/transobj.c MLX5_SET(wq, wq, log_hairpin_data_sz, params->log_data_size); wq 337 drivers/net/ethernet/mellanox/mlx5/core/transobj.c MLX5_SET(wq, wq, log_hairpin_num_packets, params->log_num_packets); wq 37 drivers/net/ethernet/mellanox/mlx5/core/wq.c u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq) wq 39 drivers/net/ethernet/mellanox/mlx5/core/wq.c return (u32)wq->fbc.sz_m1 + 1; wq 42 drivers/net/ethernet/mellanox/mlx5/core/wq.c u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq)
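Away from the hardware rings, the workqueue_struct entries in this stretch (cmd, events, fs_counters, health, lag, and the nfp/qlcnic drivers further down) all follow the same driver-private workqueue lifecycle. A minimal sketch, with a hypothetical foo subsystem standing in for the per-driver state:

struct foo {
        struct workqueue_struct *wq;
        struct work_struct work;
};

static int foo_init(struct foo *f)
{
        f->wq = create_singlethread_workqueue("foo");   /* ordered, one worker */
        return f->wq ? 0 : -ENOMEM;
}

static void foo_kick(struct foo *f)
{
        queue_work(f->wq, &f->work);    /* no-op if the work is already queued */
}

static void foo_cleanup(struct foo *f)
{
        flush_workqueue(f->wq);         /* wait out pending work first */
        destroy_workqueue(f->wq);
}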
wq 44 drivers/net/ethernet/mellanox/mlx5/core/wq.c return wq->fbc.sz_m1 + 1; wq 47 drivers/net/ethernet/mellanox/mlx5/core/wq.c u8 mlx5_cqwq_get_log_stride_size(struct mlx5_cqwq *wq) wq 49 drivers/net/ethernet/mellanox/mlx5/core/wq.c return wq->fbc.log_stride; wq 52 drivers/net/ethernet/mellanox/mlx5/core/wq.c u32 mlx5_wq_ll_get_size(struct mlx5_wq_ll *wq) wq 54 drivers/net/ethernet/mellanox/mlx5/core/wq.c return (u32)wq->fbc.sz_m1 + 1; wq 63 drivers/net/ethernet/mellanox/mlx5/core/wq.c void *wqc, struct mlx5_wq_cyc *wq, wq 66 drivers/net/ethernet/mellanox/mlx5/core/wq.c u8 log_wq_stride = MLX5_GET(wq, wqc, log_wq_stride); wq 67 drivers/net/ethernet/mellanox/mlx5/core/wq.c u8 log_wq_sz = MLX5_GET(wq, wqc, log_wq_sz); wq 68 drivers/net/ethernet/mellanox/mlx5/core/wq.c struct mlx5_frag_buf_ctrl *fbc = &wq->fbc; wq 77 drivers/net/ethernet/mellanox/mlx5/core/wq.c wq->db = wq_ctrl->db.db; wq 87 drivers/net/ethernet/mellanox/mlx5/core/wq.c wq->sz = mlx5_wq_cyc_get_size(wq); wq 99 drivers/net/ethernet/mellanox/mlx5/core/wq.c void mlx5_wq_cyc_reset(struct mlx5_wq_cyc *wq) wq 101 drivers/net/ethernet/mellanox/mlx5/core/wq.c wq->wqe_ctr = 0; wq 102 drivers/net/ethernet/mellanox/mlx5/core/wq.c wq->cur_sz = 0; wq 103 drivers/net/ethernet/mellanox/mlx5/core/wq.c mlx5_wq_cyc_update_db_record(wq); wq 107 drivers/net/ethernet/mellanox/mlx5/core/wq.c void *qpc, struct mlx5_wq_qp *wq, wq 135 drivers/net/ethernet/mellanox/mlx5/core/wq.c mlx5_init_fbc(wq_ctrl->buf.frags, log_rq_stride, log_rq_sz, &wq->rq.fbc); wq 145 drivers/net/ethernet/mellanox/mlx5/core/wq.c &wq->sq.fbc); wq 150 drivers/net/ethernet/mellanox/mlx5/core/wq.c log_sq_stride, log_sq_sz, &wq->sq.fbc); wq 153 drivers/net/ethernet/mellanox/mlx5/core/wq.c wq->rq.db = &wq_ctrl->db.db[MLX5_RCV_DBR]; wq 154 drivers/net/ethernet/mellanox/mlx5/core/wq.c wq->sq.db = &wq_ctrl->db.db[MLX5_SND_DBR]; wq 167 drivers/net/ethernet/mellanox/mlx5/core/wq.c void *cqc, struct mlx5_cqwq *wq, wq 181 drivers/net/ethernet/mellanox/mlx5/core/wq.c wq->db = wq_ctrl->db.db; wq 192 drivers/net/ethernet/mellanox/mlx5/core/wq.c mlx5_init_fbc(wq_ctrl->buf.frags, log_wq_stride, log_wq_sz, &wq->fbc); wq 204 drivers/net/ethernet/mellanox/mlx5/core/wq.c static void mlx5_wq_ll_init_list(struct mlx5_wq_ll *wq) wq 209 drivers/net/ethernet/mellanox/mlx5/core/wq.c for (i = 0; i < wq->fbc.sz_m1; i++) { wq 210 drivers/net/ethernet/mellanox/mlx5/core/wq.c next_seg = mlx5_wq_ll_get_wqe(wq, i); wq 213 drivers/net/ethernet/mellanox/mlx5/core/wq.c next_seg = mlx5_wq_ll_get_wqe(wq, i); wq 214 drivers/net/ethernet/mellanox/mlx5/core/wq.c wq->tail_next = &next_seg->next_wqe_index; wq 218 drivers/net/ethernet/mellanox/mlx5/core/wq.c void *wqc, struct mlx5_wq_ll *wq, wq 221 drivers/net/ethernet/mellanox/mlx5/core/wq.c u8 log_wq_stride = MLX5_GET(wq, wqc, log_wq_stride); wq 222 drivers/net/ethernet/mellanox/mlx5/core/wq.c u8 log_wq_sz = MLX5_GET(wq, wqc, log_wq_sz); wq 223 drivers/net/ethernet/mellanox/mlx5/core/wq.c struct mlx5_frag_buf_ctrl *fbc = &wq->fbc; wq 232 drivers/net/ethernet/mellanox/mlx5/core/wq.c wq->db = wq_ctrl->db.db; wq 243 drivers/net/ethernet/mellanox/mlx5/core/wq.c mlx5_wq_ll_init_list(wq); wq 254 drivers/net/ethernet/mellanox/mlx5/core/wq.c void mlx5_wq_ll_reset(struct mlx5_wq_ll *wq) wq 256 drivers/net/ethernet/mellanox/mlx5/core/wq.c wq->head = 0; wq 257 drivers/net/ethernet/mellanox/mlx5/core/wq.c wq->wqe_ctr = 0; wq 258 drivers/net/ethernet/mellanox/mlx5/core/wq.c wq->cur_sz = 0; wq 259
drivers/net/ethernet/mellanox/mlx5/core/wq.c mlx5_wq_ll_init_list(wq); wq 260 drivers/net/ethernet/mellanox/mlx5/core/wq.c mlx5_wq_ll_update_db_record(wq); wq 80 drivers/net/ethernet/mellanox/mlx5/core/wq.h void *wqc, struct mlx5_wq_cyc *wq, wq 82 drivers/net/ethernet/mellanox/mlx5/core/wq.h u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq); wq 83 drivers/net/ethernet/mellanox/mlx5/core/wq.h void mlx5_wq_cyc_reset(struct mlx5_wq_cyc *wq); wq 86 drivers/net/ethernet/mellanox/mlx5/core/wq.h void *qpc, struct mlx5_wq_qp *wq, wq 88 drivers/net/ethernet/mellanox/mlx5/core/wq.h void mlx5_wq_ll_reset(struct mlx5_wq_ll *wq); wq 91 drivers/net/ethernet/mellanox/mlx5/core/wq.h void *cqc, struct mlx5_cqwq *wq, wq 93 drivers/net/ethernet/mellanox/mlx5/core/wq.h u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq); wq 94 drivers/net/ethernet/mellanox/mlx5/core/wq.h u8 mlx5_cqwq_get_log_stride_size(struct mlx5_cqwq *wq); wq 97 drivers/net/ethernet/mellanox/mlx5/core/wq.h void *wqc, struct mlx5_wq_ll *wq, wq 99 drivers/net/ethernet/mellanox/mlx5/core/wq.h u32 mlx5_wq_ll_get_size(struct mlx5_wq_ll *wq); wq 103 drivers/net/ethernet/mellanox/mlx5/core/wq.h static inline int mlx5_wq_cyc_is_full(struct mlx5_wq_cyc *wq) wq 105 drivers/net/ethernet/mellanox/mlx5/core/wq.h return wq->cur_sz == wq->sz; wq 108 drivers/net/ethernet/mellanox/mlx5/core/wq.h static inline int mlx5_wq_cyc_missing(struct mlx5_wq_cyc *wq) wq 110 drivers/net/ethernet/mellanox/mlx5/core/wq.h return wq->sz - wq->cur_sz; wq 113 drivers/net/ethernet/mellanox/mlx5/core/wq.h static inline int mlx5_wq_cyc_is_empty(struct mlx5_wq_cyc *wq) wq 115 drivers/net/ethernet/mellanox/mlx5/core/wq.h return !wq->cur_sz; wq 118 drivers/net/ethernet/mellanox/mlx5/core/wq.h static inline void mlx5_wq_cyc_push(struct mlx5_wq_cyc *wq) wq 120 drivers/net/ethernet/mellanox/mlx5/core/wq.h wq->wqe_ctr++; wq 121 drivers/net/ethernet/mellanox/mlx5/core/wq.h wq->cur_sz++; wq 124 drivers/net/ethernet/mellanox/mlx5/core/wq.h static inline void mlx5_wq_cyc_push_n(struct mlx5_wq_cyc *wq, u8 n) wq 126 drivers/net/ethernet/mellanox/mlx5/core/wq.h wq->wqe_ctr += n; wq 127 drivers/net/ethernet/mellanox/mlx5/core/wq.h wq->cur_sz += n; wq 130 drivers/net/ethernet/mellanox/mlx5/core/wq.h static inline void mlx5_wq_cyc_pop(struct mlx5_wq_cyc *wq) wq 132 drivers/net/ethernet/mellanox/mlx5/core/wq.h wq->cur_sz--; wq 135 drivers/net/ethernet/mellanox/mlx5/core/wq.h static inline void mlx5_wq_cyc_update_db_record(struct mlx5_wq_cyc *wq) wq 137 drivers/net/ethernet/mellanox/mlx5/core/wq.h *wq->db = cpu_to_be32(wq->wqe_ctr); wq 140 drivers/net/ethernet/mellanox/mlx5/core/wq.h static inline u16 mlx5_wq_cyc_ctr2ix(struct mlx5_wq_cyc *wq, u16 ctr) wq 142 drivers/net/ethernet/mellanox/mlx5/core/wq.h return ctr & wq->fbc.sz_m1; wq 145 drivers/net/ethernet/mellanox/mlx5/core/wq.h static inline u16 mlx5_wq_cyc_get_head(struct mlx5_wq_cyc *wq) wq 147 drivers/net/ethernet/mellanox/mlx5/core/wq.h return mlx5_wq_cyc_ctr2ix(wq, wq->wqe_ctr); wq 150 drivers/net/ethernet/mellanox/mlx5/core/wq.h static inline u16 mlx5_wq_cyc_get_tail(struct mlx5_wq_cyc *wq) wq 152 drivers/net/ethernet/mellanox/mlx5/core/wq.h return mlx5_wq_cyc_ctr2ix(wq, wq->wqe_ctr - wq->cur_sz); wq 155 drivers/net/ethernet/mellanox/mlx5/core/wq.h static inline void *mlx5_wq_cyc_get_wqe(struct mlx5_wq_cyc *wq, u16 ix) wq 157 drivers/net/ethernet/mellanox/mlx5/core/wq.h return mlx5_frag_buf_get_wqe(&wq->fbc, ix); wq 160 drivers/net/ethernet/mellanox/mlx5/core/wq.h static inline u16 mlx5_wq_cyc_get_contig_wqebbs(struct mlx5_wq_cyc *wq, u16 ix) 
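Taken together, the mlx5_wq_cyc helpers just listed support the RX refill loop seen earlier in en_rx.c: write WQEs at the head, push them, then publish the new producer counter through the doorbell record. A condensed sketch; buffer setup is elided and the barrier placement follows the driver's post/refill paths:

static void sketch_refill_rx(struct mlx5_wq_cyc *wq)
{
        while (!mlx5_wq_cyc_is_full(wq)) {
                u16 head = mlx5_wq_cyc_get_head(wq);
                struct mlx5e_rx_wqe_cyc *wqe = mlx5_wq_cyc_get_wqe(wq, head);

                /* ... point wqe->data at a fresh receive buffer here ... */
                mlx5_wq_cyc_push(wq);           /* wqe_ctr++, cur_sz++ */
        }

        wmb();  /* WQE contents must be visible before the doorbell record */
        mlx5_wq_cyc_update_db_record(wq);       /* *wq->db = cpu_to_be32(wqe_ctr) */
}

Because mlx5_wq_cyc_ctr2ix() is just ctr & (size - 1), the counters can run free and the ring size must be a power of two.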
wq 162 drivers/net/ethernet/mellanox/mlx5/core/wq.h return mlx5_frag_buf_get_idx_last_contig_stride(&wq->fbc, ix) - ix + 1; wq 173 drivers/net/ethernet/mellanox/mlx5/core/wq.h static inline u32 mlx5_cqwq_ctr2ix(struct mlx5_cqwq *wq, u32 ctr) wq 175 drivers/net/ethernet/mellanox/mlx5/core/wq.h return ctr & wq->fbc.sz_m1; wq 178 drivers/net/ethernet/mellanox/mlx5/core/wq.h static inline u32 mlx5_cqwq_get_ci(struct mlx5_cqwq *wq) wq 180 drivers/net/ethernet/mellanox/mlx5/core/wq.h return mlx5_cqwq_ctr2ix(wq, wq->cc); wq 183 drivers/net/ethernet/mellanox/mlx5/core/wq.h static inline struct mlx5_cqe64 *mlx5_cqwq_get_wqe(struct mlx5_cqwq *wq, u32 ix) wq 185 drivers/net/ethernet/mellanox/mlx5/core/wq.h struct mlx5_cqe64 *cqe = mlx5_frag_buf_get_wqe(&wq->fbc, ix); wq 188 drivers/net/ethernet/mellanox/mlx5/core/wq.h cqe += wq->fbc.log_stride == 7; wq 193 drivers/net/ethernet/mellanox/mlx5/core/wq.h static inline u32 mlx5_cqwq_get_ctr_wrap_cnt(struct mlx5_cqwq *wq, u32 ctr) wq 195 drivers/net/ethernet/mellanox/mlx5/core/wq.h return ctr >> wq->fbc.log_sz; wq 198 drivers/net/ethernet/mellanox/mlx5/core/wq.h static inline u32 mlx5_cqwq_get_wrap_cnt(struct mlx5_cqwq *wq) wq 200 drivers/net/ethernet/mellanox/mlx5/core/wq.h return mlx5_cqwq_get_ctr_wrap_cnt(wq, wq->cc); wq 203 drivers/net/ethernet/mellanox/mlx5/core/wq.h static inline void mlx5_cqwq_pop(struct mlx5_cqwq *wq) wq 205 drivers/net/ethernet/mellanox/mlx5/core/wq.h wq->cc++; wq 208 drivers/net/ethernet/mellanox/mlx5/core/wq.h static inline void mlx5_cqwq_update_db_record(struct mlx5_cqwq *wq) wq 210 drivers/net/ethernet/mellanox/mlx5/core/wq.h *wq->db = cpu_to_be32(wq->cc & 0xffffff); wq 213 drivers/net/ethernet/mellanox/mlx5/core/wq.h static inline struct mlx5_cqe64 *mlx5_cqwq_get_cqe(struct mlx5_cqwq *wq) wq 215 drivers/net/ethernet/mellanox/mlx5/core/wq.h u32 ci = mlx5_cqwq_get_ci(wq); wq 216 drivers/net/ethernet/mellanox/mlx5/core/wq.h struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(wq, ci); wq 218 drivers/net/ethernet/mellanox/mlx5/core/wq.h u8 sw_ownership_val = mlx5_cqwq_get_wrap_cnt(wq) & 1; wq 229 drivers/net/ethernet/mellanox/mlx5/core/wq.h static inline int mlx5_wq_ll_is_full(struct mlx5_wq_ll *wq) wq 231 drivers/net/ethernet/mellanox/mlx5/core/wq.h return wq->cur_sz == wq->fbc.sz_m1; wq 234 drivers/net/ethernet/mellanox/mlx5/core/wq.h static inline int mlx5_wq_ll_is_empty(struct mlx5_wq_ll *wq) wq 236 drivers/net/ethernet/mellanox/mlx5/core/wq.h return !wq->cur_sz; wq 239 drivers/net/ethernet/mellanox/mlx5/core/wq.h static inline int mlx5_wq_ll_missing(struct mlx5_wq_ll *wq) wq 241 drivers/net/ethernet/mellanox/mlx5/core/wq.h return wq->fbc.sz_m1 - wq->cur_sz; wq 244 drivers/net/ethernet/mellanox/mlx5/core/wq.h static inline void *mlx5_wq_ll_get_wqe(struct mlx5_wq_ll *wq, u16 ix) wq 246 drivers/net/ethernet/mellanox/mlx5/core/wq.h return mlx5_frag_buf_get_wqe(&wq->fbc, ix); wq 249 drivers/net/ethernet/mellanox/mlx5/core/wq.h static inline u16 mlx5_wq_ll_get_wqe_next_ix(struct mlx5_wq_ll *wq, u16 ix) wq 251 drivers/net/ethernet/mellanox/mlx5/core/wq.h struct mlx5_wqe_srq_next_seg *wqe = mlx5_wq_ll_get_wqe(wq, ix); wq 256 drivers/net/ethernet/mellanox/mlx5/core/wq.h static inline void mlx5_wq_ll_push(struct mlx5_wq_ll *wq, u16 head_next) wq 258 drivers/net/ethernet/mellanox/mlx5/core/wq.h wq->head = head_next; wq 259 drivers/net/ethernet/mellanox/mlx5/core/wq.h wq->wqe_ctr++; wq 260 drivers/net/ethernet/mellanox/mlx5/core/wq.h wq->cur_sz++; wq 263 drivers/net/ethernet/mellanox/mlx5/core/wq.h static inline void mlx5_wq_ll_pop(struct 
mlx5_wq_ll *wq, __be16 ix, wq 266 drivers/net/ethernet/mellanox/mlx5/core/wq.h *wq->tail_next = ix; wq 267 drivers/net/ethernet/mellanox/mlx5/core/wq.h wq->tail_next = next_tail_next; wq 268 drivers/net/ethernet/mellanox/mlx5/core/wq.h wq->cur_sz--; wq 271 drivers/net/ethernet/mellanox/mlx5/core/wq.h static inline void mlx5_wq_ll_update_db_record(struct mlx5_wq_ll *wq) wq 273 drivers/net/ethernet/mellanox/mlx5/core/wq.h *wq->db = cpu_to_be32(wq->wqe_ctr); wq 104 drivers/net/ethernet/netronome/nfp/ccm.c err = wait_event_interruptible_timeout(ccm->wq, wq 195 drivers/net/ethernet/netronome/nfp/ccm.c wake_up_interruptible_all(&ccm->wq); wq 210 drivers/net/ethernet/netronome/nfp/ccm.c init_waitqueue_head(&ccm->wq); wq 103 drivers/net/ethernet/netronome/nfp/ccm.h wait_queue_head_t wq; wq 417 drivers/net/ethernet/netronome/nfp/ccm_mbox.c wake_up_all(&nn->mbox_cmsg.wq); wq 443 drivers/net/ethernet/netronome/nfp/ccm_mbox.c wait_event(nn->mbox_cmsg.wq, nfp_ccm_mbox_done(skb)); wq 456 drivers/net/ethernet/netronome/nfp/ccm_mbox.c wake_up_all(&nn->mbox_cmsg.wq); wq 563 drivers/net/ethernet/netronome/nfp/ccm_mbox.c to = !wait_event_timeout(nn->mbox_cmsg.wq, wq 645 drivers/net/ethernet/netronome/nfp/ccm_mbox.c wake_up_all(&nn->mbox_cmsg.wq); wq 729 drivers/net/ethernet/netronome/nfp/ccm_mbox.c init_waitqueue_head(&nn->mbox_cmsg.wq); wq 722 drivers/net/ethernet/netronome/nfp/nfp_main.c pf->wq = alloc_workqueue("nfp-%s", 0, 2, pci_name(pdev)); wq 723 drivers/net/ethernet/netronome/nfp/nfp_main.c if (!pf->wq) { wq 807 drivers/net/ethernet/netronome/nfp/nfp_main.c destroy_workqueue(pf->wq); wq 840 drivers/net/ethernet/netronome/nfp/nfp_main.c destroy_workqueue(pf->wq); wq 137 drivers/net/ethernet/netronome/nfp/nfp_main.h struct workqueue_struct *wq; wq 680 drivers/net/ethernet/netronome/nfp/nfp_net.h wait_queue_head_t wq; wq 615 drivers/net/ethernet/netronome/nfp/nfp_net_main.c queue_work(pf->wq, &pf->port_refresh_work); wq 289 drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c if (dcb->wq) { wq 290 drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c destroy_workqueue(dcb->wq); wq 291 drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c dcb->wq = NULL; wq 314 drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c dcb->wq = create_singlethread_workqueue("qlcnic-dcb"); wq 315 drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c if (!dcb->wq) { wq 339 drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c destroy_workqueue(dcb->wq); wq 340 drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c dcb->wq = NULL; wq 539 drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c queue_delayed_work(dcb->wq, &dcb->aen_work, 0); wq 653 drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c queue_delayed_work(dcb->wq, &dcb->aen_work, 0); wq 39 drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h struct workqueue_struct *wq; wq 145 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c struct workqueue_struct *wq; wq 167 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c wq = create_singlethread_workqueue("bc-trans"); wq 168 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c if (wq == NULL) { wq 175 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c bc->bc_trans_wq = wq; wq 177 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c wq = create_singlethread_workqueue("async"); wq 178 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c if (wq == NULL) { wq 184 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c bc->bc_async_wq = wq; wq 422 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c struct workqueue_struct *wq; wq 424 
drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c wq = create_singlethread_workqueue("qlcnic-flr"); wq 425 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c if (wq == NULL) { wq 430 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c bc->bc_flr_wq = wq; wq 80 drivers/net/ethernet/sfc/mcdi.c init_waitqueue_head(&mcdi->wq); wq 422 drivers/net/ethernet/sfc/mcdi.c wait_event(mcdi->wq, wq 432 drivers/net/ethernet/sfc/mcdi.c if (wait_event_timeout(mcdi->wq, mcdi->state == MCDI_STATE_COMPLETED, wq 458 drivers/net/ethernet/sfc/mcdi.c wake_up(&mcdi->wq); wq 490 drivers/net/ethernet/sfc/mcdi.c wake_up(&mcdi->wq); wq 71 drivers/net/ethernet/sfc/mcdi.h wait_queue_head_t wq; wq 222 drivers/net/ethernet/stmicro/stmmac/stmmac.h struct workqueue_struct *wq; wq 208 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c queue_work(priv->wq, &priv->service_task); wq 4503 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c priv->wq = create_singlethread_workqueue("stmmac_wq"); wq 4504 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c if (!priv->wq) { wq 4709 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c destroy_workqueue(priv->wq); wq 4745 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c destroy_workqueue(priv->wq); wq 97 drivers/net/ipvlan/ipvlan.h struct work_struct wq; wq 227 drivers/net/ipvlan/ipvlan_core.c struct ipvl_port *port = container_of(work, struct ipvl_port, wq); wq 556 drivers/net/ipvlan/ipvlan_core.c schedule_work(&port->wq); wq 76 drivers/net/ipvlan/ipvlan_main.c INIT_WORK(&port->wq, ipvlan_process_multicast); wq 99 drivers/net/ipvlan/ipvlan_main.c cancel_work_sync(&port->wq); wq 523 drivers/net/tap.c init_waitqueue_head(&q->sock.wq.wait); wq 581 drivers/net/tap.c poll_wait(file, &q->sock.wq.wait, wait); wq 2185 drivers/net/tun.c add_wait_queue(&tfile->socket.wq.wait, &wait); wq 2205 drivers/net/tun.c remove_wait_queue(&tfile->socket.wq.wait, &wait); wq 3439 drivers/net/tun.c init_waitqueue_head(&tfile->socket.wq.wait); wq 378 drivers/net/usb/lan78xx.c struct delayed_work wq; wq 1248 drivers/net/usb/lan78xx.c if (!schedule_delayed_work(&dev->wq, 0)) wq 2720 drivers/net/usb/lan78xx.c cancel_delayed_work_sync(&dev->wq); wq 3516 drivers/net/usb/lan78xx.c dev = container_of(work, struct lan78xx_net, wq.work); wq 3651 drivers/net/usb/lan78xx.c cancel_delayed_work_sync(&dev->wq); wq 3747 drivers/net/usb/lan78xx.c INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork); wq 613 drivers/net/wireless/ath/ar5523/ar5523.c queue_work(ar->wq, &ar->rx_refill_work); wq 1037 drivers/net/wireless/ath/ar5523/ar5523.c queue_work(ar->wq, &ar->rx_refill_work); wq 1619 drivers/net/wireless/ath/ar5523/ar5523.c ar->wq = create_singlethread_workqueue("ar5523"); wq 1620 drivers/net/wireless/ath/ar5523/ar5523.c if (!ar->wq) { wq 1714 drivers/net/wireless/ath/ar5523/ar5523.c destroy_workqueue(ar->wq); wq 1736 drivers/net/wireless/ath/ar5523/ar5523.c destroy_workqueue(ar->wq); wq 93 drivers/net/wireless/ath/ar5523/ar5523.h struct workqueue_struct *wq; wq 653 drivers/net/wireless/ath/wil6210/interrupt.c wake_up_interruptible(&wil->wq); wq 676 drivers/net/wireless/ath/wil6210/interrupt.c wake_up_interruptible(&wil->wq); wq 510 drivers/net/wireless/ath/wil6210/main.c if (wait_event_interruptible(wil->wq, wil->recovery_state != wq 529 drivers/net/wireless/ath/wil6210/main.c wake_up_interruptible(&wil->wq); wq 730 drivers/net/wireless/ath/wil6210/main.c init_waitqueue_head(&wil->wq); wq 1483 drivers/net/wireless/ath/wil6210/main.c wait_event_interruptible_timeout(wil->wq, !vif->scan_request, wq 952 
drivers/net/wireless/ath/wil6210/wil6210.h wait_queue_head_t wq; /* for all wait_event() use */ wq 952 drivers/net/wireless/ath/wil6210/wmi.c wake_up_interruptible(&wil->wq); wq 3118 drivers/net/wireless/ath/wil6210/wmi.c rc = wait_event_interruptible_timeout(wil->wq, wq 4310 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c struct workqueue_struct *wq; wq 4329 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c wq = alloc_ordered_workqueue("brcmf_wq/%s", WQ_MEM_RECLAIM, wq 4331 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c if (!wq) { wq 4337 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c bus->brcmf_wq = wq; wq 40 drivers/net/wireless/st/cw1200/cw1200_spi.c wait_queue_head_t wq; wq 202 drivers/net/wireless/st/cw1200/cw1200_spi.c add_wait_queue(&self->wq, &wait); wq 215 drivers/net/wireless/st/cw1200/cw1200_spi.c remove_wait_queue(&self->wq, &wait); wq 227 drivers/net/wireless/st/cw1200/cw1200_spi.c wake_up(&self->wq); wq 410 drivers/net/wireless/st/cw1200/cw1200_spi.c init_waitqueue_head(&self->wq); wq 181 drivers/net/xen-netback/common.h wait_queue_head_t wq; wq 647 drivers/net/xen-netback/interface.c init_waitqueue_head(&queue->wq); wq 154 drivers/net/xen-netback/netback.c wake_up(&queue->wq); wq 536 drivers/net/xen-netback/rx.c prepare_to_wait(&queue->wq, &wait, TASK_INTERRUPTIBLE); wq 543 drivers/net/xen-netback/rx.c finish_wait(&queue->wq, &wait); wq 531 drivers/nfc/pn533/pn533.c queue_work(dev->wq, &dev->cmd_work); wq 1079 drivers/nfc/pn533/pn533.c queue_work(dev->wq, &dev->mi_tm_rx_work); wq 1159 drivers/nfc/pn533/pn533.c queue_work(dev->wq, &dev->cmd_work); wq 1218 drivers/nfc/pn533/pn533.c queue_work(dev->wq, &dev->tg_work); wq 1233 drivers/nfc/pn533/pn533.c queue_delayed_work(dev->wq, &dev->poll_work, wq 1252 drivers/nfc/pn533/pn533.c queue_delayed_work(dev->wq, &dev->poll_work, wq 1298 drivers/nfc/pn533/pn533.c queue_work(dev->wq, &dev->rf_work); wq 1349 drivers/nfc/pn533/pn533.c queue_work(dev->wq, &dev->rf_work); wq 1437 drivers/nfc/pn533/pn533.c queue_work(dev->wq, &dev->rf_work); wq 1979 drivers/nfc/pn533/pn533.c queue_work(dev->wq, &dev->mi_rx_work); wq 1986 drivers/nfc/pn533/pn533.c queue_work(dev->wq, &dev->mi_tx_work); wq 2051 drivers/nfc/pn533/pn533.c queue_work(dev->wq, &dev->cmd_complete_work); wq 2187 drivers/nfc/pn533/pn533.c queue_work(dev->wq, &dev->mi_tm_tx_work); wq 2200 drivers/nfc/pn533/pn533.c queue_work(dev->wq, &dev->tg_work); wq 2289 drivers/nfc/pn533/pn533.c queue_work(dev->wq, &dev->cmd_work); wq 2345 drivers/nfc/pn533/pn533.c queue_work(dev->wq, &dev->cmd_work); wq 2621 drivers/nfc/pn533/pn533.c priv->wq = alloc_ordered_workqueue("pn533", 0); wq 2622 drivers/nfc/pn533/pn533.c if (priv->wq == NULL) wq 2654 drivers/nfc/pn533/pn533.c destroy_workqueue(priv->wq); wq 2669 drivers/nfc/pn533/pn533.c destroy_workqueue(priv->wq); wq 131 drivers/nfc/pn533/pn533.h struct workqueue_struct *wq; wq 129 drivers/nfc/pn533/usb.c queue_work(priv->wq, &priv->cmd_complete_work); wq 490 drivers/pci/controller/pci-hyperv.c struct workqueue_struct *wq; wq 2145 drivers/pci/controller/pci-hyperv.c queue_work(hbus->wq, &dr_wrk->wrk); wq 2230 drivers/pci/controller/pci-hyperv.c queue_work(hpdev->hbus->wq, &hpdev->wrk); wq 2919 drivers/pci/controller/pci-hyperv.c hbus->wq = alloc_ordered_workqueue("hv_pci_%x", 0, wq 2921 drivers/pci/controller/pci-hyperv.c if (!hbus->wq) { wq 3006 drivers/pci/controller/pci-hyperv.c destroy_workqueue(hbus->wq); wq 3089 drivers/pci/controller/pci-hyperv.c destroy_workqueue(hbus->wq); wq 52 drivers/pci/hotplug/pnv_php.c 
if (php_slot->wq) { wq 53 drivers/pci/hotplug/pnv_php.c destroy_workqueue(php_slot->wq); wq 54 drivers/pci/hotplug/pnv_php.c php_slot->wq = NULL; wq 822 drivers/pci/hotplug/pnv_php.c queue_work(php_slot->wq, &event->work); wq 835 drivers/pci/hotplug/pnv_php.c php_slot->wq = alloc_workqueue("pciehp-%s", 0, 0, php_slot->name); wq 836 drivers/pci/hotplug/pnv_php.c if (!php_slot->wq) { wq 80 drivers/pci/hotplug/shpchp.h struct workqueue_struct *wq; wq 88 drivers/pci/hotplug/shpchp_core.c slot->wq = alloc_workqueue("shpchp-%d", 0, 0, slot->number); wq 89 drivers/pci/hotplug/shpchp_core.c if (!slot->wq) { wq 123 drivers/pci/hotplug/shpchp_core.c destroy_workqueue(slot->wq); wq 137 drivers/pci/hotplug/shpchp_core.c destroy_workqueue(slot->wq); wq 40 drivers/pci/hotplug/shpchp_ctrl.c queue_work(p_slot->wq, &info->work); wq 444 drivers/pci/hotplug/shpchp_ctrl.c queue_work(p_slot->wq, &info->work); wq 481 drivers/pci/hotplug/shpchp_ctrl.c queue_delayed_work(p_slot->wq, &p_slot->work, 5*HZ); wq 193 drivers/platform/chrome/wilco_ec/event.c wait_queue_head_t wq; wq 249 drivers/platform/chrome/wilco_ec/event.c wake_up_interruptible(&dev_data->wq); wq 329 drivers/platform/chrome/wilco_ec/event.c poll_wait(filp, &dev_data->wq, wait); wq 371 drivers/platform/chrome/wilco_ec/event.c err = wait_event_interruptible(dev_data->wq, wq 432 drivers/platform/chrome/wilco_ec/event.c wake_up_interruptible(&dev_data->wq); wq 477 drivers/platform/chrome/wilco_ec/event.c init_waitqueue_head(&dev_data->wq); wq 27 drivers/platform/x86/asus-wireless.c struct workqueue_struct *wq; wq 108 drivers/platform/x86/asus-wireless.c queue_work(data->wq, &data->led_work); wq 161 drivers/platform/x86/asus-wireless.c data->wq = create_singlethread_workqueue("asus_wireless_workqueue"); wq 162 drivers/platform/x86/asus-wireless.c if (!data->wq) wq 173 drivers/platform/x86/asus-wireless.c destroy_workqueue(data->wq); wq 182 drivers/platform/x86/asus-wireless.c if (data->wq) { wq 184 drivers/platform/x86/asus-wireless.c destroy_workqueue(data->wq); wq 1131 drivers/power/supply/charger-manager.c container_of(work, struct charger_cable, wq); wq 1184 drivers/power/supply/charger-manager.c schedule_work(&cable->wq); wq 1206 drivers/power/supply/charger-manager.c INIT_WORK(&cable->wq, charger_extcon_work); wq 40 drivers/power/supply/ipaq_micro_battery.c struct workqueue_struct *wq; wq 88 drivers/power/supply/ipaq_micro_battery.c queue_delayed_work(mb->wq, &mb->update, msecs_to_jiffies(BATT_PERIOD)); wq 235 drivers/power/supply/ipaq_micro_battery.c mb->wq = alloc_workqueue("ipaq-battery-wq", WQ_MEM_RECLAIM, 0); wq 236 drivers/power/supply/ipaq_micro_battery.c if (!mb->wq) wq 241 drivers/power/supply/ipaq_micro_battery.c queue_delayed_work(mb->wq, &mb->update, 1); wq 264 drivers/power/supply/ipaq_micro_battery.c destroy_workqueue(mb->wq); wq 276 drivers/power/supply/ipaq_micro_battery.c destroy_workqueue(mb->wq); wq 293 drivers/power/supply/ipaq_micro_battery.c queue_delayed_work(mb->wq, &mb->update, msecs_to_jiffies(BATT_PERIOD)); wq 130 drivers/sbus/char/bbc_i2c.c add_wait_queue(&bp->wq, &wait); wq 135 drivers/sbus/char/bbc_i2c.c bp->wq, wq 144 drivers/sbus/char/bbc_i2c.c remove_wait_queue(&bp->wq, &wait); wq 280 drivers/sbus/char/bbc_i2c.c wake_up_interruptible(&bp->wq); wq 318 drivers/sbus/char/bbc_i2c.c init_waitqueue_head(&bp->wq); wq 62 drivers/sbus/char/bbc_i2c.h wait_queue_head_t wq; wq 694 drivers/scsi/be2iscsi/be_main.c queue_work(phba->wq, &pbe_eq->mcc_work); wq 774 drivers/scsi/be2iscsi/be_main.c queue_work(phba->wq, 
&pbe_eq->mcc_work);
wq 1468 drivers/scsi/be2iscsi/be_main.c plist = &pasync_ctx->async_entry[cri].wq.list;
wq 1472 drivers/scsi/be2iscsi/be_main.c INIT_LIST_HEAD(&pasync_ctx->async_entry[cri].wq.list);
wq 1473 drivers/scsi/be2iscsi/be_main.c pasync_ctx->async_entry[cri].wq.hdr_len = 0;
wq 1474 drivers/scsi/be2iscsi/be_main.c pasync_ctx->async_entry[cri].wq.bytes_received = 0;
wq 1475 drivers/scsi/be2iscsi/be_main.c pasync_ctx->async_entry[cri].wq.bytes_needed = 0;
wq 1598 drivers/scsi/be2iscsi/be_main.c plist = &pasync_ctx->async_entry[cri].wq.list;
wq 1628 drivers/scsi/be2iscsi/be_main.c pasync_ctx->async_entry[cri].wq.hdr_len,
wq 1629 drivers/scsi/be2iscsi/be_main.c pasync_ctx->async_entry[cri].wq.bytes_needed,
wq 1630 drivers/scsi/be2iscsi/be_main.c pasync_ctx->async_entry[cri].wq.bytes_received);
wq 1646 drivers/scsi/be2iscsi/be_main.c struct cri_wait_queue *wq;
wq 1652 drivers/scsi/be2iscsi/be_main.c wq = &pasync_ctx->async_entry[cri].wq;
wq 1655 drivers/scsi/be2iscsi/be_main.c if (wq->hdr_len) {
wq 1665 drivers/scsi/be2iscsi/be_main.c wq->hdr_len = pasync_handle->buffer_len;
wq 1666 drivers/scsi/be2iscsi/be_main.c wq->bytes_received = 0;
wq 1667 drivers/scsi/be2iscsi/be_main.c wq->bytes_needed = bytes_needed;
wq 1668 drivers/scsi/be2iscsi/be_main.c list_add_tail(&pasync_handle->link, &wq->list);
wq 1674 drivers/scsi/be2iscsi/be_main.c if (!wq->hdr_len || !wq->bytes_needed) {
wq 1678 drivers/scsi/be2iscsi/be_main.c wq->bytes_received += pasync_handle->buffer_len;
wq 1680 drivers/scsi/be2iscsi/be_main.c if (wq->bytes_received > wq->bytes_needed) {
wq 1684 drivers/scsi/be2iscsi/be_main.c list_add_tail(&pasync_handle->link, &wq->list);
wq 1685 drivers/scsi/be2iscsi/be_main.c if (wq->bytes_received == wq->bytes_needed)
wq 1696 drivers/scsi/be2iscsi/be_main.c wq->hdr_len, wq->bytes_needed,
wq 2888 drivers/scsi/be2iscsi/be_main.c wq.list);
wq 5262 drivers/scsi/be2iscsi/be_main.c queue_delayed_work(phba->wq, &phba->recover_port,
wq 5275 drivers/scsi/be2iscsi/be_main.c queue_work(phba->wq, &phba->sess_work);
wq 5654 drivers/scsi/be2iscsi/be_main.c phba->wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 1, wq_name);
wq 5655 drivers/scsi/be2iscsi/be_main.c if (!phba->wq) {
wq 5733 drivers/scsi/be2iscsi/be_main.c destroy_workqueue(phba->wq);
wq 5778 drivers/scsi/be2iscsi/be_main.c destroy_workqueue(phba->wq);
wq 366 drivers/scsi/be2iscsi/be_main.h struct workqueue_struct *wq; /* The actual work queue */
wq 572 drivers/scsi/be2iscsi/be_main.h } wq;
wq 151 drivers/scsi/bfa/bfad_im.c wait_queue_head_t *wq;
wq 155 drivers/scsi/bfa/bfad_im.c wq = (wait_queue_head_t *) cmnd->SCp.ptr;
wq 158 drivers/scsi/bfa/bfad_im.c if (wq)
wq 159 drivers/scsi/bfa/bfad_im.c wake_up(wq);
wq 301 drivers/scsi/bfa/bfad_im.c DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
wq 329 drivers/scsi/bfa/bfad_im.c cmnd->SCp.ptr = (char *)&wq;
wq 350 drivers/scsi/bfa/bfad_im.c wait_event(wq, test_bit(IO_DONE_BIT,
wq 378 drivers/scsi/bfa/bfad_im.c DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
wq 384 drivers/scsi/bfa/bfad_im.c cmnd->SCp.ptr = (char *)&wq;
wq 389 drivers/scsi/bfa/bfad_im.c wait_event(wq, test_bit(IO_DONE_BIT,
wq 317 drivers/scsi/cxlflash/ocxl_hw.c wake_up_all(&ctx->wq);
wq 505 drivers/scsi/cxlflash/ocxl_hw.c init_waitqueue_head(&ctx->wq);
wq 989 drivers/scsi/cxlflash/ocxl_hw.c poll_wait(file, &ctx->wq, poll);
wq 1035 drivers/scsi/cxlflash/ocxl_hw.c prepare_to_wait(&ctx->wq, &event_wait, TASK_INTERRUPTIBLE);
wq 1059 drivers/scsi/cxlflash/ocxl_hw.c finish_wait(&ctx->wq, &event_wait);
wq 1094 drivers/scsi/cxlflash/ocxl_hw.c finish_wait(&ctx->wq, &event_wait);
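
The bfa and cxlflash entries above show the two usual shapes of kernel wait-queue sleeping: the wait_event() macro over an on-stack queue head, and the open-coded prepare_to_wait()/finish_wait() loop. A minimal sketch of the open-coded form follows; struct example_ctx and its event flag are hypothetical stand-ins for whatever condition the real driver tests:

    #include <linux/wait.h>
    #include <linux/sched/signal.h>

    struct example_ctx {            /* illustrative stand-in, not a real driver type */
        wait_queue_head_t wq;       /* initialized with init_waitqueue_head() */
        bool event;                 /* the condition being waited for */
    };

    static int example_wait_for_event(struct example_ctx *ctx)
    {
        DEFINE_WAIT(event_wait);
        int rc = 0;

        for (;;) {
            prepare_to_wait(&ctx->wq, &event_wait, TASK_INTERRUPTIBLE);
            if (ctx->event)                 /* condition satisfied: stop */
                break;
            if (signal_pending(current)) {  /* interruptible sleep */
                rc = -ERESTARTSYS;
                break;
            }
            schedule();                     /* actually sleep until woken */
        }
        finish_wait(&ctx->wq, &event_wait);
        return rc;
    }

    /* Producer side pairs this with: ctx->event = true; wake_up_all(&ctx->wq); */

The open-coded loop is what drivers use when they need extra work between wakeup and re-test (cxlflash checks several exit conditions there); otherwise wait_event() expands to essentially the same thing.
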
wq 1295 drivers/scsi/cxlflash/ocxl_hw.c wake_up_all(&ctx->wq);
wq 61 drivers/scsi/cxlflash/ocxl_hw.h wait_queue_head_t wq; /* Wait queue for poll and interrupts */
wq 136 drivers/scsi/dpt_i2o.c adpt_wait_queue_head_t *wq;
wq 1176 drivers/scsi/dpt_i2o.c wait_data->wq = &adpt_wq_i2o_post;
wq 1287 drivers/scsi/dpt_i2o.c wake_up_interruptible(p1->wq);
wq 548 drivers/scsi/esas2r/esas2r_init.c struct workqueue_struct *wq;
wq 580 drivers/scsi/esas2r/esas2r_init.c wq = a->fw_event_q;
wq 583 drivers/scsi/esas2r/esas2r_init.c if (wq)
wq 584 drivers/scsi/esas2r/esas2r_init.c destroy_workqueue(wq);
wq 307 drivers/scsi/fnic/fnic.h ____cacheline_aligned struct vnic_wq wq[FNIC_WQ_MAX];
wq 332 drivers/scsi/fnic/fnic.h void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf);
wq 357 drivers/scsi/fnic/fnic.h void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq,
wq 1031 drivers/scsi/fnic/fnic_fcs.c struct vnic_wq *wq = &fnic->wq[0];
wq 1063 drivers/scsi/fnic/fnic_fcs.c if (!vnic_wq_desc_avail(wq))
wq 1066 drivers/scsi/fnic/fnic_fcs.c fnic_queue_wq_eth_desc(wq, skb, pa, skb->len,
wq 1084 drivers/scsi/fnic/fnic_fcs.c struct vnic_wq *wq = &fnic->wq[0];
wq 1145 drivers/scsi/fnic/fnic_fcs.c if (!vnic_wq_desc_avail(wq)) {
wq 1151 drivers/scsi/fnic/fnic_fcs.c fnic_queue_wq_desc(wq, skb, pa, tot_len, fr_eof(fp),
wq 1253 drivers/scsi/fnic/fnic_fcs.c static void fnic_wq_complete_frame_send(struct vnic_wq *wq,
wq 1259 drivers/scsi/fnic/fnic_fcs.c struct fnic *fnic = vnic_dev_priv(wq->vdev);
wq 1276 drivers/scsi/fnic/fnic_fcs.c vnic_wq_service(&fnic->wq[q_number], cq_desc, completed_index,
wq 1299 drivers/scsi/fnic/fnic_fcs.c void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
wq 1302 drivers/scsi/fnic/fnic_fcs.c struct fnic *fnic = vnic_dev_priv(wq->vdev);
wq 238 drivers/scsi/fnic/fnic_isr.c unsigned int m = ARRAY_SIZE(fnic->wq);
wq 352 drivers/scsi/fnic/fnic_main.c error_status = ioread32(&fnic->wq[i].ctrl->error_status);
wq 500 drivers/scsi/fnic/fnic_main.c err = vnic_wq_disable(&fnic->wq[i]);
wq 517 drivers/scsi/fnic/fnic_main.c vnic_wq_clean(&fnic->wq[i], fnic_free_wq_buf);
wq 884 drivers/scsi/fnic/fnic_main.c vnic_wq_enable(&fnic->wq[i]);
wq 215 drivers/scsi/fnic/fnic_res.c vnic_wq_free(&fnic->wq[i]);
wq 256 drivers/scsi/fnic/fnic_res.c err = vnic_wq_alloc(fnic->vdev, &fnic->wq[i], i,
wq 365 drivers/scsi/fnic/fnic_res.c vnic_wq_init(&fnic->wq[i],
wq 30 drivers/scsi/fnic/fnic_res.h static inline void fnic_queue_wq_desc(struct vnic_wq *wq,
wq 37 drivers/scsi/fnic/fnic_res.h struct wq_enet_desc *desc = vnic_wq_next_desc(wq);
wq 51 drivers/scsi/fnic/fnic_res.h vnic_wq_post(wq, os_buf, dma_addr, len, sop, eop);
wq 54 drivers/scsi/fnic/fnic_res.h static inline void fnic_queue_wq_eth_desc(struct vnic_wq *wq,
wq 61 drivers/scsi/fnic/fnic_res.h struct wq_enet_desc *desc = vnic_wq_next_desc(wq);
wq 76 drivers/scsi/fnic/fnic_res.h vnic_wq_post(wq, os_buf, dma_addr, len, 1, 1);
wq 79 drivers/scsi/fnic/fnic_res.h static inline void fnic_queue_wq_copy_desc_icmnd_16(struct vnic_wq_copy *wq,
wq 91 drivers/scsi/fnic/fnic_res.h struct fcpio_host_req *desc = vnic_wq_copy_next_desc(wq);
wq 121 drivers/scsi/fnic/fnic_res.h vnic_wq_copy_post(wq);
wq 124 drivers/scsi/fnic/fnic_res.h static inline void fnic_queue_wq_copy_desc_itmf(struct vnic_wq_copy *wq,
wq 130 drivers/scsi/fnic/fnic_res.h struct fcpio_host_req *desc = vnic_wq_copy_next_desc(wq);
wq 147 drivers/scsi/fnic/fnic_res.h vnic_wq_copy_post(wq);
wq 150 drivers/scsi/fnic/fnic_res.h static inline void
fnic_queue_wq_copy_desc_flogi_reg(struct vnic_wq_copy *wq, wq 154 drivers/scsi/fnic/fnic_res.h struct fcpio_host_req *desc = vnic_wq_copy_next_desc(wq); wq 166 drivers/scsi/fnic/fnic_res.h vnic_wq_copy_post(wq); wq 169 drivers/scsi/fnic/fnic_res.h static inline void fnic_queue_wq_copy_desc_fip_reg(struct vnic_wq_copy *wq, wq 174 drivers/scsi/fnic/fnic_res.h struct fcpio_host_req *desc = vnic_wq_copy_next_desc(wq); wq 190 drivers/scsi/fnic/fnic_res.h vnic_wq_copy_post(wq); wq 193 drivers/scsi/fnic/fnic_res.h static inline void fnic_queue_wq_copy_desc_fw_reset(struct vnic_wq_copy *wq, wq 196 drivers/scsi/fnic/fnic_res.h struct fcpio_host_req *desc = vnic_wq_copy_next_desc(wq); wq 203 drivers/scsi/fnic/fnic_res.h vnic_wq_copy_post(wq); wq 206 drivers/scsi/fnic/fnic_res.h static inline void fnic_queue_wq_copy_desc_lunmap(struct vnic_wq_copy *wq, wq 210 drivers/scsi/fnic/fnic_res.h struct fcpio_host_req *desc = vnic_wq_copy_next_desc(wq); wq 220 drivers/scsi/fnic/fnic_res.h vnic_wq_copy_post(wq); wq 143 drivers/scsi/fnic/fnic_scsi.c static int free_wq_copy_descs(struct fnic *fnic, struct vnic_wq_copy *wq) wq 153 drivers/scsi/fnic/fnic_scsi.c if (wq->to_clean_index <= fnic->fw_ack_index[0]) wq 154 drivers/scsi/fnic/fnic_scsi.c wq->ring.desc_avail += (fnic->fw_ack_index[0] wq 155 drivers/scsi/fnic/fnic_scsi.c - wq->to_clean_index + 1); wq 157 drivers/scsi/fnic/fnic_scsi.c wq->ring.desc_avail += (wq->ring.desc_count wq 158 drivers/scsi/fnic/fnic_scsi.c - wq->to_clean_index wq 166 drivers/scsi/fnic/fnic_scsi.c wq->to_clean_index = wq 167 drivers/scsi/fnic/fnic_scsi.c (fnic->fw_ack_index[0] + 1) % wq->ring.desc_count; wq 207 drivers/scsi/fnic/fnic_scsi.c struct vnic_wq_copy *wq = &fnic->wq_copy[0]; wq 223 drivers/scsi/fnic/fnic_scsi.c if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0]) wq 224 drivers/scsi/fnic/fnic_scsi.c free_wq_copy_descs(fnic, wq); wq 226 drivers/scsi/fnic/fnic_scsi.c if (!vnic_wq_copy_desc_avail(wq)) wq 229 drivers/scsi/fnic/fnic_scsi.c fnic_queue_wq_copy_desc_fw_reset(wq, SCSI_NO_TAG); wq 260 drivers/scsi/fnic/fnic_scsi.c struct vnic_wq_copy *wq = &fnic->wq_copy[0]; wq 269 drivers/scsi/fnic/fnic_scsi.c if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0]) wq 270 drivers/scsi/fnic/fnic_scsi.c free_wq_copy_descs(fnic, wq); wq 272 drivers/scsi/fnic/fnic_scsi.c if (!vnic_wq_copy_desc_avail(wq)) { wq 286 drivers/scsi/fnic/fnic_scsi.c fnic_queue_wq_copy_desc_fip_reg(wq, SCSI_NO_TAG, wq 294 drivers/scsi/fnic/fnic_scsi.c fnic_queue_wq_copy_desc_flogi_reg(wq, SCSI_NO_TAG, wq 317 drivers/scsi/fnic/fnic_scsi.c struct vnic_wq_copy *wq, wq 370 drivers/scsi/fnic/fnic_scsi.c if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0]) wq 371 drivers/scsi/fnic/fnic_scsi.c free_wq_copy_descs(fnic, wq); wq 373 drivers/scsi/fnic/fnic_scsi.c if (unlikely(!vnic_wq_copy_desc_avail(wq))) { wq 392 drivers/scsi/fnic/fnic_scsi.c fnic_queue_wq_copy_desc_icmnd_16(wq, sc->request->tag, wq 429 drivers/scsi/fnic/fnic_scsi.c struct vnic_wq_copy *wq; wq 565 drivers/scsi/fnic/fnic_scsi.c wq = &fnic->wq_copy[0]; wq 566 drivers/scsi/fnic/fnic_scsi.c ret = fnic_queue_wq_copy_desc(fnic, wq, io_req, sc, sg_count); wq 763 drivers/scsi/fnic/fnic_scsi.c static inline int is_ack_index_in_range(struct vnic_wq_copy *wq, wq 766 drivers/scsi/fnic/fnic_scsi.c if (wq->to_clean_index <= wq->to_use_index) { wq 768 drivers/scsi/fnic/fnic_scsi.c if (request_out < wq->to_clean_index || wq 769 drivers/scsi/fnic/fnic_scsi.c request_out >= wq->to_use_index) wq 773 drivers/scsi/fnic/fnic_scsi.c if (request_out < 
wq->to_clean_index && wq 774 drivers/scsi/fnic/fnic_scsi.c request_out >= wq->to_use_index) wq 792 drivers/scsi/fnic/fnic_scsi.c struct vnic_wq_copy *wq; wq 798 drivers/scsi/fnic/fnic_scsi.c wq = &fnic->wq_copy[cq_index - fnic->raw_wq_count - fnic->rq_count]; wq 802 drivers/scsi/fnic/fnic_scsi.c if (is_ack_index_in_range(wq, request_out)) { wq 1451 drivers/scsi/fnic/fnic_scsi.c void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq, wq 1455 drivers/scsi/fnic/fnic_scsi.c struct fnic *fnic = vnic_dev_priv(wq->vdev); wq 1516 drivers/scsi/fnic/fnic_scsi.c struct vnic_wq_copy *wq = &fnic->wq_copy[0]; wq 1532 drivers/scsi/fnic/fnic_scsi.c if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0]) wq 1533 drivers/scsi/fnic/fnic_scsi.c free_wq_copy_descs(fnic, wq); wq 1535 drivers/scsi/fnic/fnic_scsi.c if (!vnic_wq_copy_desc_avail(wq)) { wq 1543 drivers/scsi/fnic/fnic_scsi.c fnic_queue_wq_copy_desc_itmf(wq, tag | FNIC_TAG_ABORT, wq 2070 drivers/scsi/fnic/fnic_scsi.c struct vnic_wq_copy *wq = &fnic->wq_copy[0]; wq 2088 drivers/scsi/fnic/fnic_scsi.c if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0]) wq 2089 drivers/scsi/fnic/fnic_scsi.c free_wq_copy_descs(fnic, wq); wq 2091 drivers/scsi/fnic/fnic_scsi.c if (!vnic_wq_copy_desc_avail(wq)) { wq 2102 drivers/scsi/fnic/fnic_scsi.c fnic_queue_wq_copy_desc_itmf(wq, sc->request->tag | FNIC_TAG_DEV_RST, wq 35 drivers/scsi/fnic/vnic_dev.c struct vnic_wq wq; wq 439 drivers/scsi/fnic/vnic_dev.c err = vnic_wq_devcmd2_alloc(vdev, &vdev->devcmd2->wq, wq 444 drivers/scsi/fnic/vnic_dev.c fetch_index = ioread32(&vdev->devcmd2->wq.ctrl->fetch_index); wq 455 drivers/scsi/fnic/vnic_dev.c vnic_wq_init_start(&vdev->devcmd2->wq, 0, fetch_index, wq 458 drivers/scsi/fnic/vnic_dev.c vnic_wq_enable(&vdev->devcmd2->wq); wq 468 drivers/scsi/fnic/vnic_dev.c (struct vnic_devcmd2 *) vdev->devcmd2->wq.ring.descs; wq 469 drivers/scsi/fnic/vnic_dev.c vdev->devcmd2->wq_ctrl = vdev->devcmd2->wq.ctrl; wq 485 drivers/scsi/fnic/vnic_dev.c vnic_wq_disable(&vdev->devcmd2->wq); wq 486 drivers/scsi/fnic/vnic_dev.c vnic_wq_free(&vdev->devcmd2->wq); wq 498 drivers/scsi/fnic/vnic_dev.c vnic_wq_disable(&vdev->devcmd2->wq); wq 499 drivers/scsi/fnic/vnic_dev.c vnic_wq_free(&vdev->devcmd2->wq); wq 28 drivers/scsi/fnic/vnic_wq.c int vnic_wq_get_ctrl(struct vnic_dev *vdev, struct vnic_wq *wq, wq 31 drivers/scsi/fnic/vnic_wq.c wq->ctrl = vnic_dev_get_res(vdev, res_type, index); wq 33 drivers/scsi/fnic/vnic_wq.c if (!wq->ctrl) wq 40 drivers/scsi/fnic/vnic_wq.c int vnic_wq_alloc_ring(struct vnic_dev *vdev, struct vnic_wq *wq, wq 43 drivers/scsi/fnic/vnic_wq.c return vnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, desc_size); wq 47 drivers/scsi/fnic/vnic_wq.c static int vnic_wq_alloc_bufs(struct vnic_wq *wq) wq 50 drivers/scsi/fnic/vnic_wq.c unsigned int i, j, count = wq->ring.desc_count; wq 54 drivers/scsi/fnic/vnic_wq.c wq->bufs[i] = kzalloc(VNIC_WQ_BUF_BLK_SZ, GFP_ATOMIC); wq 55 drivers/scsi/fnic/vnic_wq.c if (!wq->bufs[i]) { wq 62 drivers/scsi/fnic/vnic_wq.c buf = wq->bufs[i]; wq 65 drivers/scsi/fnic/vnic_wq.c buf->desc = (u8 *)wq->ring.descs + wq 66 drivers/scsi/fnic/vnic_wq.c wq->ring.desc_size * buf->index; wq 68 drivers/scsi/fnic/vnic_wq.c buf->next = wq->bufs[0]; wq 71 drivers/scsi/fnic/vnic_wq.c buf->next = wq->bufs[i + 1]; wq 79 drivers/scsi/fnic/vnic_wq.c wq->to_use = wq->to_clean = wq->bufs[0]; wq 84 drivers/scsi/fnic/vnic_wq.c void vnic_wq_free(struct vnic_wq *wq) wq 89 drivers/scsi/fnic/vnic_wq.c vdev = wq->vdev; wq 91 drivers/scsi/fnic/vnic_wq.c 
vnic_dev_free_desc_ring(vdev, &wq->ring);
wq 94 drivers/scsi/fnic/vnic_wq.c kfree(wq->bufs[i]);
wq 95 drivers/scsi/fnic/vnic_wq.c wq->bufs[i] = NULL;
wq 98 drivers/scsi/fnic/vnic_wq.c wq->ctrl = NULL;
wq 102 drivers/scsi/fnic/vnic_wq.c int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index,
wq 107 drivers/scsi/fnic/vnic_wq.c wq->index = index;
wq 108 drivers/scsi/fnic/vnic_wq.c wq->vdev = vdev;
wq 110 drivers/scsi/fnic/vnic_wq.c wq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_WQ, index);
wq 111 drivers/scsi/fnic/vnic_wq.c if (!wq->ctrl) {
wq 116 drivers/scsi/fnic/vnic_wq.c vnic_wq_disable(wq);
wq 118 drivers/scsi/fnic/vnic_wq.c err = vnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, desc_size);
wq 122 drivers/scsi/fnic/vnic_wq.c err = vnic_wq_alloc_bufs(wq);
wq 124 drivers/scsi/fnic/vnic_wq.c vnic_wq_free(wq);
wq 132 drivers/scsi/fnic/vnic_wq.c int vnic_wq_devcmd2_alloc(struct vnic_dev *vdev, struct vnic_wq *wq,
wq 137 drivers/scsi/fnic/vnic_wq.c wq->index = 0;
wq 138 drivers/scsi/fnic/vnic_wq.c wq->vdev = vdev;
wq 140 drivers/scsi/fnic/vnic_wq.c err = vnic_wq_get_ctrl(vdev, wq, 0, RES_TYPE_DEVCMD2);
wq 145 drivers/scsi/fnic/vnic_wq.c vnic_wq_disable(wq);
wq 147 drivers/scsi/fnic/vnic_wq.c err = vnic_wq_alloc_ring(vdev, wq, desc_count, desc_size);
wq 153 drivers/scsi/fnic/vnic_wq.c void vnic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index,
wq 159 drivers/scsi/fnic/vnic_wq.c unsigned int count = wq->ring.desc_count;
wq 161 drivers/scsi/fnic/vnic_wq.c paddr = (u64)wq->ring.base_addr | VNIC_PADDR_TARGET;
wq 162 drivers/scsi/fnic/vnic_wq.c writeq(paddr, &wq->ctrl->ring_base);
wq 163 drivers/scsi/fnic/vnic_wq.c iowrite32(count, &wq->ctrl->ring_size);
wq 164 drivers/scsi/fnic/vnic_wq.c iowrite32(fetch_index, &wq->ctrl->fetch_index);
wq 165 drivers/scsi/fnic/vnic_wq.c iowrite32(posted_index, &wq->ctrl->posted_index);
wq 166 drivers/scsi/fnic/vnic_wq.c iowrite32(cq_index, &wq->ctrl->cq_index);
wq 167 drivers/scsi/fnic/vnic_wq.c iowrite32(error_interrupt_enable, &wq->ctrl->error_interrupt_enable);
wq 168 drivers/scsi/fnic/vnic_wq.c iowrite32(error_interrupt_offset, &wq->ctrl->error_interrupt_offset);
wq 169 drivers/scsi/fnic/vnic_wq.c iowrite32(0, &wq->ctrl->error_status);
wq 171 drivers/scsi/fnic/vnic_wq.c wq->to_use = wq->to_clean =
wq 172 drivers/scsi/fnic/vnic_wq.c &wq->bufs[fetch_index / VNIC_WQ_BUF_BLK_ENTRIES]
wq 177 drivers/scsi/fnic/vnic_wq.c void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index,
wq 183 drivers/scsi/fnic/vnic_wq.c paddr = (u64)wq->ring.base_addr | VNIC_PADDR_TARGET;
wq 184 drivers/scsi/fnic/vnic_wq.c writeq(paddr, &wq->ctrl->ring_base);
wq 185 drivers/scsi/fnic/vnic_wq.c iowrite32(wq->ring.desc_count, &wq->ctrl->ring_size);
wq 186 drivers/scsi/fnic/vnic_wq.c iowrite32(0, &wq->ctrl->fetch_index);
wq 187 drivers/scsi/fnic/vnic_wq.c iowrite32(0, &wq->ctrl->posted_index);
wq 188 drivers/scsi/fnic/vnic_wq.c iowrite32(cq_index, &wq->ctrl->cq_index);
wq 189 drivers/scsi/fnic/vnic_wq.c iowrite32(error_interrupt_enable, &wq->ctrl->error_interrupt_enable);
wq 190 drivers/scsi/fnic/vnic_wq.c iowrite32(error_interrupt_offset, &wq->ctrl->error_interrupt_offset);
wq 191 drivers/scsi/fnic/vnic_wq.c iowrite32(0, &wq->ctrl->error_status);
wq 194 drivers/scsi/fnic/vnic_wq.c unsigned int vnic_wq_error_status(struct vnic_wq *wq)
wq 196 drivers/scsi/fnic/vnic_wq.c return ioread32(&wq->ctrl->error_status);
wq 199 drivers/scsi/fnic/vnic_wq.c void vnic_wq_enable(struct vnic_wq *wq)
wq 201 drivers/scsi/fnic/vnic_wq.c iowrite32(1, &wq->ctrl->enable);
wq 204 drivers/scsi/fnic/vnic_wq.c int vnic_wq_disable(struct vnic_wq *wq)
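
The vnic_wq entries above and below describe a fixed-size descriptor ring with a to_use/to_clean pointer pair: vnic_wq_next_desc() hands out the next free descriptor, vnic_wq_post() records the buffer and publishes posted_index, and vnic_wq_service()/vnic_wq_clean() later walk completed buffers back to the driver. A sketch of the producer side as the fnic_res.h wrappers use it; example_queue_frame() and example_fill() are hypothetical stand-ins for the real wq_enet_desc encoding:

    /* Hypothetical encoder for the descriptor fields; the real layout
     * lives in wq_enet_desc and is filled by fnic_res.h helpers. */
    static void example_fill(struct wq_enet_desc *desc,
                             dma_addr_t dma_addr, unsigned int len);

    static int example_queue_frame(struct vnic_wq *wq, void *os_buf,
                                   dma_addr_t dma_addr, unsigned int len)
    {
        struct wq_enet_desc *desc;

        if (!vnic_wq_desc_avail(wq))        /* ring full: desc_avail == 0 */
            return -ENOMEM;

        desc = vnic_wq_next_desc(wq);       /* wq->to_use->desc */
        example_fill(desc, dma_addr, len);  /* build descriptor in place */

        /* Single-fragment frame: start-of-packet = end-of-packet = 1.
         * vnic_wq_post() remembers os_buf for the vnic_wq_service()/
         * vnic_wq_clean() callbacks, advances to_use, decrements
         * desc_avail, and writes posted_index so the device fetches
         * the new work. */
        vnic_wq_post(wq, os_buf, dma_addr, len, 1, 1);
        return 0;
    }

Completion runs the inverse path: vnic_wq_service() invokes a buf_service callback per finished buffer and advances to_clean, which is exactly how fnic_wq_complete_frame_send() above frees transmitted frames.
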
wq 208 drivers/scsi/fnic/vnic_wq.c iowrite32(0, &wq->ctrl->enable);
wq 212 drivers/scsi/fnic/vnic_wq.c if (!(ioread32(&wq->ctrl->running)))
wq 217 drivers/scsi/fnic/vnic_wq.c printk(KERN_ERR "Failed to disable WQ[%d]\n", wq->index);
wq 222 drivers/scsi/fnic/vnic_wq.c void vnic_wq_clean(struct vnic_wq *wq,
wq 223 drivers/scsi/fnic/vnic_wq.c void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf))
wq 227 drivers/scsi/fnic/vnic_wq.c BUG_ON(ioread32(&wq->ctrl->enable));
wq 229 drivers/scsi/fnic/vnic_wq.c buf = wq->to_clean;
wq 231 drivers/scsi/fnic/vnic_wq.c while (vnic_wq_desc_used(wq) > 0) {
wq 233 drivers/scsi/fnic/vnic_wq.c (*buf_clean)(wq, buf);
wq 235 drivers/scsi/fnic/vnic_wq.c buf = wq->to_clean = buf->next;
wq 236 drivers/scsi/fnic/vnic_wq.c wq->ring.desc_avail++;
wq 239 drivers/scsi/fnic/vnic_wq.c wq->to_use = wq->to_clean = wq->bufs[0];
wq 241 drivers/scsi/fnic/vnic_wq.c iowrite32(0, &wq->ctrl->fetch_index);
wq 242 drivers/scsi/fnic/vnic_wq.c iowrite32(0, &wq->ctrl->posted_index);
wq 243 drivers/scsi/fnic/vnic_wq.c iowrite32(0, &wq->ctrl->error_status);
wq 245 drivers/scsi/fnic/vnic_wq.c vnic_dev_clear_desc_ring(&wq->ring);
wq 98 drivers/scsi/fnic/vnic_wq.h static inline unsigned int vnic_wq_desc_avail(struct vnic_wq *wq)
wq 101 drivers/scsi/fnic/vnic_wq.h return wq->ring.desc_avail;
wq 104 drivers/scsi/fnic/vnic_wq.h static inline unsigned int vnic_wq_desc_used(struct vnic_wq *wq)
wq 107 drivers/scsi/fnic/vnic_wq.h return wq->ring.desc_count - wq->ring.desc_avail - 1;
wq 110 drivers/scsi/fnic/vnic_wq.h static inline void *vnic_wq_next_desc(struct vnic_wq *wq)
wq 112 drivers/scsi/fnic/vnic_wq.h return wq->to_use->desc;
wq 115 drivers/scsi/fnic/vnic_wq.h static inline void vnic_wq_post(struct vnic_wq *wq,
wq 119 drivers/scsi/fnic/vnic_wq.h struct vnic_wq_buf *buf = wq->to_use;
wq 134 drivers/scsi/fnic/vnic_wq.h iowrite32(buf->index, &wq->ctrl->posted_index);
wq 136 drivers/scsi/fnic/vnic_wq.h wq->to_use = buf;
wq 138 drivers/scsi/fnic/vnic_wq.h wq->ring.desc_avail--;
wq 141 drivers/scsi/fnic/vnic_wq.h static inline void vnic_wq_service(struct vnic_wq *wq,
wq 143 drivers/scsi/fnic/vnic_wq.h void (*buf_service)(struct vnic_wq *wq,
wq 149 drivers/scsi/fnic/vnic_wq.h buf = wq->to_clean;
wq 152 drivers/scsi/fnic/vnic_wq.h (*buf_service)(wq, cq_desc, buf, opaque);
wq 154 drivers/scsi/fnic/vnic_wq.h wq->ring.desc_avail++;
wq 156 drivers/scsi/fnic/vnic_wq.h wq->to_clean = buf->next;
wq 161 drivers/scsi/fnic/vnic_wq.h buf = wq->to_clean;
wq 165 drivers/scsi/fnic/vnic_wq.h void vnic_wq_free(struct vnic_wq *wq);
wq 166 drivers/scsi/fnic/vnic_wq.h int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index,
wq 168 drivers/scsi/fnic/vnic_wq.h int vnic_wq_devcmd2_alloc(struct vnic_dev *vdev, struct vnic_wq *wq,
wq 170 drivers/scsi/fnic/vnic_wq.h void vnic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index,
wq 174 drivers/scsi/fnic/vnic_wq.h void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index,
wq 177 drivers/scsi/fnic/vnic_wq.h unsigned int vnic_wq_error_status(struct vnic_wq *wq);
wq 178 drivers/scsi/fnic/vnic_wq.h void vnic_wq_enable(struct vnic_wq *wq);
wq 179 drivers/scsi/fnic/vnic_wq.h int vnic_wq_disable(struct vnic_wq *wq);
wq 180 drivers/scsi/fnic/vnic_wq.h void vnic_wq_clean(struct vnic_wq *wq,
wq 181 drivers/scsi/fnic/vnic_wq.h void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf));
wq 25 drivers/scsi/fnic/vnic_wq_copy.c void vnic_wq_copy_enable(struct vnic_wq_copy *wq) wq
27 drivers/scsi/fnic/vnic_wq_copy.c iowrite32(1, &wq->ctrl->enable); wq 30 drivers/scsi/fnic/vnic_wq_copy.c int vnic_wq_copy_disable(struct vnic_wq_copy *wq) wq 34 drivers/scsi/fnic/vnic_wq_copy.c iowrite32(0, &wq->ctrl->enable); wq 38 drivers/scsi/fnic/vnic_wq_copy.c if (!(ioread32(&wq->ctrl->running))) wq 45 drivers/scsi/fnic/vnic_wq_copy.c wq->index, ioread32(&wq->ctrl->fetch_index), wq 46 drivers/scsi/fnic/vnic_wq_copy.c ioread32(&wq->ctrl->posted_index)); wq 51 drivers/scsi/fnic/vnic_wq_copy.c void vnic_wq_copy_clean(struct vnic_wq_copy *wq, wq 52 drivers/scsi/fnic/vnic_wq_copy.c void (*q_clean)(struct vnic_wq_copy *wq, wq 55 drivers/scsi/fnic/vnic_wq_copy.c BUG_ON(ioread32(&wq->ctrl->enable)); wq 57 drivers/scsi/fnic/vnic_wq_copy.c if (vnic_wq_copy_desc_in_use(wq)) wq 58 drivers/scsi/fnic/vnic_wq_copy.c vnic_wq_copy_service(wq, -1, q_clean); wq 60 drivers/scsi/fnic/vnic_wq_copy.c wq->to_use_index = wq->to_clean_index = 0; wq 62 drivers/scsi/fnic/vnic_wq_copy.c iowrite32(0, &wq->ctrl->fetch_index); wq 63 drivers/scsi/fnic/vnic_wq_copy.c iowrite32(0, &wq->ctrl->posted_index); wq 64 drivers/scsi/fnic/vnic_wq_copy.c iowrite32(0, &wq->ctrl->error_status); wq 66 drivers/scsi/fnic/vnic_wq_copy.c vnic_dev_clear_desc_ring(&wq->ring); wq 69 drivers/scsi/fnic/vnic_wq_copy.c void vnic_wq_copy_free(struct vnic_wq_copy *wq) wq 73 drivers/scsi/fnic/vnic_wq_copy.c vdev = wq->vdev; wq 74 drivers/scsi/fnic/vnic_wq_copy.c vnic_dev_free_desc_ring(vdev, &wq->ring); wq 75 drivers/scsi/fnic/vnic_wq_copy.c wq->ctrl = NULL; wq 78 drivers/scsi/fnic/vnic_wq_copy.c int vnic_wq_copy_alloc(struct vnic_dev *vdev, struct vnic_wq_copy *wq, wq 84 drivers/scsi/fnic/vnic_wq_copy.c wq->index = index; wq 85 drivers/scsi/fnic/vnic_wq_copy.c wq->vdev = vdev; wq 86 drivers/scsi/fnic/vnic_wq_copy.c wq->to_use_index = wq->to_clean_index = 0; wq 87 drivers/scsi/fnic/vnic_wq_copy.c wq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_WQ, index); wq 88 drivers/scsi/fnic/vnic_wq_copy.c if (!wq->ctrl) { wq 93 drivers/scsi/fnic/vnic_wq_copy.c vnic_wq_copy_disable(wq); wq 95 drivers/scsi/fnic/vnic_wq_copy.c err = vnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, desc_size); wq 102 drivers/scsi/fnic/vnic_wq_copy.c void vnic_wq_copy_init(struct vnic_wq_copy *wq, unsigned int cq_index, wq 108 drivers/scsi/fnic/vnic_wq_copy.c paddr = (u64)wq->ring.base_addr | VNIC_PADDR_TARGET; wq 109 drivers/scsi/fnic/vnic_wq_copy.c writeq(paddr, &wq->ctrl->ring_base); wq 110 drivers/scsi/fnic/vnic_wq_copy.c iowrite32(wq->ring.desc_count, &wq->ctrl->ring_size); wq 111 drivers/scsi/fnic/vnic_wq_copy.c iowrite32(0, &wq->ctrl->fetch_index); wq 112 drivers/scsi/fnic/vnic_wq_copy.c iowrite32(0, &wq->ctrl->posted_index); wq 113 drivers/scsi/fnic/vnic_wq_copy.c iowrite32(cq_index, &wq->ctrl->cq_index); wq 114 drivers/scsi/fnic/vnic_wq_copy.c iowrite32(error_interrupt_enable, &wq->ctrl->error_interrupt_enable); wq 115 drivers/scsi/fnic/vnic_wq_copy.c iowrite32(error_interrupt_offset, &wq->ctrl->error_interrupt_offset); wq 36 drivers/scsi/fnic/vnic_wq_copy.h static inline unsigned int vnic_wq_copy_desc_avail(struct vnic_wq_copy *wq) wq 38 drivers/scsi/fnic/vnic_wq_copy.h return wq->ring.desc_avail; wq 41 drivers/scsi/fnic/vnic_wq_copy.h static inline unsigned int vnic_wq_copy_desc_in_use(struct vnic_wq_copy *wq) wq 43 drivers/scsi/fnic/vnic_wq_copy.h return wq->ring.desc_count - 1 - wq->ring.desc_avail; wq 46 drivers/scsi/fnic/vnic_wq_copy.h static inline void *vnic_wq_copy_next_desc(struct vnic_wq_copy *wq) wq 48 drivers/scsi/fnic/vnic_wq_copy.h struct 
fcpio_host_req *desc = wq->ring.descs; wq 49 drivers/scsi/fnic/vnic_wq_copy.h return &desc[wq->to_use_index]; wq 52 drivers/scsi/fnic/vnic_wq_copy.h static inline void vnic_wq_copy_post(struct vnic_wq_copy *wq) wq 55 drivers/scsi/fnic/vnic_wq_copy.h ((wq->to_use_index + 1) == wq->ring.desc_count) ? wq 56 drivers/scsi/fnic/vnic_wq_copy.h (wq->to_use_index = 0) : (wq->to_use_index++); wq 57 drivers/scsi/fnic/vnic_wq_copy.h wq->ring.desc_avail--; wq 66 drivers/scsi/fnic/vnic_wq_copy.h iowrite32(wq->to_use_index, &wq->ctrl->posted_index); wq 69 drivers/scsi/fnic/vnic_wq_copy.h static inline void vnic_wq_copy_desc_process(struct vnic_wq_copy *wq, u16 index) wq 73 drivers/scsi/fnic/vnic_wq_copy.h if (wq->to_clean_index <= index) wq 74 drivers/scsi/fnic/vnic_wq_copy.h cnt = (index - wq->to_clean_index) + 1; wq 76 drivers/scsi/fnic/vnic_wq_copy.h cnt = wq->ring.desc_count - wq->to_clean_index + index + 1; wq 78 drivers/scsi/fnic/vnic_wq_copy.h wq->to_clean_index = ((index + 1) % wq->ring.desc_count); wq 79 drivers/scsi/fnic/vnic_wq_copy.h wq->ring.desc_avail += cnt; wq 83 drivers/scsi/fnic/vnic_wq_copy.h static inline void vnic_wq_copy_service(struct vnic_wq_copy *wq, wq 85 drivers/scsi/fnic/vnic_wq_copy.h void (*q_service)(struct vnic_wq_copy *wq, wq 88 drivers/scsi/fnic/vnic_wq_copy.h struct fcpio_host_req *wq_desc = wq->ring.descs; wq 94 drivers/scsi/fnic/vnic_wq_copy.h (*q_service)(wq, &wq_desc[wq->to_clean_index]); wq 96 drivers/scsi/fnic/vnic_wq_copy.h wq->ring.desc_avail++; wq 98 drivers/scsi/fnic/vnic_wq_copy.h curr_index = wq->to_clean_index; wq 103 drivers/scsi/fnic/vnic_wq_copy.h ((wq->to_clean_index + 1) == wq->ring.desc_count) ? wq 104 drivers/scsi/fnic/vnic_wq_copy.h (wq->to_clean_index = 0) : (wq->to_clean_index++); wq 111 drivers/scsi/fnic/vnic_wq_copy.h (wq->to_clean_index == wq->to_use_index)) wq 116 drivers/scsi/fnic/vnic_wq_copy.h void vnic_wq_copy_enable(struct vnic_wq_copy *wq); wq 117 drivers/scsi/fnic/vnic_wq_copy.h int vnic_wq_copy_disable(struct vnic_wq_copy *wq); wq 118 drivers/scsi/fnic/vnic_wq_copy.h void vnic_wq_copy_free(struct vnic_wq_copy *wq); wq 119 drivers/scsi/fnic/vnic_wq_copy.h int vnic_wq_copy_alloc(struct vnic_dev *vdev, struct vnic_wq_copy *wq, wq 121 drivers/scsi/fnic/vnic_wq_copy.h void vnic_wq_copy_init(struct vnic_wq_copy *wq, unsigned int cq_index, wq 124 drivers/scsi/fnic/vnic_wq_copy.h void vnic_wq_copy_clean(struct vnic_wq_copy *wq, wq 125 drivers/scsi/fnic/vnic_wq_copy.h void (*q_clean)(struct vnic_wq_copy *wq, wq 348 drivers/scsi/hisi_sas/hisi_sas.h struct workqueue_struct *wq; wq 887 drivers/scsi/hisi_sas/hisi_sas_main.c return queue_work(hisi_hba->wq, &phy->works[event]); wq 1574 drivers/scsi/hisi_sas/hisi_sas_main.c queue_work(hisi_hba->wq, &hisi_hba->debugfs_work); wq 1874 drivers/scsi/hisi_sas/hisi_sas_main.c queue_work(hisi_hba->wq, &r.work); wq 2066 drivers/scsi/hisi_sas/hisi_sas_main.c queue_work(hisi_hba->wq, &hisi_hba->debugfs_work); wq 2243 drivers/scsi/hisi_sas/hisi_sas_main.c queue_work(hisi_hba->wq, &hisi_hba->rst_work); wq 2438 drivers/scsi/hisi_sas/hisi_sas_main.c hisi_hba->wq = create_singlethread_workqueue(dev_name(dev)); wq 2439 drivers/scsi/hisi_sas/hisi_sas_main.c if (!hisi_hba->wq) { wq 2460 drivers/scsi/hisi_sas/hisi_sas_main.c if (hisi_hba->wq) wq 2461 drivers/scsi/hisi_sas/hisi_sas_main.c destroy_workqueue(hisi_hba->wq); wq 3295 drivers/scsi/hisi_sas/hisi_sas_main.c queue_work(hisi_hba->wq, &hisi_hba->debugfs_work); wq 1502 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c queue_work(hisi_hba->wq, &hisi_hba->rst_work); wq 2893 
drivers/scsi/hisi_sas/hisi_sas_v2_hw.c queue_work(hisi_hba->wq, &hisi_hba->rst_work); wq 2972 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c queue_work(hisi_hba->wq, &hisi_hba->rst_work); wq 3082 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c queue_work(hisi_hba->wq, &hisi_hba->rst_work); wq 3087 drivers/scsi/hisi_sas/hisi_sas_v2_hw.c queue_work(hisi_hba->wq, &hisi_hba->rst_work); wq 1689 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c queue_work(hisi_hba->wq, &hisi_hba->rst_work); wq 1928 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c queue_work(hisi_hba->wq, &hisi_hba->rst_work); wq 2049 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c queue_work(hisi_hba->wq, &hisi_hba->rst_work); wq 2054 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c queue_work(hisi_hba->wq, &hisi_hba->rst_work); wq 3371 drivers/scsi/hisi_sas/hisi_sas_v3_hw.c flush_workqueue(hisi_hba->wq); wq 8571 drivers/scsi/hpsa.c struct workqueue_struct *wq = NULL; wq 8573 drivers/scsi/hpsa.c wq = alloc_ordered_workqueue("%s_%d_hpsa", 0, name, h->ctlr); wq 8574 drivers/scsi/hpsa.c if (!wq) wq 8577 drivers/scsi/hpsa.c return wq; wq 693 drivers/scsi/lpfc/lpfc.h struct workqueue_struct *wq; wq 5068 drivers/scsi/lpfc/lpfc_attr.c queue_delayed_work(phba->wq, &phba->eq_delay_work, wq 56 drivers/scsi/lpfc/lpfc_bsg.c wait_queue_head_t wq; wq 868 drivers/scsi/lpfc/lpfc_bsg.c init_waitqueue_head(&evt->wq); wq 1125 drivers/scsi/lpfc/lpfc_bsg.c wake_up_interruptible(&evt->wq); wq 2750 drivers/scsi/lpfc/lpfc_bsg.c evt->wq, !list_empty(&evt->events_to_see), wq 3326 drivers/scsi/lpfc/lpfc_bsg.c evt->wq, !list_empty(&evt->events_to_see), wq 259 drivers/scsi/lpfc/lpfc_crtn.h void lpfc_nvmet_wqfull_process(struct lpfc_hba *phba, struct lpfc_queue *wq); wq 414 drivers/scsi/lpfc/lpfc_debugfs.h struct lpfc_queue *wq; wq 418 drivers/scsi/lpfc/lpfc_debugfs.h wq = phba->sli4_hba.hdwq[wqidx].io_wq; wq 421 drivers/scsi/lpfc/lpfc_debugfs.h wq = phba->sli4_hba.mbx_wq; wq 424 drivers/scsi/lpfc/lpfc_debugfs.h wq = phba->sli4_hba.els_wq; wq 427 drivers/scsi/lpfc/lpfc_debugfs.h wq = phba->sli4_hba.nvmels_wq; wq 434 drivers/scsi/lpfc/lpfc_debugfs.h qtypestr, wqidx, wq->queue_id); wq 437 drivers/scsi/lpfc/lpfc_debugfs.h qtypestr, wq->queue_id); wq 439 drivers/scsi/lpfc/lpfc_debugfs.h lpfc_debug_dump_q(wq); wq 454 drivers/scsi/lpfc/lpfc_debugfs.h struct lpfc_queue *wq, *cq, *eq; wq 462 drivers/scsi/lpfc/lpfc_debugfs.h wq = phba->sli4_hba.hdwq[wqidx].io_wq; wq 466 drivers/scsi/lpfc/lpfc_debugfs.h wq = phba->sli4_hba.mbx_wq; wq 470 drivers/scsi/lpfc/lpfc_debugfs.h wq = phba->sli4_hba.els_wq; wq 474 drivers/scsi/lpfc/lpfc_debugfs.h wq = phba->sli4_hba.nvmels_wq; wq 494 drivers/scsi/lpfc/lpfc_debugfs.h qtypestr, wqidx, wq->queue_id, wqidx, cq->queue_id, wq 499 drivers/scsi/lpfc/lpfc_debugfs.h qtypestr, wq->queue_id, cq->queue_id, wq 1307 drivers/scsi/lpfc/lpfc_init.c queue_delayed_work(phba->wq, &phba->eq_delay_work, wq 3501 drivers/scsi/lpfc/lpfc_init.c if (phba->wq) wq 3502 drivers/scsi/lpfc/lpfc_init.c flush_workqueue(phba->wq); wq 4601 drivers/scsi/lpfc/lpfc_init.c if (phba->wq) wq 4602 drivers/scsi/lpfc/lpfc_init.c flush_workqueue(phba->wq); wq 6446 drivers/scsi/lpfc/lpfc_init.c phba->wq = alloc_workqueue("lpfc_wq", WQ_MEM_RECLAIM, 0); wq 7078 drivers/scsi/lpfc/lpfc_init.c if (phba->wq) { wq 7079 drivers/scsi/lpfc/lpfc_init.c flush_workqueue(phba->wq); wq 7080 drivers/scsi/lpfc/lpfc_init.c destroy_workqueue(phba->wq); wq 7081 drivers/scsi/lpfc/lpfc_init.c phba->wq = NULL; wq 9239 drivers/scsi/lpfc/lpfc_init.c struct lpfc_queue *cq, struct lpfc_queue *wq, uint16_t *cq_map, wq 9245 drivers/scsi/lpfc/lpfc_init.c if 
(!eq || !cq || !wq) { wq 9272 drivers/scsi/lpfc/lpfc_init.c rc = lpfc_wq_create(phba, wq, cq, qtype); wq 9282 drivers/scsi/lpfc/lpfc_init.c pring = wq->pring; wq 9283 drivers/scsi/lpfc/lpfc_init.c pring->sli.sli4.wqp = (void *)wq; wq 9288 drivers/scsi/lpfc/lpfc_init.c qidx, wq->queue_id, wq->assoc_qid, qidx, cq->queue_id); wq 9290 drivers/scsi/lpfc/lpfc_init.c rc = lpfc_mq_create(phba, wq, cq, LPFC_MBOX); wq 467 drivers/scsi/lpfc/lpfc_nvmet.c if (!queue_work(phba->wq, &ctx_buf->defer_work)) { wq 937 drivers/scsi/lpfc/lpfc_nvmet.c struct lpfc_queue *wq; wq 1029 drivers/scsi/lpfc/lpfc_nvmet.c wq = ctxp->hdwq->io_wq; wq 1030 drivers/scsi/lpfc/lpfc_nvmet.c pring = wq->pring; wq 1032 drivers/scsi/lpfc/lpfc_nvmet.c list_add_tail(&nvmewqeq->list, &wq->wqfull_list); wq 1033 drivers/scsi/lpfc/lpfc_nvmet.c wq->q_flag |= HBA_NVMET_WQFULL; wq 1071 drivers/scsi/lpfc/lpfc_nvmet.c struct lpfc_queue *wq; wq 1107 drivers/scsi/lpfc/lpfc_nvmet.c wq = ctxp->hdwq->io_wq; wq 1108 drivers/scsi/lpfc/lpfc_nvmet.c lpfc_nvmet_wqfull_flush(phba, wq, ctxp); wq 1813 drivers/scsi/lpfc/lpfc_nvmet.c lpfc_nvmet_wqfull_flush(struct lpfc_hba *phba, struct lpfc_queue *wq, wq 1823 drivers/scsi/lpfc/lpfc_nvmet.c pring = wq->pring; wq 1833 drivers/scsi/lpfc/lpfc_nvmet.c &wq->wqfull_list, list) { wq 1854 drivers/scsi/lpfc/lpfc_nvmet.c wq->q_flag &= ~HBA_NVMET_WQFULL; wq 1860 drivers/scsi/lpfc/lpfc_nvmet.c struct lpfc_queue *wq) wq 1873 drivers/scsi/lpfc/lpfc_nvmet.c pring = wq->pring; wq 1875 drivers/scsi/lpfc/lpfc_nvmet.c while (!list_empty(&wq->wqfull_list)) { wq 1876 drivers/scsi/lpfc/lpfc_nvmet.c list_remove_head(&wq->wqfull_list, nvmewqeq, struct lpfc_iocbq, wq 1884 drivers/scsi/lpfc/lpfc_nvmet.c list_add(&nvmewqeq->list, &wq->wqfull_list); wq 1901 drivers/scsi/lpfc/lpfc_nvmet.c wq->q_flag &= ~HBA_NVMET_WQFULL; wq 1912 drivers/scsi/lpfc/lpfc_nvmet.c struct lpfc_queue *wq; wq 1921 drivers/scsi/lpfc/lpfc_nvmet.c wq = phba->sli4_hba.hdwq[qidx].io_wq; wq 1922 drivers/scsi/lpfc/lpfc_nvmet.c lpfc_nvmet_wqfull_flush(phba, wq, NULL); wq 2365 drivers/scsi/lpfc/lpfc_nvmet.c if (!queue_work(phba->wq, &ctx_buf->defer_work)) { wq 7688 drivers/scsi/lpfc/lpfc_sli.c queue_delayed_work(phba->wq, &phba->eq_delay_work, wq 9886 drivers/scsi/lpfc/lpfc_sli.c struct lpfc_queue *wq; wq 9892 drivers/scsi/lpfc/lpfc_sli.c wq = phba->sli4_hba.hdwq[piocb->hba_wqidx].io_wq; wq 9894 drivers/scsi/lpfc/lpfc_sli.c wq = phba->sli4_hba.els_wq; wq 9898 drivers/scsi/lpfc/lpfc_sli.c pring = wq->pring; wq 9955 drivers/scsi/lpfc/lpfc_sli.c if (lpfc_sli4_wq_put(wq, &wqe)) wq 13558 drivers/scsi/lpfc/lpfc_sli.c if (!queue_work_on(cq->chann, phba->wq, &cq->spwork)) wq 13696 drivers/scsi/lpfc/lpfc_sli.c if (!queue_delayed_work_on(cq->chann, phba->wq, wq 14108 drivers/scsi/lpfc/lpfc_sli.c if (!queue_work_on(cq->chann, phba->wq, &cq->irqwork)) wq 14142 drivers/scsi/lpfc/lpfc_sli.c if (!queue_delayed_work_on(cq->chann, phba->wq, wq 15490 drivers/scsi/lpfc/lpfc_sli.c lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq, wq 15510 drivers/scsi/lpfc/lpfc_sli.c if (!wq || !cq) wq 15513 drivers/scsi/lpfc/lpfc_sli.c hw_page_size = wq->page_size; wq 15526 drivers/scsi/lpfc/lpfc_sli.c wq->page_count); wq 15535 drivers/scsi/lpfc/lpfc_sli.c (wq->page_size > SLI4_PAGE_SIZE)) wq 15549 drivers/scsi/lpfc/lpfc_sli.c wq->entry_count); wq 15553 drivers/scsi/lpfc/lpfc_sli.c switch (wq->entry_size) { wq 15570 drivers/scsi/lpfc/lpfc_sli.c (wq->page_size / SLI4_PAGE_SIZE)); wq 15578 drivers/scsi/lpfc/lpfc_sli.c list_for_each_entry(dmabuf, &wq->page_list, list) { wq 15601 
drivers/scsi/lpfc/lpfc_sli.c wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id, wq 15604 drivers/scsi/lpfc/lpfc_sli.c wq->queue_id = bf_get(lpfc_mbx_wq_create_v1_q_id, wq 15607 drivers/scsi/lpfc/lpfc_sli.c if (wq->queue_id == 0xFFFF) { wq 15612 drivers/scsi/lpfc/lpfc_sli.c wq->db_format = LPFC_DB_LIST_FORMAT; wq 15615 drivers/scsi/lpfc/lpfc_sli.c wq->db_format = bf_get(lpfc_mbx_wq_create_db_format, wq 15617 drivers/scsi/lpfc/lpfc_sli.c if ((wq->db_format != LPFC_DB_LIST_FORMAT) && wq 15618 drivers/scsi/lpfc/lpfc_sli.c (wq->db_format != LPFC_DB_RING_FORMAT)) { wq 15622 drivers/scsi/lpfc/lpfc_sli.c wq->queue_id, wq->db_format); wq 15634 drivers/scsi/lpfc/lpfc_sli.c wq->queue_id, pci_barset); wq 15644 drivers/scsi/lpfc/lpfc_sli.c wq->queue_id, db_offset); wq 15648 drivers/scsi/lpfc/lpfc_sli.c wq->db_regaddr = bar_memmap_p + db_offset; wq 15651 drivers/scsi/lpfc/lpfc_sli.c "format:x%x\n", wq->queue_id, wq 15652 drivers/scsi/lpfc/lpfc_sli.c pci_barset, db_offset, wq->db_format); wq 15654 drivers/scsi/lpfc/lpfc_sli.c wq->db_regaddr = phba->sli4_hba.WQDBregaddr; wq 15657 drivers/scsi/lpfc/lpfc_sli.c wq->dpp_enable = bf_get(lpfc_mbx_wq_create_dpp_rsp, wq 15659 drivers/scsi/lpfc/lpfc_sli.c if (wq->dpp_enable) { wq 15668 drivers/scsi/lpfc/lpfc_sli.c wq->queue_id, pci_barset); wq 15673 drivers/scsi/lpfc/lpfc_sli.c wq->db_regaddr = bar_memmap_p + db_offset; wq 15674 drivers/scsi/lpfc/lpfc_sli.c wq->dpp_id = bf_get(lpfc_mbx_wq_create_dpp_id, wq 15684 drivers/scsi/lpfc/lpfc_sli.c wq->queue_id, dpp_barset); wq 15689 drivers/scsi/lpfc/lpfc_sli.c wq->dpp_regaddr = bar_memmap_p + dpp_offset; wq 15694 drivers/scsi/lpfc/lpfc_sli.c wq->queue_id, pci_barset, db_offset, wq 15695 drivers/scsi/lpfc/lpfc_sli.c wq->dpp_id, dpp_barset, dpp_offset); wq 15698 drivers/scsi/lpfc/lpfc_sli.c pg_addr = (unsigned long)(wq->dpp_regaddr) & PAGE_MASK; wq 15705 drivers/scsi/lpfc/lpfc_sli.c wq->queue_id); wq 15712 drivers/scsi/lpfc/lpfc_sli.c wq->db_regaddr = phba->sli4_hba.WQDBregaddr; wq 15714 drivers/scsi/lpfc/lpfc_sli.c wq->pring = kzalloc(sizeof(struct lpfc_sli_ring), GFP_KERNEL); wq 15715 drivers/scsi/lpfc/lpfc_sli.c if (wq->pring == NULL) { wq 15719 drivers/scsi/lpfc/lpfc_sli.c wq->type = LPFC_WQ; wq 15720 drivers/scsi/lpfc/lpfc_sli.c wq->assoc_qid = cq->queue_id; wq 15721 drivers/scsi/lpfc/lpfc_sli.c wq->subtype = subtype; wq 15722 drivers/scsi/lpfc/lpfc_sli.c wq->host_index = 0; wq 15723 drivers/scsi/lpfc/lpfc_sli.c wq->hba_index = 0; wq 15724 drivers/scsi/lpfc/lpfc_sli.c wq->notify_interval = LPFC_WQ_NOTIFY_INTRVL; wq 15727 drivers/scsi/lpfc/lpfc_sli.c list_add_tail(&wq->list, &cq->child_list); wq 16393 drivers/scsi/lpfc/lpfc_sli.c lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq) wq 16401 drivers/scsi/lpfc/lpfc_sli.c if (!wq) wq 16403 drivers/scsi/lpfc/lpfc_sli.c mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL); wq 16412 drivers/scsi/lpfc/lpfc_sli.c wq->queue_id); wq 16413 drivers/scsi/lpfc/lpfc_sli.c mbox->vport = wq->phba->pport; wq 16415 drivers/scsi/lpfc/lpfc_sli.c rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL); wq 16428 drivers/scsi/lpfc/lpfc_sli.c list_del_init(&wq->list); wq 16429 drivers/scsi/lpfc/lpfc_sli.c kfree(wq->pring); wq 16430 drivers/scsi/lpfc/lpfc_sli.c wq->pring = NULL; wq 16431 drivers/scsi/lpfc/lpfc_sli.c mempool_free(mbox, wq->phba->mbox_mem_pool); wq 19607 drivers/scsi/lpfc/lpfc_sli.c struct lpfc_queue *wq; wq 19611 drivers/scsi/lpfc/lpfc_sli.c wq = phba->sli4_hba.hdwq[0].io_wq; wq 19612 drivers/scsi/lpfc/lpfc_sli.c if (unlikely(!wq)) wq 19614 drivers/scsi/lpfc/lpfc_sli.c 
pring = wq->pring; wq 19616 drivers/scsi/lpfc/lpfc_sli.c wq = phba->sli4_hba.els_wq; wq 19617 drivers/scsi/lpfc/lpfc_sli.c if (unlikely(!wq)) wq 19663 drivers/scsi/lpfc/lpfc_sli.c else if (lpfc_sli4_wq_put(wq, &wqe)) wq 19824 drivers/scsi/lpfc/lpfc_sli.c struct lpfc_queue *wq; wq 19864 drivers/scsi/lpfc/lpfc_sli.c wq = qp->io_wq; wq 19865 drivers/scsi/lpfc/lpfc_sli.c pring = wq->pring; wq 19871 drivers/scsi/lpfc/lpfc_sli.c ret = lpfc_sli4_wq_put(wq, wqe); wq 19886 drivers/scsi/lpfc/lpfc_sli.c wq = qp->io_wq; wq 19887 drivers/scsi/lpfc/lpfc_sli.c pring = wq->pring; wq 19901 drivers/scsi/lpfc/lpfc_sli.c ret = lpfc_sli4_wq_put(wq, wqe); wq 1946 drivers/scsi/megaraid/megaraid_sas_fusion.c struct workqueue_struct *wq; wq 1949 drivers/scsi/megaraid/megaraid_sas_fusion.c wq = instance->fw_fault_work_q; wq 1952 drivers/scsi/megaraid/megaraid_sas_fusion.c flush_workqueue(wq); wq 1953 drivers/scsi/megaraid/megaraid_sas_fusion.c destroy_workqueue(wq); wq 727 drivers/scsi/mpt3sas/mpt3sas_base.c struct workqueue_struct *wq; wq 730 drivers/scsi/mpt3sas/mpt3sas_base.c wq = ioc->fault_reset_work_q; wq 733 drivers/scsi/mpt3sas/mpt3sas_base.c if (wq) { wq 735 drivers/scsi/mpt3sas/mpt3sas_base.c flush_workqueue(wq); wq 736 drivers/scsi/mpt3sas/mpt3sas_base.c destroy_workqueue(wq); wq 9744 drivers/scsi/mpt3sas/mpt3sas_scsih.c struct workqueue_struct *wq; wq 9756 drivers/scsi/mpt3sas/mpt3sas_scsih.c wq = ioc->firmware_event_thread; wq 9759 drivers/scsi/mpt3sas/mpt3sas_scsih.c if (wq) wq 9760 drivers/scsi/mpt3sas/mpt3sas_scsih.c destroy_workqueue(wq); wq 9828 drivers/scsi/mpt3sas/mpt3sas_scsih.c struct workqueue_struct *wq; wq 9840 drivers/scsi/mpt3sas/mpt3sas_scsih.c wq = ioc->firmware_event_thread; wq 9843 drivers/scsi/mpt3sas/mpt3sas_scsih.c if (wq) wq 9844 drivers/scsi/mpt3sas/mpt3sas_scsih.c destroy_workqueue(wq); wq 1029 drivers/scsi/myrs.h u64 wq; wq 1033 drivers/scsi/myrs.h u.wq = addr; wq 4208 drivers/scsi/qla2xxx/qla_def.h struct workqueue_struct *wq; wq 3423 drivers/scsi/qla2xxx/qla_isr.c queue_work(ha->wq, &qpair->q_work); wq 908 drivers/scsi/qla2xxx/qla_mid.c if (qpair->hw->wq) wq 1121 drivers/scsi/qla2xxx/qla_os.c flush_workqueue(vha->hw->wq); wq 1600 drivers/scsi/qla2xxx/qla_os.c if (ha->wq) wq 1601 drivers/scsi/qla2xxx/qla_os.c flush_workqueue(ha->wq); wq 3238 drivers/scsi/qla2xxx/qla_os.c ha->wq = alloc_workqueue("qla2xxx_wq", 0, 0); wq 3239 drivers/scsi/qla2xxx/qla_os.c if (unlikely(!ha->wq)) { wq 3812 drivers/scsi/qla2xxx/qla_os.c if (ha->wq) { wq 3813 drivers/scsi/qla2xxx/qla_os.c flush_workqueue(ha->wq); wq 3814 drivers/scsi/qla2xxx/qla_os.c destroy_workqueue(ha->wq); wq 3815 drivers/scsi/qla2xxx/qla_os.c ha->wq = NULL; wq 4894 drivers/scsi/qla2xxx/qla_os.c queue_work(vha->hw->wq, &vha->iocb_work); wq 6662 drivers/scsi/qla2xxx/qla_os.c queue_work(vha->hw->wq, &vha->iocb_work); wq 1164 drivers/scsi/qla2xxx/qla_target.c queue_work(sess->vha->hw->wq, &sess->free_work); wq 1269 drivers/scsi/qla2xxx/qla_target.c WARN_ON(!queue_work(sess->vha->hw->wq, &sess->del_work)); wq 344 drivers/scsi/snic/snic.h ____cacheline_aligned struct vnic_wq wq[SNIC_WQ_MAX]; wq 34 drivers/scsi/snic/snic_io.c snic_wq_cmpl_frame_send(struct vnic_wq *wq, wq 39 drivers/scsi/snic/snic_io.c struct snic *snic = svnic_dev_priv(wq->vdev); wq 69 drivers/scsi/snic/snic_io.c svnic_wq_service(&snic->wq[q_num], wq 97 drivers/scsi/snic/snic_io.c snic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf) wq 101 drivers/scsi/snic/snic_io.c struct snic *snic = svnic_dev_priv(wq->vdev); wq 199 drivers/scsi/snic/snic_io.c 
snic_queue_wq_eth_desc(&snic->wq[q_num], os_buf, pa, len, 0, 0, 1); wq 159 drivers/scsi/snic/snic_isr.c unsigned int n = ARRAY_SIZE(snic->wq); wq 167 drivers/scsi/snic/snic_isr.c BUILD_BUG_ON((ARRAY_SIZE(snic->wq) + SNIC_CQ_IO_CMPL_MAX) > wq 238 drivers/scsi/snic/snic_main.c ret = svnic_wq_disable(&snic->wq[i]); wq 250 drivers/scsi/snic/snic_main.c svnic_wq_clean(&snic->wq[i], snic_free_wq_buf); wq 622 drivers/scsi/snic/snic_main.c svnic_wq_enable(&snic->wq[i]); wq 697 drivers/scsi/snic/snic_main.c rc = svnic_wq_disable(&snic->wq[i]); wq 137 drivers/scsi/snic/snic_res.c svnic_wq_free(&snic->wq[i]); wq 178 drivers/scsi/snic/snic_res.c &snic->wq[i], wq 224 drivers/scsi/snic/snic_res.c svnic_wq_init(&snic->wq[i], wq 288 drivers/scsi/snic/snic_res.c err_status = ioread32(&snic->wq[i].ctrl->error_status); wq 63 drivers/scsi/snic/snic_res.h snic_queue_wq_eth_desc(struct vnic_wq *wq, wq 71 drivers/scsi/snic/snic_res.h struct wq_enet_desc *desc = svnic_wq_next_desc(wq); wq 86 drivers/scsi/snic/snic_res.h svnic_wq_post(wq, os_buf, dma_addr, len, 1, 1); wq 37 drivers/scsi/snic/vnic_dev.c struct vnic_wq wq; wq 379 drivers/scsi/snic/vnic_dev.c &dc2c->wq, wq 385 drivers/scsi/snic/vnic_dev.c fetch_idx = ioread32(&dc2c->wq.ctrl->fetch_index); wq 396 drivers/scsi/snic/vnic_dev.c vnic_wq_init_start(&dc2c->wq, 0, fetch_idx, fetch_idx, 0, 0); wq 397 drivers/scsi/snic/vnic_dev.c svnic_wq_enable(&dc2c->wq); wq 406 drivers/scsi/snic/vnic_dev.c dc2c->cmd_ring = (struct vnic_devcmd2 *) dc2c->wq.ring.descs; wq 407 drivers/scsi/snic/vnic_dev.c dc2c->wq_ctrl = dc2c->wq.ctrl; wq 424 drivers/scsi/snic/vnic_dev.c svnic_wq_disable(&dc2c->wq); wq 425 drivers/scsi/snic/vnic_dev.c svnic_wq_free(&dc2c->wq); wq 442 drivers/scsi/snic/vnic_dev.c svnic_wq_disable(&dc2c->wq); wq 443 drivers/scsi/snic/vnic_dev.c svnic_wq_free(&dc2c->wq); wq 26 drivers/scsi/snic/vnic_wq.c static inline int vnic_wq_get_ctrl(struct vnic_dev *vdev, struct vnic_wq *wq, wq 29 drivers/scsi/snic/vnic_wq.c wq->ctrl = svnic_dev_get_res(vdev, res_type, index); wq 30 drivers/scsi/snic/vnic_wq.c if (!wq->ctrl) wq 36 drivers/scsi/snic/vnic_wq.c static inline int vnic_wq_alloc_ring(struct vnic_dev *vdev, struct vnic_wq *wq, wq 39 drivers/scsi/snic/vnic_wq.c return svnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, wq 43 drivers/scsi/snic/vnic_wq.c static int vnic_wq_alloc_bufs(struct vnic_wq *wq) wq 46 drivers/scsi/snic/vnic_wq.c unsigned int i, j, count = wq->ring.desc_count; wq 50 drivers/scsi/snic/vnic_wq.c wq->bufs[i] = kzalloc(VNIC_WQ_BUF_BLK_SZ, GFP_ATOMIC); wq 51 drivers/scsi/snic/vnic_wq.c if (!wq->bufs[i]) { wq 59 drivers/scsi/snic/vnic_wq.c buf = wq->bufs[i]; wq 62 drivers/scsi/snic/vnic_wq.c buf->desc = (u8 *)wq->ring.descs + wq 63 drivers/scsi/snic/vnic_wq.c wq->ring.desc_size * buf->index; wq 65 drivers/scsi/snic/vnic_wq.c buf->next = wq->bufs[0]; wq 68 drivers/scsi/snic/vnic_wq.c buf->next = wq->bufs[i + 1]; wq 76 drivers/scsi/snic/vnic_wq.c wq->to_use = wq->to_clean = wq->bufs[0]; wq 81 drivers/scsi/snic/vnic_wq.c void svnic_wq_free(struct vnic_wq *wq) wq 86 drivers/scsi/snic/vnic_wq.c vdev = wq->vdev; wq 88 drivers/scsi/snic/vnic_wq.c svnic_dev_free_desc_ring(vdev, &wq->ring); wq 91 drivers/scsi/snic/vnic_wq.c kfree(wq->bufs[i]); wq 92 drivers/scsi/snic/vnic_wq.c wq->bufs[i] = NULL; wq 95 drivers/scsi/snic/vnic_wq.c wq->ctrl = NULL; wq 99 drivers/scsi/snic/vnic_wq.c int vnic_wq_devcmd2_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, wq 104 drivers/scsi/snic/vnic_wq.c wq->index = 0; wq 105 drivers/scsi/snic/vnic_wq.c wq->vdev = vdev; wq 107 
drivers/scsi/snic/vnic_wq.c err = vnic_wq_get_ctrl(vdev, wq, 0, RES_TYPE_DEVCMD2); wq 114 drivers/scsi/snic/vnic_wq.c svnic_wq_disable(wq); wq 116 drivers/scsi/snic/vnic_wq.c err = vnic_wq_alloc_ring(vdev, wq, 0, desc_count, desc_size); wq 123 drivers/scsi/snic/vnic_wq.c int svnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, wq 128 drivers/scsi/snic/vnic_wq.c wq->index = index; wq 129 drivers/scsi/snic/vnic_wq.c wq->vdev = vdev; wq 131 drivers/scsi/snic/vnic_wq.c err = vnic_wq_get_ctrl(vdev, wq, index, RES_TYPE_WQ); wq 138 drivers/scsi/snic/vnic_wq.c svnic_wq_disable(wq); wq 140 drivers/scsi/snic/vnic_wq.c err = vnic_wq_alloc_ring(vdev, wq, index, desc_count, desc_size); wq 144 drivers/scsi/snic/vnic_wq.c err = vnic_wq_alloc_bufs(wq); wq 146 drivers/scsi/snic/vnic_wq.c svnic_wq_free(wq); wq 154 drivers/scsi/snic/vnic_wq.c void vnic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index, wq 160 drivers/scsi/snic/vnic_wq.c unsigned int count = wq->ring.desc_count; wq 162 drivers/scsi/snic/vnic_wq.c paddr = (u64)wq->ring.base_addr | VNIC_PADDR_TARGET; wq 163 drivers/scsi/snic/vnic_wq.c writeq(paddr, &wq->ctrl->ring_base); wq 164 drivers/scsi/snic/vnic_wq.c iowrite32(count, &wq->ctrl->ring_size); wq 165 drivers/scsi/snic/vnic_wq.c iowrite32(fetch_index, &wq->ctrl->fetch_index); wq 166 drivers/scsi/snic/vnic_wq.c iowrite32(posted_index, &wq->ctrl->posted_index); wq 167 drivers/scsi/snic/vnic_wq.c iowrite32(cq_index, &wq->ctrl->cq_index); wq 168 drivers/scsi/snic/vnic_wq.c iowrite32(error_interrupt_enable, &wq->ctrl->error_interrupt_enable); wq 169 drivers/scsi/snic/vnic_wq.c iowrite32(error_interrupt_offset, &wq->ctrl->error_interrupt_offset); wq 170 drivers/scsi/snic/vnic_wq.c iowrite32(0, &wq->ctrl->error_status); wq 172 drivers/scsi/snic/vnic_wq.c wq->to_use = wq->to_clean = wq 173 drivers/scsi/snic/vnic_wq.c &wq->bufs[fetch_index / VNIC_WQ_BUF_BLK_ENTRIES(count)] wq 177 drivers/scsi/snic/vnic_wq.c void svnic_wq_init(struct vnic_wq *wq, unsigned int cq_index, wq 181 drivers/scsi/snic/vnic_wq.c vnic_wq_init_start(wq, cq_index, 0, 0, error_interrupt_enable, wq 185 drivers/scsi/snic/vnic_wq.c unsigned int svnic_wq_error_status(struct vnic_wq *wq) wq 187 drivers/scsi/snic/vnic_wq.c return ioread32(&wq->ctrl->error_status); wq 190 drivers/scsi/snic/vnic_wq.c void svnic_wq_enable(struct vnic_wq *wq) wq 192 drivers/scsi/snic/vnic_wq.c iowrite32(1, &wq->ctrl->enable); wq 195 drivers/scsi/snic/vnic_wq.c int svnic_wq_disable(struct vnic_wq *wq) wq 199 drivers/scsi/snic/vnic_wq.c iowrite32(0, &wq->ctrl->enable); wq 203 drivers/scsi/snic/vnic_wq.c if (!(ioread32(&wq->ctrl->running))) wq 208 drivers/scsi/snic/vnic_wq.c pr_err("Failed to disable WQ[%d]\n", wq->index); wq 213 drivers/scsi/snic/vnic_wq.c void svnic_wq_clean(struct vnic_wq *wq, wq 214 drivers/scsi/snic/vnic_wq.c void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf)) wq 218 drivers/scsi/snic/vnic_wq.c BUG_ON(ioread32(&wq->ctrl->enable)); wq 220 drivers/scsi/snic/vnic_wq.c buf = wq->to_clean; wq 222 drivers/scsi/snic/vnic_wq.c while (svnic_wq_desc_used(wq) > 0) { wq 224 drivers/scsi/snic/vnic_wq.c (*buf_clean)(wq, buf); wq 226 drivers/scsi/snic/vnic_wq.c buf = wq->to_clean = buf->next; wq 227 drivers/scsi/snic/vnic_wq.c wq->ring.desc_avail++; wq 230 drivers/scsi/snic/vnic_wq.c wq->to_use = wq->to_clean = wq->bufs[0]; wq 232 drivers/scsi/snic/vnic_wq.c iowrite32(0, &wq->ctrl->fetch_index); wq 233 drivers/scsi/snic/vnic_wq.c iowrite32(0, &wq->ctrl->posted_index); wq 234 drivers/scsi/snic/vnic_wq.c iowrite32(0, 
&wq->ctrl->error_status); wq 236 drivers/scsi/snic/vnic_wq.c svnic_dev_clear_desc_ring(&wq->ring); wq 85 drivers/scsi/snic/vnic_wq.h static inline unsigned int svnic_wq_desc_avail(struct vnic_wq *wq) wq 88 drivers/scsi/snic/vnic_wq.h return wq->ring.desc_avail; wq 91 drivers/scsi/snic/vnic_wq.h static inline unsigned int svnic_wq_desc_used(struct vnic_wq *wq) wq 94 drivers/scsi/snic/vnic_wq.h return wq->ring.desc_count - wq->ring.desc_avail - 1; wq 97 drivers/scsi/snic/vnic_wq.h static inline void *svnic_wq_next_desc(struct vnic_wq *wq) wq 99 drivers/scsi/snic/vnic_wq.h return wq->to_use->desc; wq 102 drivers/scsi/snic/vnic_wq.h static inline void svnic_wq_post(struct vnic_wq *wq, wq 106 drivers/scsi/snic/vnic_wq.h struct vnic_wq_buf *buf = wq->to_use; wq 121 drivers/scsi/snic/vnic_wq.h iowrite32(buf->index, &wq->ctrl->posted_index); wq 123 drivers/scsi/snic/vnic_wq.h wq->to_use = buf; wq 125 drivers/scsi/snic/vnic_wq.h wq->ring.desc_avail--; wq 128 drivers/scsi/snic/vnic_wq.h static inline void svnic_wq_service(struct vnic_wq *wq, wq 130 drivers/scsi/snic/vnic_wq.h void (*buf_service)(struct vnic_wq *wq, wq 136 drivers/scsi/snic/vnic_wq.h buf = wq->to_clean; wq 139 drivers/scsi/snic/vnic_wq.h (*buf_service)(wq, cq_desc, buf, opaque); wq 141 drivers/scsi/snic/vnic_wq.h wq->ring.desc_avail++; wq 143 drivers/scsi/snic/vnic_wq.h wq->to_clean = buf->next; wq 148 drivers/scsi/snic/vnic_wq.h buf = wq->to_clean; wq 152 drivers/scsi/snic/vnic_wq.h void svnic_wq_free(struct vnic_wq *wq); wq 153 drivers/scsi/snic/vnic_wq.h int svnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, wq 155 drivers/scsi/snic/vnic_wq.h int vnic_wq_devcmd2_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, wq 157 drivers/scsi/snic/vnic_wq.h void vnic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index, wq 162 drivers/scsi/snic/vnic_wq.h void svnic_wq_init(struct vnic_wq *wq, unsigned int cq_index, wq 165 drivers/scsi/snic/vnic_wq.h unsigned int svnic_wq_error_status(struct vnic_wq *wq); wq 166 drivers/scsi/snic/vnic_wq.h void svnic_wq_enable(struct vnic_wq *wq); wq 167 drivers/scsi/snic/vnic_wq.h int svnic_wq_disable(struct vnic_wq *wq); wq 168 drivers/scsi/snic/vnic_wq.h void svnic_wq_clean(struct vnic_wq *wq, wq 169 drivers/scsi/snic/vnic_wq.h void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf)); wq 63 drivers/soc/aspeed/aspeed-lpc-snoop.c wait_queue_head_t wq; wq 90 drivers/soc/aspeed/aspeed-lpc-snoop.c ret = wait_event_interruptible(chan->wq, wq 105 drivers/soc/aspeed/aspeed-lpc-snoop.c poll_wait(file, &chan->wq, pt); wq 124 drivers/soc/aspeed/aspeed-lpc-snoop.c wake_up_interruptible(&chan->wq); wq 191 drivers/soc/aspeed/aspeed-lpc-snoop.c init_waitqueue_head(&lpc_snoop->chan[channel].wq); wq 1922 drivers/soc/fsl/qbman/qman.c int wq = 0; wq 1928 drivers/soc/fsl/qbman/qman.c wq = 4; wq 1930 drivers/soc/fsl/qbman/qman.c qm_fqd_set_destwq(&mcc->initfq.fqd, p->config->channel, wq); wq 2623 drivers/soc/fsl/qbman/qman.c u32 channel, wq, res; wq 2656 drivers/soc/fsl/qbman/qman.c wq = qm_fqd_get_wq(&mcr->queryfq.fqd); wq 2707 drivers/soc/fsl/qbman/qman.c qm_channel_pool1 + 1)<<4 | wq; wq 2710 drivers/soc/fsl/qbman/qman.c dequeue_wq = wq; wq 49 drivers/soc/fsl/qbman/qman_priv.h static inline u16 qm_mcr_querywq_get_chan(const struct qm_mcr_querywq *wq) wq 51 drivers/soc/fsl/qbman/qman_priv.h return wq->channel_wq >> 3; wq 579 drivers/soc/qcom/qmi_interface.c queue_work(qmi->wq, &qmi->work); wq 650 drivers/soc/qcom/qmi_interface.c qmi->wq = alloc_workqueue("qmi_msg_handler", WQ_UNBOUND, 1); wq 651 
drivers/soc/qcom/qmi_interface.c if (!qmi->wq) { wq 666 drivers/soc/qcom/qmi_interface.c destroy_workqueue(qmi->wq); wq 695 drivers/soc/qcom/qmi_interface.c destroy_workqueue(qmi->wq); wq 175 drivers/spi/spi-lantiq-ssc.c struct workqueue_struct *wq; wq 467 drivers/spi/spi-lantiq-ssc.c flush_workqueue(spi->wq); wq 641 drivers/spi/spi-lantiq-ssc.c queue_work(spi->wq, &spi->work); wq 673 drivers/spi/spi-lantiq-ssc.c queue_work(spi->wq, &spi->work); wq 910 drivers/spi/spi-lantiq-ssc.c spi->wq = alloc_ordered_workqueue(dev_name(dev), 0); wq 911 drivers/spi/spi-lantiq-ssc.c if (!spi->wq) { wq 938 drivers/spi/spi-lantiq-ssc.c destroy_workqueue(spi->wq); wq 959 drivers/spi/spi-lantiq-ssc.c destroy_workqueue(spi->wq); wq 337 drivers/staging/fieldbus/anybuss/host.c wait_queue_head_t *wq) wq 348 drivers/staging/fieldbus/anybuss/host.c wake_up(wq); wq 354 drivers/staging/fieldbus/anybuss/host.c wait_queue_head_t *wq) wq 358 drivers/staging/fieldbus/anybuss/host.c ret = ab_task_enqueue(t, q, slock, wq); wq 377 drivers/staging/fieldbus/anybuss/host.c wait_queue_head_t wq; wq 475 drivers/staging/fieldbus/anybuss/host.c wake_up(&cd->wq); wq 542 drivers/staging/fieldbus/anybuss/host.c err = ab_task_enqueue_wait(t, cd->powerq, &cd->qlock, &cd->wq); wq 844 drivers/staging/fieldbus/anybuss/host.c err = ab_task_enqueue_wait(t, cd->powerq, &cd->qlock, &cd->wq); wq 956 drivers/staging/fieldbus/anybuss/host.c ret = ab_task_enqueue(t, cd->powerq, &cd->qlock, &cd->wq); wq 991 drivers/staging/fieldbus/anybuss/host.c wait_event_timeout(cd->wq, wq 1071 drivers/staging/fieldbus/anybuss/host.c ret = ab_task_enqueue_wait(t, cd->powerq, &cd->qlock, &cd->wq); wq 1096 drivers/staging/fieldbus/anybuss/host.c ret = ab_task_enqueue_wait(t, cd->powerq, &cd->qlock, &cd->wq); wq 1121 drivers/staging/fieldbus/anybuss/host.c ret = ab_task_enqueue_wait(t, cd->powerq, &cd->qlock, &cd->wq); wq 1276 drivers/staging/fieldbus/anybuss/host.c init_waitqueue_head(&cd->wq); wq 47 drivers/staging/greybus/loopback.c wait_queue_head_t wq; wq 67 drivers/staging/greybus/loopback.c wait_queue_head_t wq; wq 262 drivers/staging/greybus/loopback.c wake_up(&gb->wq); wq 852 drivers/staging/greybus/loopback.c wait_event_interruptible(gb->wq, gb->type || wq 1006 drivers/staging/greybus/loopback.c init_waitqueue_head(&gb->wq); wq 252 drivers/staging/ks7010/ks7010_sdio.c queue_delayed_work(priv->wq, &priv->rw_dwork, 0); wq 274 drivers/staging/ks7010/ks7010_sdio.c queue_delayed_work(priv->wq, &priv->rw_dwork, 1); wq 279 drivers/staging/ks7010/ks7010_sdio.c queue_delayed_work(priv->wq, &priv->rw_dwork, 1); wq 365 drivers/staging/ks7010/ks7010_sdio.c queue_delayed_work(priv->wq, &priv->rw_dwork, 1); wq 375 drivers/staging/ks7010/ks7010_sdio.c queue_delayed_work(priv->wq, &priv->rw_dwork, 0); wq 403 drivers/staging/ks7010/ks7010_sdio.c queue_delayed_work(priv->wq, &priv->rw_dwork, 0); wq 486 drivers/staging/ks7010/ks7010_sdio.c queue_delayed_work(priv->wq, &priv->rw_dwork, 1); wq 504 drivers/staging/ks7010/ks7010_sdio.c queue_delayed_work(priv->wq, &priv->rw_dwork, 1); wq 595 drivers/staging/ks7010/ks7010_sdio.c queue_delayed_work(priv->wq, wq 606 drivers/staging/ks7010/ks7010_sdio.c queue_delayed_work(priv->wq, &priv->rw_dwork, 0); wq 1020 drivers/staging/ks7010/ks7010_sdio.c priv->wq = alloc_workqueue("wq", WQ_MEM_RECLAIM, 1); wq 1021 drivers/staging/ks7010/ks7010_sdio.c if (!priv->wq) { wq 1102 drivers/staging/ks7010/ks7010_sdio.c if (priv->wq) { wq 1103 drivers/staging/ks7010/ks7010_sdio.c flush_workqueue(priv->wq); wq 1104 
drivers/staging/ks7010/ks7010_sdio.c destroy_workqueue(priv->wq); wq 701 drivers/staging/ks7010/ks_hostif.c queue_delayed_work(priv->wq, &priv->rw_dwork, 1); wq 1512 drivers/staging/ks7010/ks_hostif.c queue_delayed_work(priv->wq, &priv->rw_dwork, 1); wq 436 drivers/staging/ks7010/ks_wlan.h struct workqueue_struct *wq; wq 56 drivers/staging/media/meson/vdec/esparser.c static DECLARE_WAIT_QUEUE_HEAD(wq); wq 71 drivers/staging/media/meson/vdec/esparser.c wake_up_interruptible(&wq); wq 119 drivers/staging/media/meson/vdec/esparser.c return wait_event_interruptible_timeout(wq, search_done, (HZ / 5)); wq 32 drivers/staging/most/cdev/cdev.c wait_queue_head_t wq; wq 203 drivers/staging/most/cdev/cdev.c if (wait_event_interruptible(c->wq, ch_has_mbo(c) || !c->dev)) wq 255 drivers/staging/most/cdev/cdev.c if (wait_event_interruptible(c->wq, wq 293 drivers/staging/most/cdev/cdev.c poll_wait(filp, &c->wq, wait); wq 347 drivers/staging/most/cdev/cdev.c wake_up_interruptible(&c->wq); wq 385 drivers/staging/most/cdev/cdev.c wake_up_interruptible(&c->wq); wq 412 drivers/staging/most/cdev/cdev.c wake_up_interruptible(&c->wq); wq 468 drivers/staging/most/cdev/cdev.c init_waitqueue_head(&c->wq); wq 145 drivers/staging/nvec/nvec.h struct workqueue_struct *wq; wq 1891 drivers/staging/rtl8192u/ieee80211/ieee80211.h struct workqueue_struct *wq; wq 372 drivers/staging/rtl8192u/ieee80211/ieee80211_softmac_wx.c queue_work(ieee->wq, &ieee->wx_sync_scan_wq); wq 3216 drivers/staging/rtl8192u/r8192U_core.c queue_work(ieee->wq, &ieee->associate_complete_wq); wq 3313 drivers/staging/rtl8192u/r8192U_core.c queue_work(priv->ieee80211->wq, wq 697 drivers/staging/unisys/visorhba/visorhba_main.c wait_queue_head_t *wq = wq 701 drivers/staging/unisys/visorhba/visorhba_main.c if (unlikely(!(wq && scsi_result_ptr))) { wq 711 drivers/staging/unisys/visorhba/visorhba_main.c wake_up_all(wq); wq 341 drivers/staging/uwb/lc-rc.c init_waitqueue_head(&rc->uwbd.wq); wq 89 drivers/staging/uwb/uwb.h wait_queue_head_t wq; wq 262 drivers/staging/uwb/uwbd.c rc->uwbd.wq, wq 330 drivers/staging/uwb/uwbd.c wake_up_all(&rc->uwbd.wq); wq 390 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c remote_event_create(wait_queue_head_t *wq, struct remote_event *event) wq 395 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c init_waitqueue_head(wq); wq 407 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c remote_event_wait(wait_queue_head_t *wq, struct remote_event *event) wq 412 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c if (wait_event_interruptible(*wq, event->fired)) { wq 425 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c remote_event_signal_local(wait_queue_head_t *wq, struct remote_event *event) wq 429 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c wake_up_all(wq); wq 433 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c remote_event_poll(wait_queue_head_t *wq, struct remote_event *event) wq 436 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c remote_event_signal_local(wq, event); wq 222 drivers/target/tcm_fc/tfc_conf.c struct workqueue_struct *wq; wq 253 drivers/target/tcm_fc/tfc_conf.c wq = alloc_workqueue("tcm_fc", 0, 1); wq 254 drivers/target/tcm_fc/tfc_conf.c if (!wq) { wq 261 drivers/target/tcm_fc/tfc_conf.c destroy_workqueue(wq); wq 265 drivers/target/tcm_fc/tfc_conf.c tpg->workqueue = wq; wq 127 drivers/tee/optee/optee_private.h void optee_wait_queue_init(struct optee_wait_queue *wq); wq 128 drivers/tee/optee/optee_private.h 
void optee_wait_queue_exit(struct optee_wait_queue *wq); wq 52 drivers/tee/optee/rpc.c static struct wq_entry *wq_entry_get(struct optee_wait_queue *wq, u32 key) wq 56 drivers/tee/optee/rpc.c mutex_lock(&wq->mu); wq 58 drivers/tee/optee/rpc.c list_for_each_entry(w, &wq->db, link) wq 66 drivers/tee/optee/rpc.c list_add_tail(&w->link, &wq->db); wq 69 drivers/tee/optee/rpc.c mutex_unlock(&wq->mu); wq 73 drivers/tee/optee/rpc.c static void wq_sleep(struct optee_wait_queue *wq, u32 key) wq 75 drivers/tee/optee/rpc.c struct wq_entry *w = wq_entry_get(wq, key); wq 79 drivers/tee/optee/rpc.c mutex_lock(&wq->mu); wq 81 drivers/tee/optee/rpc.c mutex_unlock(&wq->mu); wq 86 drivers/tee/optee/rpc.c static void wq_wakeup(struct optee_wait_queue *wq, u32 key) wq 88 drivers/tee/optee/rpc.c struct wq_entry *w = wq_entry_get(wq, key); wq 315 drivers/thunderbolt/domain.c destroy_workqueue(tb->wq); wq 363 drivers/thunderbolt/domain.c tb->wq = alloc_ordered_workqueue("thunderbolt%d", 0, tb->index); wq 364 drivers/thunderbolt/domain.c if (!tb->wq) wq 493 drivers/thunderbolt/domain.c flush_workqueue(tb->wq); wq 315 drivers/thunderbolt/icm.c mod_delayed_work(tb->wq, &icm->rescan_work, wq 1640 drivers/thunderbolt/icm.c queue_work(tb->wq, &n->work); wq 1996 drivers/thunderbolt/icm.c queue_delayed_work(tb->wq, &icm->rescan_work, msecs_to_jiffies(500)); wq 54 drivers/thunderbolt/tb.c queue_work(tb->wq, &ev->work); wq 552 drivers/thunderbolt/xdomain.c queue_delayed_work(tb->wq, &xd->get_properties_work, wq 905 drivers/thunderbolt/xdomain.c queue_delayed_work(xd->tb->wq, &xd->get_uuid_work, wq 937 drivers/thunderbolt/xdomain.c queue_delayed_work(xd->tb->wq, &xd->properties_changed_work, wq 939 drivers/thunderbolt/xdomain.c queue_delayed_work(xd->tb->wq, &xd->get_properties_work, wq 959 drivers/thunderbolt/xdomain.c queue_delayed_work(xd->tb->wq, &xd->get_properties_work, wq 1047 drivers/thunderbolt/xdomain.c queue_delayed_work(xd->tb->wq, wq 1154 drivers/thunderbolt/xdomain.c queue_delayed_work(xd->tb->wq, &xd->get_uuid_work, wq 1158 drivers/thunderbolt/xdomain.c queue_delayed_work(xd->tb->wq, &xd->properties_changed_work, wq 1160 drivers/thunderbolt/xdomain.c queue_delayed_work(xd->tb->wq, &xd->get_properties_work, wq 1602 drivers/thunderbolt/xdomain.c queue_delayed_work(xd->tb->wq, &xd->properties_changed_work, wq 223 drivers/usb/chipidea/ci.h struct workqueue_struct *wq; wq 639 drivers/usb/chipidea/core.c if (ci->wq && role != USB_ROLE_NONE) wq 640 drivers/usb/chipidea/core.c flush_workqueue(ci->wq); wq 1306 drivers/usb/chipidea/core.c if (ci->wq) wq 1307 drivers/usb/chipidea/core.c flush_workqueue(ci->wq); wq 230 drivers/usb/chipidea/otg.c ci->wq = create_freezable_workqueue("ci_otg"); wq 231 drivers/usb/chipidea/otg.c if (!ci->wq) { wq 248 drivers/usb/chipidea/otg.c if (ci->wq) { wq 249 drivers/usb/chipidea/otg.c flush_workqueue(ci->wq); wq 250 drivers/usb/chipidea/otg.c destroy_workqueue(ci->wq); wq 20 drivers/usb/chipidea/otg.h if (queue_work(ci->wq, &ci->work) == false) wq 2075 drivers/usb/gadget/function/f_tcm.c static void tcm_delayed_set_alt(struct work_struct *wq) wq 2077 drivers/usb/gadget/function/f_tcm.c struct guas_setup_wq *work = container_of(wq, struct guas_setup_wq, wq 90 drivers/usb/typec/tcpm/fusb302.c struct workqueue_struct *wq; wq 1081 drivers/usb/typec/tcpm/fusb302.c mod_delayed_work(chip->wq, &chip->bc_lvl_handler, wq 1549 drivers/usb/typec/tcpm/fusb302.c mod_delayed_work(chip->wq, &chip->bc_lvl_handler, wq 1723 drivers/usb/typec/tcpm/fusb302.c chip->wq = 
create_singlethread_workqueue(dev_name(chip->dev)); wq 1724 drivers/usb/typec/tcpm/fusb302.c if (!chip->wq) wq 1773 drivers/usb/typec/tcpm/fusb302.c destroy_workqueue(chip->wq); wq 1788 drivers/usb/typec/tcpm/fusb302.c destroy_workqueue(chip->wq); wq 198 drivers/usb/typec/tcpm/tcpm.c struct workqueue_struct *wq; wq 898 drivers/usb/typec/tcpm/tcpm.c mod_delayed_work(port->wq, &port->state_machine, wq 915 drivers/usb/typec/tcpm/tcpm.c mod_delayed_work(port->wq, &port->state_machine, 0); wq 936 drivers/usb/typec/tcpm/tcpm.c mod_delayed_work(port->wq, &port->state_machine, 0); wq 1220 drivers/usb/typec/tcpm/tcpm.c mod_delayed_work(port->wq, &port->vdm_state_machine, wq 1232 drivers/usb/typec/tcpm/tcpm.c mod_delayed_work(port->wq, &port->vdm_state_machine, 0); wq 1249 drivers/usb/typec/tcpm/tcpm.c mod_delayed_work(port->wq, &port->vdm_state_machine, 0); wq 1316 drivers/usb/typec/tcpm/tcpm.c mod_delayed_work(port->wq, &port->vdm_state_machine, wq 1497 drivers/usb/typec/tcpm/tcpm.c mod_delayed_work(port->wq, &port->vdm_state_machine, 0); wq 1513 drivers/usb/typec/tcpm/tcpm.c mod_delayed_work(port->wq, &port->vdm_state_machine, 0); wq 1526 drivers/usb/typec/tcpm/tcpm.c mod_delayed_work(port->wq, &port->vdm_state_machine, 0); wq 2008 drivers/usb/typec/tcpm/tcpm.c queue_work(port->wq, &event->work); wq 2062 drivers/usb/typec/tcpm/tcpm.c mod_delayed_work(port->wq, &port->state_machine, wq 3977 drivers/usb/typec/tcpm/tcpm.c queue_work(port->wq, &port->event_work); wq 3986 drivers/usb/typec/tcpm/tcpm.c queue_work(port->wq, &port->event_work); wq 3995 drivers/usb/typec/tcpm/tcpm.c queue_work(port->wq, &port->event_work); wq 4773 drivers/usb/typec/tcpm/tcpm.c port->wq = create_singlethread_workqueue(dev_name(dev)); wq 4774 drivers/usb/typec/tcpm/tcpm.c if (!port->wq) wq 4860 drivers/usb/typec/tcpm/tcpm.c destroy_workqueue(port->wq); wq 4875 drivers/usb/typec/tcpm/tcpm.c destroy_workqueue(port->wq); wq 1696 drivers/video/fbdev/omap2/omapfb/omapfb-main.c struct workqueue_struct *wq; wq 1698 drivers/video/fbdev/omap2/omapfb/omapfb-main.c wq = create_singlethread_workqueue("omapfb_auto_update"); wq 1700 drivers/video/fbdev/omap2/omapfb/omapfb-main.c if (wq == NULL) { wq 1706 drivers/video/fbdev/omap2/omapfb/omapfb-main.c fbdev->auto_update_wq = wq; wq 50 drivers/xen/pvcalls-back.c struct workqueue_struct *wq; wq 80 drivers/xen/pvcalls-back.c struct workqueue_struct *wq; wq 296 drivers/xen/pvcalls-back.c queue_work(iow->wq, &iow->register_work); wq 350 drivers/xen/pvcalls-back.c map->ioworker.wq = alloc_workqueue("pvcalls_io", WQ_UNBOUND, 1); wq 351 drivers/xen/pvcalls-back.c if (!map->ioworker.wq) wq 458 drivers/xen/pvcalls-back.c flush_workqueue(mappass->wq); wq 459 drivers/xen/pvcalls-back.c destroy_workqueue(mappass->wq); wq 559 drivers/xen/pvcalls-back.c queue_work(iow->wq, &iow->register_work); wq 603 drivers/xen/pvcalls-back.c queue_work(mappass->wq, &mappass->register_work); wq 625 drivers/xen/pvcalls-back.c map->wq = alloc_workqueue("pvcalls_wq", WQ_UNBOUND, 1); wq 626 drivers/xen/pvcalls-back.c if (!map->wq) { wq 660 drivers/xen/pvcalls-back.c if (map && map->wq) wq 661 drivers/xen/pvcalls-back.c destroy_workqueue(map->wq); wq 730 drivers/xen/pvcalls-back.c queue_work(mappass->wq, &mappass->register_work); wq 901 drivers/xen/pvcalls-back.c queue_work(iow->wq, &iow->register_work); wq 502 drivers/xen/xen-acpi-processor.c static DECLARE_WORK(wq, xen_acpi_processor_resume_worker); wq 511 drivers/xen/xen-acpi-processor.c schedule_work(&wq); wq 77 drivers/xen/xenbus/xenbus.h wait_queue_head_t wq; wq 403 
drivers/xen/xenbus/xenbus_comms.c wake_up(&state.req->wq); wq 121 drivers/xen/xenbus/xenbus_dev_frontend.c struct work_struct wq; wq 306 drivers/xen/xenbus/xenbus_dev_frontend.c static void xenbus_worker(struct work_struct *wq) wq 313 drivers/xen/xenbus/xenbus_dev_frontend.c u = container_of(wq, struct xenbus_file_priv, wq); wq 348 drivers/xen/xenbus/xenbus_dev_frontend.c schedule_work(&u->wq); wq 668 drivers/xen/xenbus/xenbus_dev_frontend.c INIT_WORK(&u->wq, xenbus_worker); wq 209 drivers/xen/xenbus/xenbus_xs.c wait_event(req->wq, test_reply(req)); wq 234 drivers/xen/xenbus/xenbus_xs.c init_waitqueue_head(&req->wq); wq 273 drivers/xen/xenbus/xenbus_xs.c wake_up(&req->wq); wq 915 drivers/xen/xenbus/xenbus_xs.c wake_up(&req->wq); wq 917 drivers/xen/xenbus/xenbus_xs.c wake_up(&req->wq); wq 213 fs/afs/dir_silly.c DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); wq 219 fs/afs/dir_silly.c alias = d_alloc_parallel(dentry->d_parent, &dentry->d_name, &wq); wq 17 fs/autofs/waitq.c struct autofs_wait_queue *wq, *nwq; wq 28 fs/autofs/waitq.c wq = sbi->queues; wq 30 fs/autofs/waitq.c while (wq) { wq 31 fs/autofs/waitq.c nwq = wq->next; wq 32 fs/autofs/waitq.c wq->status = -ENOENT; /* Magic is gone - report failure */ wq 33 fs/autofs/waitq.c kfree(wq->name.name); wq 34 fs/autofs/waitq.c wq->name.name = NULL; wq 35 fs/autofs/waitq.c wq->wait_ctr--; wq 36 fs/autofs/waitq.c wake_up_interruptible(&wq->queue); wq 37 fs/autofs/waitq.c wq = nwq; wq 79 fs/autofs/waitq.c struct autofs_wait_queue *wq, wq 92 fs/autofs/waitq.c (unsigned long) wq->wait_queue_token, wq 93 fs/autofs/waitq.c wq->name.len, wq->name.name, type); wq 108 fs/autofs/waitq.c mp->wait_queue_token = wq->wait_queue_token; wq 109 fs/autofs/waitq.c mp->len = wq->name.len; wq 110 fs/autofs/waitq.c memcpy(mp->name, wq->name.name, wq->name.len); wq 111 fs/autofs/waitq.c mp->name[wq->name.len] = '\0'; wq 121 fs/autofs/waitq.c ep->wait_queue_token = wq->wait_queue_token; wq 122 fs/autofs/waitq.c ep->len = wq->name.len; wq 123 fs/autofs/waitq.c memcpy(ep->name, wq->name.name, wq->name.len); wq 124 fs/autofs/waitq.c ep->name[wq->name.len] = '\0'; wq 141 fs/autofs/waitq.c packet->wait_queue_token = wq->wait_queue_token; wq 142 fs/autofs/waitq.c packet->len = wq->name.len; wq 143 fs/autofs/waitq.c memcpy(packet->name, wq->name.name, wq->name.len); wq 144 fs/autofs/waitq.c packet->name[wq->name.len] = '\0'; wq 145 fs/autofs/waitq.c packet->dev = wq->dev; wq 146 fs/autofs/waitq.c packet->ino = wq->ino; wq 147 fs/autofs/waitq.c packet->uid = from_kuid_munged(user_ns, wq->uid); wq 148 fs/autofs/waitq.c packet->gid = from_kgid_munged(user_ns, wq->gid); wq 149 fs/autofs/waitq.c packet->pid = wq->pid; wq 150 fs/autofs/waitq.c packet->tgid = wq->tgid; wq 169 fs/autofs/waitq.c autofs_wait_release(sbi, wq->wait_queue_token, ret); wq 226 fs/autofs/waitq.c struct autofs_wait_queue *wq; wq 228 fs/autofs/waitq.c for (wq = sbi->queues; wq; wq = wq->next) { wq 229 fs/autofs/waitq.c if (wq->name.hash == qstr->hash && wq 230 fs/autofs/waitq.c wq->name.len == qstr->len && wq 231 fs/autofs/waitq.c wq->name.name && wq 232 fs/autofs/waitq.c !memcmp(wq->name.name, qstr->name, qstr->len)) wq 235 fs/autofs/waitq.c return wq; wq 252 fs/autofs/waitq.c struct autofs_wait_queue *wq; wq 259 fs/autofs/waitq.c wq = autofs_find_wait(sbi, qstr); wq 260 fs/autofs/waitq.c if (wq) { wq 261 fs/autofs/waitq.c *wait = wq; wq 293 fs/autofs/waitq.c wq = autofs_find_wait(sbi, qstr); wq 294 fs/autofs/waitq.c if (wq) { wq 295 fs/autofs/waitq.c *wait = wq; wq 351 fs/autofs/waitq.c struct autofs_wait_queue *wq; 
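The autofs entries here, like the xenbus ones just above, open-code the kernel's classic waitqueue handshake: the sleeper initializes a wait_queue_head_t and blocks in a wait_event*() macro until a condition holds; the completer publishes a result and calls wake_up(). A minimal sketch of that handshake, assuming hypothetical demo_* names (illustrative glue only, not code from any file indexed here):

    #include <linux/wait.h>
    #include <linux/errno.h>
    #include <linux/types.h>

    struct demo_req {
            wait_queue_head_t wq;   /* sleepers park here */
            bool done;              /* the wait_event() condition */
            int status;             /* result published by the completer */
    };

    static void demo_req_init(struct demo_req *req)
    {
            init_waitqueue_head(&req->wq);
            req->done = false;
    }

    /* Waiter side: sleep until the completer marks the request done. */
    static int demo_req_wait(struct demo_req *req)
    {
            if (wait_event_interruptible(req->wq, req->done))
                    return -ERESTARTSYS;    /* a signal arrived first */
            return req->status;
    }

    /* Completer side: publish the result, then wake any sleepers. */
    static void demo_req_complete(struct demo_req *req, int status)
    {
            req->status = status;
            req->done = true;
            wake_up(&req->wq);      /* pairs with the condition re-check above */
    }

The same shape appears just below in the autofs entries, with wait_event_killable() on the sleeper side (wq 479) and a wake_up() after the status store in the release path (wq 546).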
wq 409 fs/autofs/waitq.c ret = validate_request(&wq, sbi, &qstr, path, notify); wq 417 fs/autofs/waitq.c if (!wq) { wq 419 fs/autofs/waitq.c wq = kmalloc(sizeof(struct autofs_wait_queue), GFP_KERNEL); wq 420 fs/autofs/waitq.c if (!wq) { wq 426 fs/autofs/waitq.c wq->wait_queue_token = autofs_next_wait_queue; wq 429 fs/autofs/waitq.c wq->next = sbi->queues; wq 430 fs/autofs/waitq.c sbi->queues = wq; wq 431 fs/autofs/waitq.c init_waitqueue_head(&wq->queue); wq 432 fs/autofs/waitq.c memcpy(&wq->name, &qstr, sizeof(struct qstr)); wq 433 fs/autofs/waitq.c wq->dev = autofs_get_dev(sbi); wq 434 fs/autofs/waitq.c wq->ino = autofs_get_ino(sbi); wq 435 fs/autofs/waitq.c wq->uid = current_uid(); wq 436 fs/autofs/waitq.c wq->gid = current_gid(); wq 437 fs/autofs/waitq.c wq->pid = pid; wq 438 fs/autofs/waitq.c wq->tgid = tgid; wq 439 fs/autofs/waitq.c wq->status = -EINTR; /* Status return if interrupted */ wq 440 fs/autofs/waitq.c wq->wait_ctr = 2; wq 459 fs/autofs/waitq.c (unsigned long) wq->wait_queue_token, wq->name.len, wq 460 fs/autofs/waitq.c wq->name.name, notify); wq 465 fs/autofs/waitq.c autofs_notify_daemon(sbi, wq, type); wq 467 fs/autofs/waitq.c wq->wait_ctr++; wq 469 fs/autofs/waitq.c (unsigned long) wq->wait_queue_token, wq->name.len, wq 470 fs/autofs/waitq.c wq->name.name, notify); wq 479 fs/autofs/waitq.c wait_event_killable(wq->queue, wq->name.name == NULL); wq 480 fs/autofs/waitq.c status = wq->status; wq 507 fs/autofs/waitq.c ino->uid = wq->uid; wq 508 fs/autofs/waitq.c ino->gid = wq->gid; wq 518 fs/autofs/waitq.c if (!--wq->wait_ctr) wq 519 fs/autofs/waitq.c kfree(wq); wq 529 fs/autofs/waitq.c struct autofs_wait_queue *wq, **wql; wq 532 fs/autofs/waitq.c for (wql = &sbi->queues; (wq = *wql) != NULL; wql = &wq->next) { wq 533 fs/autofs/waitq.c if (wq->wait_queue_token == wait_queue_token) wq 537 fs/autofs/waitq.c if (!wq) { wq 542 fs/autofs/waitq.c *wql = wq->next; /* Unlink from chain */ wq 543 fs/autofs/waitq.c kfree(wq->name.name); wq 544 fs/autofs/waitq.c wq->name.name = NULL; /* Do not wait on this queue */ wq 545 fs/autofs/waitq.c wq->status = status; wq 546 fs/autofs/waitq.c wake_up(&wq->queue); wq 547 fs/autofs/waitq.c if (!--wq->wait_ctr) wq 548 fs/autofs/waitq.c kfree(wq); wq 1084 fs/block_dev.c wait_queue_head_t *wq = bit_waitqueue(&whole->bd_claiming, 0); wq 1087 fs/block_dev.c prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE); wq 1090 fs/block_dev.c finish_wait(wq, &wait); wq 57 fs/btrfs/async-thread.c btrfs_workqueue_owner(const struct __btrfs_workqueue *wq) wq 59 fs/btrfs/async-thread.c return wq->fs_info; wq 65 fs/btrfs/async-thread.c return work->wq->fs_info; wq 68 fs/btrfs/async-thread.c bool btrfs_workqueue_normal_congested(const struct btrfs_workqueue *wq) wq 76 fs/btrfs/async-thread.c if (wq->normal->thresh == NO_THRESHOLD) wq 79 fs/btrfs/async-thread.c return atomic_read(&wq->normal->pending) > wq->normal->thresh * 2; wq 129 fs/btrfs/async-thread.c __btrfs_destroy_workqueue(struct __btrfs_workqueue *wq); wq 167 fs/btrfs/async-thread.c static inline void thresh_queue_hook(struct __btrfs_workqueue *wq) wq 169 fs/btrfs/async-thread.c if (wq->thresh == NO_THRESHOLD) wq 171 fs/btrfs/async-thread.c atomic_inc(&wq->pending); wq 179 fs/btrfs/async-thread.c static inline void thresh_exec_hook(struct __btrfs_workqueue *wq) wq 185 fs/btrfs/async-thread.c if (wq->thresh == NO_THRESHOLD) wq 188 fs/btrfs/async-thread.c atomic_dec(&wq->pending); wq 189 fs/btrfs/async-thread.c spin_lock(&wq->thres_lock); wq 194 fs/btrfs/async-thread.c wq->count++; wq 195 fs/btrfs/async-thread.c 
wq->count %= (wq->thresh / 4); wq 196 fs/btrfs/async-thread.c if (!wq->count) wq 198 fs/btrfs/async-thread.c new_current_active = wq->current_active; wq 204 fs/btrfs/async-thread.c pending = atomic_read(&wq->pending); wq 205 fs/btrfs/async-thread.c if (pending > wq->thresh) wq 207 fs/btrfs/async-thread.c if (pending < wq->thresh / 2) wq 209 fs/btrfs/async-thread.c new_current_active = clamp_val(new_current_active, 1, wq->limit_active); wq 210 fs/btrfs/async-thread.c if (new_current_active != wq->current_active) { wq 212 fs/btrfs/async-thread.c wq->current_active = new_current_active; wq 215 fs/btrfs/async-thread.c spin_unlock(&wq->thres_lock); wq 218 fs/btrfs/async-thread.c workqueue_set_max_active(wq->normal_wq, wq->current_active); wq 222 fs/btrfs/async-thread.c static void run_ordered_work(struct __btrfs_workqueue *wq, wq 225 fs/btrfs/async-thread.c struct list_head *list = &wq->ordered_list; wq 227 fs/btrfs/async-thread.c spinlock_t *lock = &wq->list_lock; wq 290 fs/btrfs/async-thread.c trace_btrfs_all_work_done(wq->fs_info, wtag); wq 298 fs/btrfs/async-thread.c trace_btrfs_all_work_done(wq->fs_info, wtag); wq 306 fs/btrfs/async-thread.c struct __btrfs_workqueue *wq; wq 320 fs/btrfs/async-thread.c wq = work->wq; wq 325 fs/btrfs/async-thread.c thresh_exec_hook(wq); wq 329 fs/btrfs/async-thread.c run_ordered_work(wq, work); wq 332 fs/btrfs/async-thread.c trace_btrfs_all_work_done(wq->fs_info, wtag); wq 346 fs/btrfs/async-thread.c static inline void __btrfs_queue_work(struct __btrfs_workqueue *wq, wq 351 fs/btrfs/async-thread.c work->wq = wq; wq 352 fs/btrfs/async-thread.c thresh_queue_hook(wq); wq 354 fs/btrfs/async-thread.c spin_lock_irqsave(&wq->list_lock, flags); wq 355 fs/btrfs/async-thread.c list_add_tail(&work->ordered_list, &wq->ordered_list); wq 356 fs/btrfs/async-thread.c spin_unlock_irqrestore(&wq->list_lock, flags); wq 359 fs/btrfs/async-thread.c queue_work(wq->normal_wq, &work->normal_work); wq 362 fs/btrfs/async-thread.c void btrfs_queue_work(struct btrfs_workqueue *wq, wq 367 fs/btrfs/async-thread.c if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags) && wq->high) wq 368 fs/btrfs/async-thread.c dest_wq = wq->high; wq 370 fs/btrfs/async-thread.c dest_wq = wq->normal; wq 375 fs/btrfs/async-thread.c __btrfs_destroy_workqueue(struct __btrfs_workqueue *wq) wq 377 fs/btrfs/async-thread.c destroy_workqueue(wq->normal_wq); wq 378 fs/btrfs/async-thread.c trace_btrfs_workqueue_destroy(wq); wq 379 fs/btrfs/async-thread.c kfree(wq); wq 382 fs/btrfs/async-thread.c void btrfs_destroy_workqueue(struct btrfs_workqueue *wq) wq 384 fs/btrfs/async-thread.c if (!wq) wq 386 fs/btrfs/async-thread.c if (wq->high) wq 387 fs/btrfs/async-thread.c __btrfs_destroy_workqueue(wq->high); wq 388 fs/btrfs/async-thread.c __btrfs_destroy_workqueue(wq->normal); wq 389 fs/btrfs/async-thread.c kfree(wq); wq 392 fs/btrfs/async-thread.c void btrfs_workqueue_set_max(struct btrfs_workqueue *wq, int limit_active) wq 394 fs/btrfs/async-thread.c if (!wq) wq 396 fs/btrfs/async-thread.c wq->normal->limit_active = limit_active; wq 397 fs/btrfs/async-thread.c if (wq->high) wq 398 fs/btrfs/async-thread.c wq->high->limit_active = limit_active; wq 406 fs/btrfs/async-thread.c void btrfs_flush_workqueue(struct btrfs_workqueue *wq) wq 408 fs/btrfs/async-thread.c if (wq->high) wq 409 fs/btrfs/async-thread.c flush_workqueue(wq->high->normal_wq); wq 411 fs/btrfs/async-thread.c flush_workqueue(wq->normal->normal_wq); wq 28 fs/btrfs/async-thread.h struct __btrfs_workqueue *wq; wq 39 fs/btrfs/async-thread.h void btrfs_queue_work(struct 
btrfs_workqueue *wq, wq 41 fs/btrfs/async-thread.h void btrfs_destroy_workqueue(struct btrfs_workqueue *wq); wq 42 fs/btrfs/async-thread.h void btrfs_workqueue_set_max(struct btrfs_workqueue *wq, int max); wq 45 fs/btrfs/async-thread.h struct btrfs_fs_info *btrfs_workqueue_owner(const struct __btrfs_workqueue *wq); wq 46 fs/btrfs/async-thread.h bool btrfs_workqueue_normal_congested(const struct btrfs_workqueue *wq); wq 47 fs/btrfs/async-thread.h void btrfs_flush_workqueue(struct btrfs_workqueue *wq); wq 708 fs/btrfs/disk-io.c struct btrfs_workqueue *wq; wq 715 fs/btrfs/disk-io.c wq = fs_info->endio_meta_write_workers; wq 717 fs/btrfs/disk-io.c wq = fs_info->endio_freespace_worker; wq 719 fs/btrfs/disk-io.c wq = fs_info->endio_raid56_workers; wq 721 fs/btrfs/disk-io.c wq = fs_info->endio_write_workers; wq 724 fs/btrfs/disk-io.c wq = fs_info->endio_repair_workers; wq 726 fs/btrfs/disk-io.c wq = fs_info->endio_raid56_workers; wq 728 fs/btrfs/disk-io.c wq = fs_info->endio_meta_workers; wq 730 fs/btrfs/disk-io.c wq = fs_info->endio_workers; wq 734 fs/btrfs/disk-io.c btrfs_queue_work(wq, &end_io_wq->work); wq 284 fs/btrfs/extent_io.c ASSERT(!waitqueue_active(&state->wq)); wq 309 fs/btrfs/extent_io.c init_waitqueue_head(&state->wq); wq 622 fs/btrfs/extent_io.c wake_up(&state->wq); wq 790 fs/btrfs/extent_io.c wake_up(&state->wq); wq 829 fs/btrfs/extent_io.c prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE); wq 833 fs/btrfs/extent_io.c finish_wait(&state->wq, &wait); wq 145 fs/btrfs/extent_io.h wait_queue_head_t wq; wq 3345 fs/btrfs/inode.c struct btrfs_workqueue *wq; wq 3355 fs/btrfs/inode.c wq = fs_info->endio_freespace_worker; wq 3357 fs/btrfs/inode.c wq = fs_info->endio_write_workers; wq 3360 fs/btrfs/inode.c btrfs_queue_work(wq, &ordered_extent->work); wq 8320 fs/btrfs/inode.c struct btrfs_workqueue *wq; wq 8326 fs/btrfs/inode.c wq = fs_info->endio_freespace_worker; wq 8328 fs/btrfs/inode.c wq = fs_info->endio_write_workers; wq 8338 fs/btrfs/inode.c btrfs_queue_work(wq, &ordered->work); wq 12 fs/btrfs/misc.h static inline void cond_wake_up(struct wait_queue_head *wq) wq 18 fs/btrfs/misc.h if (wq_has_sleeper(wq)) wq 19 fs/btrfs/misc.h wake_up(wq); wq 22 fs/btrfs/misc.h static inline void cond_wake_up_nomb(struct wait_queue_head *wq) wq 30 fs/btrfs/misc.h if (waitqueue_active(wq)) wq 31 fs/btrfs/misc.h wake_up(wq); wq 195 fs/cachefiles/namei.c wait_queue_head_t *wq; wq 212 fs/cachefiles/namei.c wq = bit_waitqueue(&xobject->flags, CACHEFILES_OBJECT_ACTIVE); wq 216 fs/cachefiles/namei.c prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE); wq 222 fs/cachefiles/namei.c finish_wait(wq, &wait); wq 83 fs/cifs/readdir.c DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); wq 98 fs/cifs/readdir.c dentry = d_alloc_parallel(parent, name, &wq); wq 188 fs/dax.c wait_queue_head_t *wq; wq 190 fs/dax.c wq = dax_entry_waitqueue(xas, entry, &key); wq 198 fs/dax.c if (waitqueue_active(wq)) wq 199 fs/dax.c __wake_up(wq, TASK_NORMAL, wake_all ? 
0 : 1, &key); wq 216 fs/dax.c wait_queue_head_t *wq; wq 230 fs/dax.c wq = dax_entry_waitqueue(xas, entry, &ewait.key); wq 231 fs/dax.c prepare_to_wait_exclusive(wq, &ewait.wait, wq 236 fs/dax.c finish_wait(wq, &ewait.wait); wq 249 fs/dax.c wait_queue_head_t *wq; wq 254 fs/dax.c wq = dax_entry_waitqueue(xas, entry, &ewait.key); wq 261 fs/dax.c prepare_to_wait(wq, &ewait.wait, TASK_UNINTERRUPTIBLE); wq 264 fs/dax.c finish_wait(wq, &ewait.wait); wq 2514 fs/dcache.c wait_queue_head_t *wq) wq 2611 fs/dcache.c new->d_wait = wq; wq 615 fs/direct-io.c struct workqueue_struct *wq = alloc_workqueue("dio/%s", wq 618 fs/direct-io.c if (!wq) wq 623 fs/direct-io.c old = cmpxchg(&sb->s_dio_done_wq, NULL, wq); wq 626 fs/direct-io.c destroy_workqueue(wq); wq 191 fs/eventpoll.c wait_queue_head_t wq; wq 568 fs/eventpoll.c static void ep_poll_safewake(wait_queue_head_t *wq) wq 573 fs/eventpoll.c ep_poll_wakeup_proc, NULL, wq, (void *) (long) this_cpu); wq 580 fs/eventpoll.c static void ep_poll_safewake(wait_queue_head_t *wq) wq 582 fs/eventpoll.c wake_up_poll(wq, EPOLLIN); wq 747 fs/eventpoll.c if (waitqueue_active(&ep->wq)) wq 748 fs/eventpoll.c wake_up(&ep->wq); wq 1027 fs/eventpoll.c init_waitqueue_head(&ep->wq); wq 1261 fs/eventpoll.c if (waitqueue_active(&ep->wq)) { wq 1278 fs/eventpoll.c wake_up(&ep->wq); wq 1580 fs/eventpoll.c if (waitqueue_active(&ep->wq)) wq 1581 fs/eventpoll.c wake_up(&ep->wq); wq 1686 fs/eventpoll.c if (waitqueue_active(&ep->wq)) wq 1687 fs/eventpoll.c wake_up(&ep->wq); wq 1888 fs/eventpoll.c __add_wait_queue_exclusive(&ep->wq, &wait); wq 1930 fs/eventpoll.c __remove_wait_queue(&ep->wq, &wait); wq 109 fs/ext4/file.c wait_queue_head_t *wq = ext4_ioend_wq(inode); wq 111 fs/ext4/file.c wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_unwritten) == 0)); wq 193 fs/ext4/page-io.c struct workqueue_struct *wq; wq 200 fs/ext4/page-io.c wq = sbi->rsv_conversion_wq; wq 202 fs/ext4/page-io.c queue_work(wq, &ei->i_rsv_conversion_work); wq 27 fs/f2fs/gc.c wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head; wq 34 fs/f2fs/gc.c wait_event_interruptible_timeout(*wq, wq 1327 fs/fs-writeback.c DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC); wq 1333 fs/fs-writeback.c __wait_on_bit(wqh, &wq, bit_wait, wq 37 fs/fscache/page.c wait_queue_head_t *wq = bit_waitqueue(&cookie->flags, 0); wq 41 fs/fscache/page.c wait_event(*wq, !__fscache_check_page_write(cookie, page)); wq 52 fs/fscache/page.c wait_queue_head_t *wq = bit_waitqueue(&cookie->flags, 0); wq 54 fs/fscache/page.c return wait_event_timeout(*wq, !__fscache_check_page_write(cookie, page), wq 161 fs/fuse/readdir.c DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); wq 196 fs/fuse/readdir.c dentry = d_alloc_parallel(parent, &name, &wq); wq 118 fs/gfs2/glock.c wait_queue_head_t *wq = glock_waitqueue(&gl->gl_name); wq 120 fs/gfs2/glock.c if (waitqueue_active(wq)) wq 121 fs/gfs2/glock.c __wake_up(wq, TASK_NORMAL, 1, &gl->gl_name); wq 748 fs/gfs2/glock.c wait_queue_head_t *wq = glock_waitqueue(name); wq 756 fs/gfs2/glock.c prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE); wq 774 fs/gfs2/glock.c finish_wait(wq, &wait.wait); wq 1956 fs/inode.c wait_queue_head_t *wq; wq 1958 fs/inode.c wq = bit_waitqueue(&inode->i_state, __I_NEW); wq 1959 fs/inode.c prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE); wq 1963 fs/inode.c finish_wait(wq, &wait.wq_entry); wq 2098 fs/inode.c wait_queue_head_t *wq = bit_waitqueue(&inode->i_state, __I_DIO_WAKEUP); wq 2102 fs/inode.c prepare_to_wait(wq, &q.wq_entry, TASK_UNINTERRUPTIBLE); wq 2106 fs/inode.c finish_wait(wq, 
&q.wq_entry); wq 2935 fs/io_uring.c struct wait_queue_entry wq; wq 2958 fs/io_uring.c wq); wq 2974 fs/io_uring.c .wq = { wq 2977 fs/io_uring.c .entry = LIST_HEAD_INIT(iowq.wq.entry), wq 3004 fs/io_uring.c prepare_to_wait_exclusive(&ctx->wait, &iowq.wq, wq 3014 fs/io_uring.c finish_wait(&ctx->wait, &iowq.wq); wq 2590 fs/jbd2/journal.c wait_queue_head_t *wq; wq 2592 fs/jbd2/journal.c wq = bit_waitqueue(&jinode->i_flags, __JI_COMMIT_RUNNING); wq 2593 fs/jbd2/journal.c prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE); wq 2596 fs/jbd2/journal.c finish_wait(wq, &wait.wq_entry); wq 41 fs/jffs2/os-linux.h #define sleep_on_spinunlock(wq, s) \ wq 44 fs/jffs2/os-linux.h add_wait_queue((wq), &__wait); \ wq 48 fs/jffs2/os-linux.h remove_wait_queue((wq), &__wait); \ wq 22 fs/jfs/jfs_lock.h #define __SLEEP_COND(wq, cond, lock_cmd, unlock_cmd) \ wq 26 fs/jfs/jfs_lock.h add_wait_queue(&wq, &__wait); \ wq 36 fs/jfs/jfs_lock.h remove_wait_queue(&wq, &__wait); \ wq 116 fs/jfs/jfs_logmgr.c #define LCACHE_SLEEP_COND(wq, cond, flags) \ wq 120 fs/jfs/jfs_logmgr.c __SLEEP_COND(wq, cond, LCACHE_LOCK(flags), LCACHE_UNLOCK(flags)); \ wq 2787 fs/jfs/jfs_txnmgr.c DECLARE_WAITQUEUE(wq, current); wq 2789 fs/jfs/jfs_txnmgr.c add_wait_queue(&jfs_commit_thread_wait, &wq); wq 2793 fs/jfs/jfs_txnmgr.c remove_wait_queue(&jfs_commit_thread_wait, &wq); wq 1641 fs/namei.c DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); wq 1647 fs/namei.c dentry = d_alloc_parallel(dir, name, &wq); wq 3129 fs/namei.c DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); wq 3138 fs/namei.c dentry = d_alloc_parallel(dir, &nd->last, &wq); wq 62 fs/nfs/blocklayout/rpc_pipefs.c DECLARE_WAITQUEUE(wq, current); wq 87 fs/nfs/blocklayout/rpc_pipefs.c add_wait_queue(&nn->bl_wq, &wq); wq 90 fs/nfs/blocklayout/rpc_pipefs.c remove_wait_queue(&nn->bl_wq, &wq); wq 96 fs/nfs/blocklayout/rpc_pipefs.c remove_wait_queue(&nn->bl_wq, &wq); wq 111 fs/nfs/callback.c DEFINE_WAIT(wq); wq 120 fs/nfs/callback.c prepare_to_wait(&serv->sv_cb_waitq, &wq, TASK_INTERRUPTIBLE); wq 127 fs/nfs/callback.c finish_wait(&serv->sv_cb_waitq, &wq); wq 136 fs/nfs/callback.c finish_wait(&serv->sv_cb_waitq, &wq); wq 460 fs/nfs/dir.c DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); wq 490 fs/nfs/dir.c dentry = d_alloc_parallel(parent, &filename, &wq); wq 1520 fs/nfs/dir.c DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); wq 1575 fs/nfs/dir.c &dentry->d_name, &wq); wq 2144 fs/nfs/inode.c struct workqueue_struct *wq; wq 2146 fs/nfs/inode.c wq = alloc_workqueue("nfsiod", WQ_MEM_RECLAIM, 0); wq 2147 fs/nfs/inode.c if (wq == NULL) wq 2149 fs/nfs/inode.c nfsiod_workqueue = wq; wq 2158 fs/nfs/inode.c struct workqueue_struct *wq; wq 2160 fs/nfs/inode.c wq = nfsiod_workqueue; wq 2161 fs/nfs/inode.c if (wq == NULL) wq 2164 fs/nfs/inode.c destroy_workqueue(wq); wq 123 fs/nfs/unlink.c alias = d_alloc_parallel(dentry->d_parent, &data->args.name, &data->wq); wq 183 fs/nfs/unlink.c init_waitqueue_head(&data->wq); wq 2153 fs/nilfs2/segment.c wait_queue_entry_t wq; wq 2165 fs/nilfs2/segment.c init_wait(&wait_req.wq); wq 2171 fs/nilfs2/segment.c init_waitqueue_entry(&wait_req.wq, current); wq 2172 fs/nilfs2/segment.c add_wait_queue(&sci->sc_wait_request, &wait_req.wq); wq 2188 fs/nilfs2/segment.c finish_wait(&sci->sc_wait_request, &wait_req.wq); wq 2198 fs/nilfs2/segment.c list_for_each_entry_safe(wrq, n, &sci->sc_wait_request.head, wq.entry) { wq 2205 fs/nilfs2/segment.c wrq->wq.func(&wrq->wq, wq 49 fs/ocfs2/dlm/dlmcommon.h wait_queue_head_t wq; wq 315 fs/ocfs2/dlm/dlmcommon.h wait_queue_head_t wq; wq 79 fs/ocfs2/dlm/dlmconvert.c wake_up(&res->wq); wq 
343 fs/ocfs2/dlm/dlmconvert.c wake_up(&res->wq); wq 530 fs/ocfs2/dlm/dlmconvert.c wake_up(&res->wq); wq 168 fs/ocfs2/dlm/dlmlock.c wake_up(&res->wq); wq 273 fs/ocfs2/dlm/dlmlock.c wake_up(&res->wq); wq 265 fs/ocfs2/dlm/dlmmaster.c init_waitqueue_head(&mle->wq); wq 541 fs/ocfs2/dlm/dlmmaster.c init_waitqueue_head(&res->wq); wq 655 fs/ocfs2/dlm/dlmmaster.c wake_up(&res->wq); wq 990 fs/ocfs2/dlm/dlmmaster.c wake_up(&res->wq); wq 1106 fs/ocfs2/dlm/dlmmaster.c (void)wait_event_timeout(mle->wq, wq 1747 fs/ocfs2/dlm/dlmmaster.c wake_up(&res->wq); wq 1926 fs/ocfs2/dlm/dlmmaster.c wake_up(&mle->wq); wq 1947 fs/ocfs2/dlm/dlmmaster.c wake_up(&res->wq); wq 2045 fs/ocfs2/dlm/dlmmaster.c wake_up(&res->wq); wq 2398 fs/ocfs2/dlm/dlmmaster.c wake_up(&res->wq); wq 2685 fs/ocfs2/dlm/dlmmaster.c ret = wait_event_interruptible_timeout(mle->wq, wq 2725 fs/ocfs2/dlm/dlmmaster.c wake_up(&res->wq); wq 2742 fs/ocfs2/dlm/dlmmaster.c wake_up(&res->wq); wq 3227 fs/ocfs2/dlm/dlmmaster.c wake_up(&tmp->wq); wq 3300 fs/ocfs2/dlm/dlmmaster.c wake_up(&mle->wq); wq 3324 fs/ocfs2/dlm/dlmmaster.c wake_up(&mle->wq); wq 3476 fs/ocfs2/dlm/dlmmaster.c wake_up(&res->wq); wq 3529 fs/ocfs2/dlm/dlmmaster.c wake_up(&res->wq); wq 3560 fs/ocfs2/dlm/dlmmaster.c wake_up(&mle->wq); wq 1490 fs/ocfs2/dlm/dlmrecovery.c wake_up(&res->wq); wq 2160 fs/ocfs2/dlm/dlmrecovery.c wake_up(&res->wq); wq 2176 fs/ocfs2/dlm/dlmrecovery.c wake_up(&res->wq); wq 2202 fs/ocfs2/dlm/dlmrecovery.c wake_up(&res->wq); wq 2384 fs/ocfs2/dlm/dlmrecovery.c wake_up(&res->wq); wq 2406 fs/ocfs2/dlm/dlmrecovery.c wake_up(&res->wq); wq 52 fs/ocfs2/dlm/dlmthread.c add_wait_queue(&res->wq, &wait); wq 61 fs/ocfs2/dlm/dlmthread.c remove_wait_queue(&res->wq, &wait); wq 275 fs/ocfs2/dlm/dlmthread.c wake_up(&res->wq); wq 234 fs/ocfs2/dlm/dlmunlock.c wake_up(&res->wq); wq 1894 fs/proc/base.c DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); wq 1895 fs/proc/base.c child = d_alloc_parallel(dir, &qname, &wq); wq 705 fs/proc/proc_sysctl.c DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); wq 706 fs/proc/proc_sysctl.c child = d_alloc_parallel(dir, &qname, &wq); wq 96 fs/userfaultfd.c wait_queue_entry_t wq; wq 106 fs/userfaultfd.c static int userfaultfd_wake_function(wait_queue_entry_t *wq, unsigned mode, wq 114 fs/userfaultfd.c uwq = container_of(wq, struct userfaultfd_wait_queue, wq); wq 127 fs/userfaultfd.c ret = wake_up_state(wq->private, mode); wq 140 fs/userfaultfd.c list_del_init(&wq->entry); wq 458 fs/userfaultfd.c init_waitqueue_func_entry(&uwq.wq, userfaultfd_wake_function); wq 459 fs/userfaultfd.c uwq.wq.private = current; wq 476 fs/userfaultfd.c __add_wait_queue(&ctx->fault_pending_wqh, &uwq.wq); wq 564 fs/userfaultfd.c if (!list_empty_careful(&uwq.wq.entry)) { wq 570 fs/userfaultfd.c list_del(&uwq.wq.entry); wq 593 fs/userfaultfd.c init_waitqueue_entry(&ewq->wq, current); wq 601 fs/userfaultfd.c __add_wait_queue(&ctx->event_wqh, &ewq->wq); wq 614 fs/userfaultfd.c __remove_wait_queue(&ctx->event_wqh, &ewq->wq); wq 668 fs/userfaultfd.c __remove_wait_queue(&ctx->event_wqh, &ewq->wq); wq 949 fs/userfaultfd.c wait_queue_entry_t *wq; wq 958 fs/userfaultfd.c wq = list_last_entry(&wqh->head, typeof(*wq), entry); wq 959 fs/userfaultfd.c uwq = container_of(wq, struct userfaultfd_wait_queue, wq); wq 1089 fs/userfaultfd.c list_del(&uwq->wq.entry); wq 1090 fs/userfaultfd.c add_wait_queue(&ctx->fault_wqh, &uwq->wq); wq 1111 fs/userfaultfd.c list_move(&uwq->wq.entry, &fork_event); wq 1158 fs/userfaultfd.c wq.entry); wq 1169 fs/userfaultfd.c list_del(&uwq->wq.entry); wq 1170 fs/userfaultfd.c 
__add_wait_queue(&ctx->event_wqh, &uwq->wq); wq 1898 fs/userfaultfd.c wait_queue_entry_t *wq; wq 1902 fs/userfaultfd.c list_for_each_entry(wq, &ctx->fault_pending_wqh.head, entry) { wq 1906 fs/userfaultfd.c list_for_each_entry(wq, &ctx->fault_wqh.head, entry) { wq 261 fs/xfs/xfs_icache.c wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_INEW_BIT); wq 265 fs/xfs/xfs_icache.c prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE); wq 270 fs/xfs/xfs_icache.c finish_wait(wq, &wait.wq_entry); wq 599 fs/xfs/xfs_inode.c wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_IFLOCK_BIT); wq 603 fs/xfs/xfs_inode.c prepare_to_wait_exclusive(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE); wq 608 fs/xfs/xfs_inode.c finish_wait(wq, &wait.wq_entry); wq 2809 fs/xfs/xfs_inode.c wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_IPINNED_BIT); wq 2815 fs/xfs/xfs_inode.c prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE); wq 2819 fs/xfs/xfs_inode.c finish_wait(wq, &wait.wq_entry); wq 545 fs/xfs/xfs_log_priv.h static inline void xlog_wait(wait_queue_head_t *wq, spinlock_t *lock) wq 549 fs/xfs/xfs_log_priv.h add_wait_queue_exclusive(wq, &wait); wq 553 fs/xfs/xfs_log_priv.h remove_wait_queue(wq, &wait); wq 73 fs/xfs/xfs_pwork.c pctl->wq = alloc_workqueue("%s-%d", WQ_FREEZABLE, nr_threads, tag, wq 75 fs/xfs/xfs_pwork.c if (!pctl->wq) wq 95 fs/xfs/xfs_pwork.c queue_work(pctl->wq, &pwork->work); wq 103 fs/xfs/xfs_pwork.c destroy_workqueue(pctl->wq); wq 104 fs/xfs/xfs_pwork.c pctl->wq = NULL; wq 18 fs/xfs/xfs_pwork.h struct workqueue_struct *wq; wq 60 include/drm/drm_debugfs_crc.h wait_queue_head_t wq; wq 87 include/drm/drm_flip_work.h struct workqueue_struct *wq); wq 494 include/drm/ttm/ttm_bo_driver.h struct delayed_work wq; wq 251 include/linux/freezer.h #define wait_event_freezekillable_unsafe(wq, condition) \ wq 255 include/linux/freezer.h __retval = wait_event_killable(wq, (condition)); \ wq 297 include/linux/freezer.h #define wait_event_freezekillable_unsafe(wq, condition) \ wq 298 include/linux/freezer.h wait_event_killable(wq, condition) wq 58 include/linux/greybus/connection.h struct workqueue_struct *wq; wq 46 include/linux/greybus/svc.h struct workqueue_struct *wq; wq 92 include/linux/hmm.h wait_queue_head_t wq; wq 184 include/linux/hmm.h return wait_event_timeout(range->hmm->wq, range->valid, wq 495 include/linux/i3c/master.h struct workqueue_struct *wq; wq 282 include/linux/kvm_host.h struct swait_queue_head wq; wq 948 include/linux/kvm_host.h return &vcpu->wq; wq 298 include/linux/mlx5/driver.h struct workqueue_struct *wq; wq 442 include/linux/mlx5/driver.h struct workqueue_struct *wq; wq 491 include/linux/mlx5/driver.h struct workqueue_struct *wq; wq 3067 include/linux/mlx5/mlx5_ifc.h struct mlx5_ifc_wq_bits wq; wq 3158 include/linux/mlx5/mlx5_ifc.h struct mlx5_ifc_wq_bits wq; wq 3176 include/linux/mlx5/mlx5_ifc.h struct mlx5_ifc_wq_bits wq; wq 3643 include/linux/mlx5/mlx5_ifc.h struct mlx5_ifc_wq_bits wq; wq 416 include/linux/mmc/host.h wait_queue_head_t wq; wq 77 include/linux/mtd/flashchip.h wait_queue_head_t wq; /* Wait on here when we're waiting for the chip wq 125 include/linux/mtd/onenand.h wait_queue_head_t wq; wq 123 include/linux/net.h struct socket_wq wq; wq 1599 include/linux/nfs_xdr.h wait_queue_head_t wq; wq 68 include/linux/power/charger-manager.h struct work_struct wq; wq 222 include/linux/soc/qcom/qmi.h struct workqueue_struct *wq; wq 252 include/linux/sunrpc/sched.h struct rpc_task *rpc_wake_up_first_on_wq(struct workqueue_struct *wq, wq 134 
include/linux/swait.h static inline int swait_active(struct swait_queue_head *wq) wq 136 include/linux/swait.h return !list_empty(&wq->task_list); wq 147 include/linux/swait.h static inline bool swq_has_sleeper(struct swait_queue_head *wq) wq 157 include/linux/swait.h return swait_active(wq); wq 171 include/linux/swait.h #define ___swait_event(wq, condition, state, ret, cmd) \ wq 179 include/linux/swait.h long __int = prepare_to_swait_event(&wq, &__wait, state);\ wq 191 include/linux/swait.h finish_swait(&wq, &__wait); \ wq 195 include/linux/swait.h #define __swait_event(wq, condition) \ wq 196 include/linux/swait.h (void)___swait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, \ wq 199 include/linux/swait.h #define swait_event_exclusive(wq, condition) \ wq 203 include/linux/swait.h __swait_event(wq, condition); \ wq 206 include/linux/swait.h #define __swait_event_timeout(wq, condition, timeout) \ wq 207 include/linux/swait.h ___swait_event(wq, ___wait_cond_timeout(condition), \ wq 211 include/linux/swait.h #define swait_event_timeout_exclusive(wq, condition, timeout) \ wq 215 include/linux/swait.h __ret = __swait_event_timeout(wq, condition, timeout); \ wq 219 include/linux/swait.h #define __swait_event_interruptible(wq, condition) \ wq 220 include/linux/swait.h ___swait_event(wq, condition, TASK_INTERRUPTIBLE, 0, \ wq 223 include/linux/swait.h #define swait_event_interruptible_exclusive(wq, condition) \ wq 227 include/linux/swait.h __ret = __swait_event_interruptible(wq, condition); \ wq 231 include/linux/swait.h #define __swait_event_interruptible_timeout(wq, condition, timeout) \ wq 232 include/linux/swait.h ___swait_event(wq, ___wait_cond_timeout(condition), \ wq 236 include/linux/swait.h #define swait_event_interruptible_timeout_exclusive(wq, condition, timeout)\ wq 240 include/linux/swait.h __ret = __swait_event_interruptible_timeout(wq, \ wq 245 include/linux/swait.h #define __swait_event_idle(wq, condition) \ wq 246 include/linux/swait.h (void)___swait_event(wq, condition, TASK_IDLE, 0, schedule()) wq 260 include/linux/swait.h #define swait_event_idle_exclusive(wq, condition) \ wq 264 include/linux/swait.h __swait_event_idle(wq, condition); \ wq 267 include/linux/swait.h #define __swait_event_idle_timeout(wq, condition, timeout) \ wq 268 include/linux/swait.h ___swait_event(wq, ___wait_cond_timeout(condition), \ wq 291 include/linux/swait.h #define swait_event_idle_timeout_exclusive(wq, condition, timeout) \ wq 295 include/linux/swait.h __ret = __swait_event_idle_timeout(wq, \ wq 49 include/linux/sync_file.h wait_queue_head_t wq; wq 77 include/linux/thunderbolt.h struct workqueue_struct *wq; wq 565 include/linux/wait.h #define wait_event_interruptible_hrtimeout(wq, condition, timeout) \ wq 570 include/linux/wait.h __ret = __wait_event_hrtimeout(wq, condition, timeout, \ wq 575 include/linux/wait.h #define __wait_event_interruptible_exclusive(wq, condition) \ wq 576 include/linux/wait.h ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0, \ wq 579 include/linux/wait.h #define wait_event_interruptible_exclusive(wq, condition) \ wq 584 include/linux/wait.h __ret = __wait_event_interruptible_exclusive(wq, condition); \ wq 588 include/linux/wait.h #define __wait_event_killable_exclusive(wq, condition) \ wq 589 include/linux/wait.h ___wait_event(wq, condition, TASK_KILLABLE, 1, 0, \ wq 592 include/linux/wait.h #define wait_event_killable_exclusive(wq, condition) \ wq 597 include/linux/wait.h __ret = __wait_event_killable_exclusive(wq, condition); \ wq 602 include/linux/wait.h #define 
__wait_event_freezable_exclusive(wq, condition) \ wq 603 include/linux/wait.h ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0, \ wq 606 include/linux/wait.h #define wait_event_freezable_exclusive(wq, condition) \ wq 611 include/linux/wait.h __ret = __wait_event_freezable_exclusive(wq, condition); \ wq 732 include/linux/wait.h #define __wait_event_interruptible_locked(wq, condition, exclusive, fn) \ wq 739 include/linux/wait.h __ret = fn(&(wq), &__wait); \ wq 743 include/linux/wait.h __remove_wait_queue(&(wq), &__wait); \ wq 772 include/linux/wait.h #define wait_event_interruptible_locked(wq, condition) \ wq 774 include/linux/wait.h ? 0 : __wait_event_interruptible_locked(wq, condition, 0, do_wait_intr)) wq 799 include/linux/wait.h #define wait_event_interruptible_locked_irq(wq, condition) \ wq 801 include/linux/wait.h ? 0 : __wait_event_interruptible_locked(wq, condition, 0, do_wait_intr_irq)) wq 830 include/linux/wait.h #define wait_event_interruptible_exclusive_locked(wq, condition) \ wq 832 include/linux/wait.h ? 0 : __wait_event_interruptible_locked(wq, condition, 1, do_wait_intr)) wq 861 include/linux/wait.h #define wait_event_interruptible_exclusive_locked_irq(wq, condition) \ wq 863 include/linux/wait.h ? 0 : __wait_event_interruptible_locked(wq, condition, 1, do_wait_intr_irq)) wq 866 include/linux/wait.h #define __wait_event_killable(wq, condition) \ wq 867 include/linux/wait.h ___wait_event(wq, condition, TASK_KILLABLE, 0, 0, schedule()) wq 120 include/linux/workqueue.h struct workqueue_struct *wq; wq 129 include/linux/workqueue.h struct workqueue_struct *wq; wq 436 include/linux/workqueue.h extern void destroy_workqueue(struct workqueue_struct *wq); wq 440 include/linux/workqueue.h int apply_workqueue_attrs(struct workqueue_struct *wq, wq 444 include/linux/workqueue.h extern bool queue_work_on(int cpu, struct workqueue_struct *wq, wq 446 include/linux/workqueue.h extern bool queue_work_node(int node, struct workqueue_struct *wq, wq 448 include/linux/workqueue.h extern bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq, wq 450 include/linux/workqueue.h extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq, wq 452 include/linux/workqueue.h extern bool queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork); wq 454 include/linux/workqueue.h extern void flush_workqueue(struct workqueue_struct *wq); wq 455 include/linux/workqueue.h extern void drain_workqueue(struct workqueue_struct *wq); wq 470 include/linux/workqueue.h extern void workqueue_set_max_active(struct workqueue_struct *wq, wq 474 include/linux/workqueue.h extern bool workqueue_congested(int cpu, struct workqueue_struct *wq); wq 491 include/linux/workqueue.h static inline bool queue_work(struct workqueue_struct *wq, wq 494 include/linux/workqueue.h return queue_work_on(WORK_CPU_UNBOUND, wq, work); wq 505 include/linux/workqueue.h static inline bool queue_delayed_work(struct workqueue_struct *wq, wq 509 include/linux/workqueue.h return queue_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay); wq 520 include/linux/workqueue.h static inline bool mod_delayed_work(struct workqueue_struct *wq, wq 524 include/linux/workqueue.h return mod_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay); wq 634 include/linux/workqueue.h int workqueue_sysfs_register(struct workqueue_struct *wq); wq 636 include/linux/workqueue.h static inline int workqueue_sysfs_register(struct workqueue_struct *wq) wq 83 include/net/9p/client.h wait_queue_head_t wq; wq 228 include/net/bonding.h struct 
wq 120 include/linux/workqueue.h struct workqueue_struct *wq;
wq 129 include/linux/workqueue.h struct workqueue_struct *wq;
wq 436 include/linux/workqueue.h extern void destroy_workqueue(struct workqueue_struct *wq);
wq 440 include/linux/workqueue.h int apply_workqueue_attrs(struct workqueue_struct *wq,
wq 444 include/linux/workqueue.h extern bool queue_work_on(int cpu, struct workqueue_struct *wq,
wq 446 include/linux/workqueue.h extern bool queue_work_node(int node, struct workqueue_struct *wq,
wq 448 include/linux/workqueue.h extern bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
wq 450 include/linux/workqueue.h extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
wq 452 include/linux/workqueue.h extern bool queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork);
wq 454 include/linux/workqueue.h extern void flush_workqueue(struct workqueue_struct *wq);
wq 455 include/linux/workqueue.h extern void drain_workqueue(struct workqueue_struct *wq);
wq 470 include/linux/workqueue.h extern void workqueue_set_max_active(struct workqueue_struct *wq,
wq 474 include/linux/workqueue.h extern bool workqueue_congested(int cpu, struct workqueue_struct *wq);
wq 491 include/linux/workqueue.h static inline bool queue_work(struct workqueue_struct *wq,
wq 494 include/linux/workqueue.h return queue_work_on(WORK_CPU_UNBOUND, wq, work);
wq 505 include/linux/workqueue.h static inline bool queue_delayed_work(struct workqueue_struct *wq,
wq 509 include/linux/workqueue.h return queue_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
wq 520 include/linux/workqueue.h static inline bool mod_delayed_work(struct workqueue_struct *wq,
wq 524 include/linux/workqueue.h return mod_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay);
wq 634 include/linux/workqueue.h int workqueue_sysfs_register(struct workqueue_struct *wq);
wq 636 include/linux/workqueue.h static inline int workqueue_sysfs_register(struct workqueue_struct *wq)
wq 83 include/net/9p/client.h wait_queue_head_t wq;
wq 228 include/net/bonding.h struct workqueue_struct *wq;
wq 165 include/net/caif/caif_hsi.h struct workqueue_struct *wq;
wq 115 include/net/caif/caif_spi.h struct workqueue_struct *wq;
wq 98 include/net/sock.h wait_queue_head_t wq;
wq 1511 include/net/sock.h init_waitqueue_head(&sk->sk_lock.wq); \
wq 1835 include/net/sock.h rcu_assign_pointer(sk->sk_wq, &parent->wq);
wq 2096 include/net/sock.h static inline bool skwq_has_sleeper(struct socket_wq *wq)
wq 2098 include/net/sock.h return wq && wq_has_sleeper(&wq->wait);
wq 2113 include/net/sock.h poll_wait(filp, &sock->wq.wait, p);
wq 724 include/rdma/ib_verbs.h struct ib_wq *wq;
wq 2455 include/rdma/ib_verbs.h void (*destroy_wq)(struct ib_wq *wq, struct ib_udata *udata);
wq 2456 include/rdma/ib_verbs.h int (*modify_wq)(struct ib_wq *wq, struct ib_wq_attr *attr,
wq 4325 include/rdma/ib_verbs.h int ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata);
wq 4326 include/rdma/ib_verbs.h int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *attr,
wq 219 include/rdma/rdmavt_qp.h struct rvt_rwqe wq[];
wq 273 include/rdma/rdmavt_qp.h struct rvt_rwq *wq;
wq 972 include/rdma/rdmavt_qp.h vfree(rq->wq);
wq 973 include/rdma/rdmavt_qp.h rq->wq = NULL;
wq 482 include/soc/fsl/qman.h static inline void qm_fqd_set_destwq(struct qm_fqd *fqd, int ch, int wq)
wq 485 include/soc/fsl/qman.h (wq & QM_FQD_WQ_MASK));
wq 1369 include/trace/events/btrfs.h __field( const void *, wq )
wq 1378 include/trace/events/btrfs.h __entry->wq = work->wq;
wq 1387 include/trace/events/btrfs.h __entry->work, __entry->normal_work, __entry->wq,
wq 1443 include/trace/events/btrfs.h TP_PROTO(const struct __btrfs_workqueue *wq,
wq 1446 include/trace/events/btrfs.h TP_ARGS(wq, name, high),
wq 1449 include/trace/events/btrfs.h __field( const void *, wq )
wq 1454 include/trace/events/btrfs.h TP_fast_assign_btrfs(btrfs_workqueue_owner(wq),
wq 1455 include/trace/events/btrfs.h __entry->wq = wq;
wq 1463 include/trace/events/btrfs.h __entry->wq)
wq 1468 include/trace/events/btrfs.h TP_PROTO(const struct __btrfs_workqueue *wq,
wq 1471 include/trace/events/btrfs.h TP_ARGS(wq, name, high)
wq 1476 include/trace/events/btrfs.h TP_PROTO(const struct __btrfs_workqueue *wq),
wq 1478 include/trace/events/btrfs.h TP_ARGS(wq),
wq 1481 include/trace/events/btrfs.h __field( const void *, wq )
wq 1484 include/trace/events/btrfs.h TP_fast_assign_btrfs(btrfs_workqueue_owner(wq),
wq 1485 include/trace/events/btrfs.h __entry->wq = wq;
wq 1488 include/trace/events/btrfs.h TP_printk_btrfs("wq=%p", __entry->wq)
wq 1493 include/trace/events/btrfs.h TP_PROTO(const struct __btrfs_workqueue *wq),
wq 1495 include/trace/events/btrfs.h TP_ARGS(wq)
wq 58 include/trace/events/workqueue.h __entry->workqueue = pwq->wq;
wq 64 include/uapi/rdma/rvt-abi.h struct rvt_rwqe wq[];
wq 99 kernel/events/uprobes.c wait_queue_head_t wq; /* if all slots are busy */
wq 1512 kernel/events/uprobes.c init_waitqueue_head(&area->wq);
wq 1604 kernel/events/uprobes.c wait_event(area->wq, (atomic_read(&area->slot_count) < UINSNS_PER_PAGE));
wq 1668 kernel/events/uprobes.c if (waitqueue_active(&area->wq))
wq 1669 kernel/events/uprobes.c wake_up(&area->wq);
wq 17 kernel/locking/test-ww_mutex.c struct workqueue_struct *wq;
wq 305 kernel/locking/test-ww_mutex.c queue_work(wq, &cycles[n].work);
wq 307 kernel/locking/test-ww_mutex.c flush_workqueue(wq);
wq 568 kernel/locking/test-ww_mutex.c queue_work(wq, &stress->work);
wq 572 kernel/locking/test-ww_mutex.c flush_workqueue(wq);
wq 586 kernel/locking/test-ww_mutex.c wq = alloc_workqueue("test-ww_mutex", WQ_UNBOUND, 0);
wq 587 kernel/locking/test-ww_mutex.c if (!wq)
wq 627 kernel/locking/test-ww_mutex.c destroy_workqueue(wq);
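The test-ww_mutex.c hits above show the full workqueue lifecycle in one user: alloc_workqueue(), queue_work(), flush_workqueue(), destroy_workqueue(). A hedged sketch of that same lifecycle (demo_* names hypothetical):

    #include <linux/workqueue.h>

    static struct workqueue_struct *demo_wq;    /* hypothetical */

    static void demo_fn(struct work_struct *work)
    {
            /* runs in process context on a worker thread */
    }
    static DECLARE_WORK(demo_work, demo_fn);

    static int demo_run(void)
    {
            demo_wq = alloc_workqueue("demo", WQ_UNBOUND, 0);
            if (!demo_wq)
                    return -ENOMEM;

            queue_work(demo_wq, &demo_work);   /* false if already pending */
            flush_workqueue(demo_wq);          /* wait for work queued so far */
            destroy_workqueue(demo_wq);
            return 0;
    }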
wq 301 kernel/sched/wait.c int do_wait_intr(wait_queue_head_t *wq, wait_queue_entry_t *wait)
wq 304 kernel/sched/wait.c __add_wait_queue_entry_tail(wq, wait);
wq 310 kernel/sched/wait.c spin_unlock(&wq->lock);
wq 312 kernel/sched/wait.c spin_lock(&wq->lock);
wq 318 kernel/sched/wait.c int do_wait_intr_irq(wait_queue_head_t *wq, wait_queue_entry_t *wait)
wq 321 kernel/sched/wait.c __add_wait_queue_entry_tail(wq, wait);
wq 327 kernel/sched/wait.c spin_unlock_irq(&wq->lock);
wq 329 kernel/sched/wait.c spin_lock_irq(&wq->lock);
wq 201 kernel/workqueue.c struct workqueue_struct *wq; /* I: the owning workqueue */
wq 357 kernel/workqueue.c static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
wq 367 kernel/workqueue.c #define assert_rcu_or_wq_mutex(wq) \
wq 369 kernel/workqueue.c !lockdep_is_held(&wq->mutex), \
wq 372 kernel/workqueue.c #define assert_rcu_or_wq_mutex_or_pool_mutex(wq) \
wq 374 kernel/workqueue.c !lockdep_is_held(&wq->mutex) && \
wq 427 kernel/workqueue.c #define for_each_pwq(pwq, wq) \
wq 428 kernel/workqueue.c list_for_each_entry_rcu((pwq), &(wq)->pwqs, pwqs_node, \
wq 429 kernel/workqueue.c lockdep_is_held(&wq->mutex)) \
wq 430 kernel/workqueue.c if (({ assert_rcu_or_wq_mutex(wq); false; })) { } \
wq 564 kernel/workqueue.c static struct pool_workqueue *unbound_pwq_by_node(struct workqueue_struct *wq,
wq 567 kernel/workqueue.c assert_rcu_or_wq_mutex_or_pool_mutex(wq);
wq 576 kernel/workqueue.c return wq->dfl_pwq;
wq 578 kernel/workqueue.c return rcu_dereference_raw(wq->numa_pwq_tbl[node]);
wq 1109 kernel/workqueue.c if (WARN_ON_ONCE(!(pwq->wq->flags & WQ_UNBOUND)))
wq 1202 kernel/workqueue.c if (atomic_dec_and_test(&pwq->wq->nr_pwqs_to_flush))
wq 1203 kernel/workqueue.c complete(&pwq->wq->first_flusher->done);
wq 1350 kernel/workqueue.c static bool is_chained_work(struct workqueue_struct *wq)
wq 1359 kernel/workqueue.c return worker && worker->current_pwq->wq == wq;
wq 1395 kernel/workqueue.c static void __queue_work(int cpu, struct workqueue_struct *wq,
wq 1415 kernel/workqueue.c if (unlikely(wq->flags & __WQ_DRAINING) &&
wq 1416 kernel/workqueue.c WARN_ON_ONCE(!is_chained_work(wq)))
wq 1421 kernel/workqueue.c if (wq->flags & WQ_UNBOUND) {
wq 1424 kernel/workqueue.c pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));
wq 1428 kernel/workqueue.c pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
wq 1444 kernel/workqueue.c if (worker && worker->current_pwq->wq == wq) {
wq 1464 kernel/workqueue.c if (wq->flags & WQ_UNBOUND) {
wq 1471 kernel/workqueue.c wq->name, cpu);
wq 1512 kernel/workqueue.c bool queue_work_on(int cpu, struct workqueue_struct *wq,
wq 1521 kernel/workqueue.c __queue_work(cpu, wq, work);
wq 1583 kernel/workqueue.c bool queue_work_node(int node, struct workqueue_struct *wq,
wq 1598 kernel/workqueue.c WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND));
wq 1605 kernel/workqueue.c __queue_work(cpu, wq, work);
wq 1619 kernel/workqueue.c __queue_work(dwork->cpu, dwork->wq, &dwork->work);
wq 1623 kernel/workqueue.c static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
wq 1629 kernel/workqueue.c WARN_ON_ONCE(!wq);
wq 1641 kernel/workqueue.c __queue_work(cpu, wq, &dwork->work);
wq 1645 kernel/workqueue.c dwork->wq = wq;
wq 1666 kernel/workqueue.c bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
wq 1677 kernel/workqueue.c __queue_delayed_work(cpu, wq, dwork, delay);
wq 1704 kernel/workqueue.c bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
wq 1715 kernel/workqueue.c __queue_delayed_work(cpu, wq, dwork, delay);
wq 1730 kernel/workqueue.c __queue_work(WORK_CPU_UNBOUND, rwork->wq, &rwork->work);
wq 1744 kernel/workqueue.c bool queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork)
wq 1749 kernel/workqueue.c rwork->wq = wq;
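The queueing entry points above (queue_work_on, queue_delayed_work_on, mod_delayed_work_on, queue_rcu_work) differ mainly in when the work becomes runnable. A hedged sketch of the delayed-work pair (demo_* names hypothetical):

    #include <linux/workqueue.h>
    #include <linux/jiffies.h>

    static void demo_timeout_fn(struct work_struct *work)
    {
            /* fires after the delay elapses */
    }
    static DECLARE_DELAYED_WORK(demo_dwork, demo_timeout_fn);  /* hypothetical */

    /* queue_delayed_work() is a no-op if the work is already pending,
     * whereas mod_delayed_work() re-arms the timeout either way. */
    static void demo_arm(struct workqueue_struct *wq)
    {
            queue_delayed_work(wq, &demo_dwork, msecs_to_jiffies(100));
    }

    static void demo_rearm(struct workqueue_struct *wq)
    {
            mod_delayed_work(wq, &demo_dwork, msecs_to_jiffies(500));
    }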
wq 2017 kernel/workqueue.c struct workqueue_struct *wq = pwq->wq;
wq 2021 kernel/workqueue.c if (!wq->rescuer)
wq 2032 kernel/workqueue.c list_add_tail(&pwq->mayday_node, &wq->maydays);
wq 2033 kernel/workqueue.c wake_up_process(wq->rescuer->task);
wq 2171 kernel/workqueue.c bool cpu_intensive = pwq->wq->flags & WQ_CPU_INTENSIVE;
wq 2214 kernel/workqueue.c strscpy(worker->desc, pwq->wq->name, WORKER_DESC_LEN);
wq 2247 kernel/workqueue.c lock_map_acquire(&pwq->wq->lockdep_map);
wq 2279 kernel/workqueue.c lock_map_release(&pwq->wq->lockdep_map);
wq 2467 kernel/workqueue.c struct workqueue_struct *wq = rescuer->rescue_wq;
wq 2494 kernel/workqueue.c while (!list_empty(&wq->maydays)) {
wq 2495 kernel/workqueue.c struct pool_workqueue *pwq = list_first_entry(&wq->maydays,
wq 2542 kernel/workqueue.c if (wq->rescuer && list_empty(&pwq->mayday_node)) {
wq 2544 kernel/workqueue.c list_add_tail(&pwq->mayday_node, &wq->maydays);
wq 2610 kernel/workqueue.c WARN_ONCE(worker && ((worker->current_pwq->wq->flags &
wq 2613 kernel/workqueue.c worker->current_pwq->wq->name, worker->current_func,
wq 2724 kernel/workqueue.c static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq,
wq 2731 kernel/workqueue.c WARN_ON_ONCE(atomic_read(&wq->nr_pwqs_to_flush));
wq 2732 kernel/workqueue.c atomic_set(&wq->nr_pwqs_to_flush, 1);
wq 2735 kernel/workqueue.c for_each_pwq(pwq, wq) {
wq 2745 kernel/workqueue.c atomic_inc(&wq->nr_pwqs_to_flush);
wq 2758 kernel/workqueue.c if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_pwqs_to_flush))
wq 2759 kernel/workqueue.c complete(&wq->first_flusher->done);
wq 2771 kernel/workqueue.c void flush_workqueue(struct workqueue_struct *wq)
wq 2776 kernel/workqueue.c .done = COMPLETION_INITIALIZER_ONSTACK_MAP(this_flusher.done, wq->lockdep_map),
wq 2783 kernel/workqueue.c lock_map_acquire(&wq->lockdep_map);
wq 2784 kernel/workqueue.c lock_map_release(&wq->lockdep_map);
wq 2786 kernel/workqueue.c mutex_lock(&wq->mutex);
wq 2791 kernel/workqueue.c next_color = work_next_color(wq->work_color);
wq 2793 kernel/workqueue.c if (next_color != wq->flush_color) {
wq 2799 kernel/workqueue.c WARN_ON_ONCE(!list_empty(&wq->flusher_overflow));
wq 2800 kernel/workqueue.c this_flusher.flush_color = wq->work_color;
wq 2801 kernel/workqueue.c wq->work_color = next_color;
wq 2803 kernel/workqueue.c if (!wq->first_flusher) {
wq 2805 kernel/workqueue.c WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color);
wq 2807 kernel/workqueue.c wq->first_flusher = &this_flusher;
wq 2809 kernel/workqueue.c if (!flush_workqueue_prep_pwqs(wq, wq->flush_color,
wq 2810 kernel/workqueue.c wq->work_color)) {
wq 2812 kernel/workqueue.c wq->flush_color = next_color;
wq 2813 kernel/workqueue.c wq->first_flusher = NULL;
wq 2818 kernel/workqueue.c WARN_ON_ONCE(wq->flush_color == this_flusher.flush_color);
wq 2819 kernel/workqueue.c list_add_tail(&this_flusher.list, &wq->flusher_queue);
wq 2820 kernel/workqueue.c flush_workqueue_prep_pwqs(wq, -1, wq->work_color);
wq 2828 kernel/workqueue.c list_add_tail(&this_flusher.list, &wq->flusher_overflow);
wq 2831 kernel/workqueue.c check_flush_dependency(wq, NULL);
wq 2833 kernel/workqueue.c mutex_unlock(&wq->mutex);
wq 2843 kernel/workqueue.c if (wq->first_flusher != &this_flusher)
wq 2846 kernel/workqueue.c mutex_lock(&wq->mutex);
wq 2849 kernel/workqueue.c if (wq->first_flusher != &this_flusher)
wq 2852 kernel/workqueue.c wq->first_flusher = NULL;
wq 2855 kernel/workqueue.c WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color);
wq 2861 kernel/workqueue.c list_for_each_entry_safe(next, tmp, &wq->flusher_queue, list) {
wq 2862 kernel/workqueue.c if (next->flush_color != wq->flush_color)
wq 2868 kernel/workqueue.c WARN_ON_ONCE(!list_empty(&wq->flusher_overflow) &&
wq 2869 kernel/workqueue.c wq->flush_color != work_next_color(wq->work_color));
wq 2872 kernel/workqueue.c wq->flush_color = work_next_color(wq->flush_color);
wq 2875 kernel/workqueue.c if (!list_empty(&wq->flusher_overflow)) {
wq 2882 kernel/workqueue.c list_for_each_entry(tmp, &wq->flusher_overflow, list)
wq 2883 kernel/workqueue.c tmp->flush_color = wq->work_color;
wq 2885 kernel/workqueue.c wq->work_color = work_next_color(wq->work_color);
wq 2887 kernel/workqueue.c list_splice_tail_init(&wq->flusher_overflow,
wq 2888 kernel/workqueue.c &wq->flusher_queue);
wq 2889 kernel/workqueue.c flush_workqueue_prep_pwqs(wq, -1, wq->work_color);
wq 2892 kernel/workqueue.c if (list_empty(&wq->flusher_queue)) {
wq 2893 kernel/workqueue.c WARN_ON_ONCE(wq->flush_color != wq->work_color);
wq 2901 kernel/workqueue.c WARN_ON_ONCE(wq->flush_color == wq->work_color);
wq 2902 kernel/workqueue.c WARN_ON_ONCE(wq->flush_color != next->flush_color);
wq 2905 kernel/workqueue.c wq->first_flusher = next;
wq 2907 kernel/workqueue.c if (flush_workqueue_prep_pwqs(wq, wq->flush_color, -1))
wq 2914 kernel/workqueue.c wq->first_flusher = NULL;
wq 2918 kernel/workqueue.c mutex_unlock(&wq->mutex);
wq 2933 kernel/workqueue.c void drain_workqueue(struct workqueue_struct *wq)
wq 2943 kernel/workqueue.c mutex_lock(&wq->mutex);
wq 2944 kernel/workqueue.c if (!wq->nr_drainers++)
wq 2945 kernel/workqueue.c wq->flags |= __WQ_DRAINING;
wq 2946 kernel/workqueue.c mutex_unlock(&wq->mutex);
wq 2948 kernel/workqueue.c flush_workqueue(wq);
wq 2950 kernel/workqueue.c mutex_lock(&wq->mutex);
wq 2952 kernel/workqueue.c for_each_pwq(pwq, wq) {
wq 2965 kernel/workqueue.c wq->name, flush_cnt);
wq 2967 kernel/workqueue.c mutex_unlock(&wq->mutex);
wq 2971 kernel/workqueue.c if (!--wq->nr_drainers)
wq 2972 kernel/workqueue.c wq->flags &= ~__WQ_DRAINING;
wq 2973 kernel/workqueue.c mutex_unlock(&wq->mutex);
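The hits above separate the two barrier operations: flush_workqueue() (2771) waits only for work queued before the call, while drain_workqueue() (2933) also sets __WQ_DRAINING so that only chained work may requeue, and repeats the flush until the queue is empty. A hedged teardown sketch (demo_teardown is hypothetical):

    #include <linux/workqueue.h>

    static void demo_teardown(struct workqueue_struct *wq)
    {
            /* Wait for everything queued so far to finish: */
            flush_workqueue(wq);

            /* Additionally forbid new (non-chained) submissions and loop
             * until the workqueue is truly empty: */
            drain_workqueue(wq);
    }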
wq 3006 kernel/workqueue.c check_flush_dependency(pwq->wq, work);
wq 3021 kernel/workqueue.c (pwq->wq->saved_max_active == 1 || pwq->wq->rescuer)) {
wq 3022 kernel/workqueue.c lock_map_acquire(&pwq->wq->lockdep_map);
wq 3023 kernel/workqueue.c lock_map_release(&pwq->wq->lockdep_map);
wq 3192 kernel/workqueue.c __queue_work(dwork->cpu, dwork->wq, &dwork->work);
wq 3449 kernel/workqueue.c static void wq_init_lockdep(struct workqueue_struct *wq)
wq 3453 kernel/workqueue.c lockdep_register_key(&wq->key);
wq 3454 kernel/workqueue.c lock_name = kasprintf(GFP_KERNEL, "%s%s", "(wq_completion)", wq->name);
wq 3456 kernel/workqueue.c lock_name = wq->name;
wq 3458 kernel/workqueue.c wq->lock_name = lock_name;
wq 3459 kernel/workqueue.c lockdep_init_map(&wq->lockdep_map, lock_name, &wq->key, 0);
wq 3462 kernel/workqueue.c static void wq_unregister_lockdep(struct workqueue_struct *wq)
wq 3464 kernel/workqueue.c lockdep_unregister_key(&wq->key);
wq 3467 kernel/workqueue.c static void wq_free_lockdep(struct workqueue_struct *wq)
wq 3469 kernel/workqueue.c if (wq->lock_name != wq->name)
wq 3470 kernel/workqueue.c kfree(wq->lock_name);
wq 3473 kernel/workqueue.c static void wq_init_lockdep(struct workqueue_struct *wq)
wq 3477 kernel/workqueue.c static void wq_unregister_lockdep(struct workqueue_struct *wq)
wq 3481 kernel/workqueue.c static void wq_free_lockdep(struct workqueue_struct *wq)
wq 3488 kernel/workqueue.c struct workqueue_struct *wq =
wq 3491 kernel/workqueue.c wq_free_lockdep(wq);
wq 3493 kernel/workqueue.c if (!(wq->flags & WQ_UNBOUND))
wq 3494 kernel/workqueue.c free_percpu(wq->cpu_pwqs);
wq 3496 kernel/workqueue.c free_workqueue_attrs(wq->unbound_attrs);
wq 3498 kernel/workqueue.c kfree(wq->rescuer);
wq 3499 kernel/workqueue.c kfree(wq);
wq 3661 kernel/workqueue.c struct workqueue_struct *wq = pwq->wq;
wq 3665 kernel/workqueue.c if (WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND)))
wq 3668 kernel/workqueue.c mutex_lock(&wq->mutex);
wq 3670 kernel/workqueue.c is_last = list_empty(&wq->pwqs);
wq 3671 kernel/workqueue.c mutex_unlock(&wq->mutex);
wq 3684 kernel/workqueue.c wq_unregister_lockdep(wq);
wq 3685 kernel/workqueue.c call_rcu(&wq->rcu, rcu_free_wq);
wq 3699 kernel/workqueue.c struct workqueue_struct *wq = pwq->wq;
wq 3700 kernel/workqueue.c bool freezable = wq->flags & WQ_FREEZABLE;
wq 3704 kernel/workqueue.c lockdep_assert_held(&wq->mutex);
wq 3707 kernel/workqueue.c if (!freezable && pwq->max_active == wq->saved_max_active)
wq 3719 kernel/workqueue.c pwq->max_active = wq->saved_max_active;
wq 3738 kernel/workqueue.c static void init_pwq(struct pool_workqueue *pwq, struct workqueue_struct *wq,
wq 3746 kernel/workqueue.c pwq->wq = wq;
wq 3758 kernel/workqueue.c struct workqueue_struct *wq = pwq->wq;
wq 3760 kernel/workqueue.c lockdep_assert_held(&wq->mutex);
wq 3767 kernel/workqueue.c pwq->work_color = wq->work_color;
wq 3773 kernel/workqueue.c list_add_rcu(&pwq->pwqs_node, &wq->pwqs);
wq 3777 kernel/workqueue.c static struct pool_workqueue *alloc_unbound_pwq(struct workqueue_struct *wq,
wq 3795 kernel/workqueue.c init_pwq(pwq, wq, pool);
wq 3852 kernel/workqueue.c static struct pool_workqueue *numa_pwq_tbl_install(struct workqueue_struct *wq,
wq 3859 kernel/workqueue.c lockdep_assert_held(&wq->mutex);
wq 3864 kernel/workqueue.c old_pwq = rcu_access_pointer(wq->numa_pwq_tbl[node]);
wq 3865 kernel/workqueue.c rcu_assign_pointer(wq->numa_pwq_tbl[node], pwq);
wq 3871 kernel/workqueue.c struct workqueue_struct *wq; /* target workqueue */
wq 3896 kernel/workqueue.c apply_wqattrs_prepare(struct workqueue_struct *wq,
wq 3934 kernel/workqueue.c ctx->dfl_pwq = alloc_unbound_pwq(wq, new_attrs);
wq 3940 kernel/workqueue.c ctx->pwq_tbl[node] = alloc_unbound_pwq(wq, tmp_attrs);
wq 3954 kernel/workqueue.c ctx->wq = wq;
wq 3971 kernel/workqueue.c mutex_lock(&ctx->wq->mutex);
wq 3973 kernel/workqueue.c copy_workqueue_attrs(ctx->wq->unbound_attrs, ctx->attrs);
wq 3977 kernel/workqueue.c ctx->pwq_tbl[node] = numa_pwq_tbl_install(ctx->wq, node,
wq 3982 kernel/workqueue.c swap(ctx->wq->dfl_pwq, ctx->dfl_pwq);
wq 3984 kernel/workqueue.c mutex_unlock(&ctx->wq->mutex);
wq 4000 kernel/workqueue.c static int apply_workqueue_attrs_locked(struct workqueue_struct *wq,
wq 4006 kernel/workqueue.c if (WARN_ON(!(wq->flags & WQ_UNBOUND)))
wq 4010 kernel/workqueue.c if (!list_empty(&wq->pwqs)) {
wq 4011 kernel/workqueue.c if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
wq 4014 kernel/workqueue.c wq->flags &= ~__WQ_ORDERED;
wq 4017 kernel/workqueue.c ctx = apply_wqattrs_prepare(wq, attrs);
wq 4046 kernel/workqueue.c int apply_workqueue_attrs(struct workqueue_struct *wq,
wq 4054 kernel/workqueue.c ret = apply_workqueue_attrs_locked(wq, attrs);
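apply_workqueue_attrs() (4046) retargets an unbound workqueue onto new worker-pool attributes, as the sysfs stores later in this listing do. A hedged sketch of the call sequence; note that in this tree the attrs allocator is used mostly by workqueue-internal and sysfs code, so treat this as an illustration of the sequence rather than a module-ready recipe (demo_tune is hypothetical):

    static int demo_tune(struct workqueue_struct *wq)
    {
            struct workqueue_attrs *attrs;
            int ret;

            attrs = alloc_workqueue_attrs();        /* no args in this tree */
            if (!attrs)
                    return -ENOMEM;

            attrs->nice = -5;                       /* higher-priority workers */
            cpumask_copy(attrs->cpumask, cpumask_of(0));  /* confine to CPU 0 */

            ret = apply_workqueue_attrs(wq, attrs); /* wq must be WQ_UNBOUND */
            free_workqueue_attrs(attrs);
            return ret;
    }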
wq 4082 kernel/workqueue.c static void wq_update_unbound_numa(struct workqueue_struct *wq, int cpu,
wq 4093 kernel/workqueue.c if (!wq_numa_enabled || !(wq->flags & WQ_UNBOUND) ||
wq 4094 kernel/workqueue.c wq->unbound_attrs->no_numa)
wq 4105 kernel/workqueue.c copy_workqueue_attrs(target_attrs, wq->unbound_attrs);
wq 4106 kernel/workqueue.c pwq = unbound_pwq_by_node(wq, node);
wq 4114 kernel/workqueue.c if (wq_calc_node_cpumask(wq->dfl_pwq->pool->attrs, node, cpu_off, cpumask)) {
wq 4122 kernel/workqueue.c pwq = alloc_unbound_pwq(wq, target_attrs);
wq 4125 kernel/workqueue.c wq->name);
wq 4130 kernel/workqueue.c mutex_lock(&wq->mutex);
wq 4131 kernel/workqueue.c old_pwq = numa_pwq_tbl_install(wq, node, pwq);
wq 4135 kernel/workqueue.c mutex_lock(&wq->mutex);
wq 4136 kernel/workqueue.c spin_lock_irq(&wq->dfl_pwq->pool->lock);
wq 4137 kernel/workqueue.c get_pwq(wq->dfl_pwq);
wq 4138 kernel/workqueue.c spin_unlock_irq(&wq->dfl_pwq->pool->lock);
wq 4139 kernel/workqueue.c old_pwq = numa_pwq_tbl_install(wq, node, wq->dfl_pwq);
wq 4141 kernel/workqueue.c mutex_unlock(&wq->mutex);
wq 4145 kernel/workqueue.c static int alloc_and_link_pwqs(struct workqueue_struct *wq)
wq 4147 kernel/workqueue.c bool highpri = wq->flags & WQ_HIGHPRI;
wq 4150 kernel/workqueue.c if (!(wq->flags & WQ_UNBOUND)) {
wq 4151 kernel/workqueue.c wq->cpu_pwqs = alloc_percpu(struct pool_workqueue);
wq 4152 kernel/workqueue.c if (!wq->cpu_pwqs)
wq 4157 kernel/workqueue.c per_cpu_ptr(wq->cpu_pwqs, cpu);
wq 4161 kernel/workqueue.c init_pwq(pwq, wq, &cpu_pools[highpri]);
wq 4163 kernel/workqueue.c mutex_lock(&wq->mutex);
wq 4165 kernel/workqueue.c mutex_unlock(&wq->mutex);
wq 4171 kernel/workqueue.c if (wq->flags & __WQ_ORDERED) {
wq 4172 kernel/workqueue.c ret = apply_workqueue_attrs(wq, ordered_wq_attrs[highpri]);
wq 4174 kernel/workqueue.c WARN(!ret && (wq->pwqs.next != &wq->dfl_pwq->pwqs_node ||
wq 4175 kernel/workqueue.c wq->pwqs.prev != &wq->dfl_pwq->pwqs_node),
wq 4176 kernel/workqueue.c "ordering guarantee broken for workqueue %s\n", wq->name);
wq 4178 kernel/workqueue.c ret = apply_workqueue_attrs(wq, unbound_std_wq_attrs[highpri]);
wq 4201 kernel/workqueue.c static int init_rescuer(struct workqueue_struct *wq)
wq 4206 kernel/workqueue.c if (!(wq->flags & WQ_MEM_RECLAIM))
wq 4213 kernel/workqueue.c rescuer->rescue_wq = wq;
wq 4214 kernel/workqueue.c rescuer->task = kthread_create(rescuer_thread, rescuer, "%s", wq->name);
wq 4221 kernel/workqueue.c wq->rescuer = rescuer;
wq 4235 kernel/workqueue.c struct workqueue_struct *wq;
wq 4254 kernel/workqueue.c tbl_size = nr_node_ids * sizeof(wq->numa_pwq_tbl[0]);
wq 4256 kernel/workqueue.c wq = kzalloc(sizeof(*wq) + tbl_size, GFP_KERNEL);
wq 4257 kernel/workqueue.c if (!wq)
wq 4261 kernel/workqueue.c wq->unbound_attrs = alloc_workqueue_attrs();
wq 4262 kernel/workqueue.c if (!wq->unbound_attrs)
wq 4267 kernel/workqueue.c vsnprintf(wq->name, sizeof(wq->name), fmt, args);
wq 4271 kernel/workqueue.c max_active = wq_clamp_max_active(max_active, flags, wq->name);
wq 4274 kernel/workqueue.c wq->flags = flags;
wq 4275 kernel/workqueue.c wq->saved_max_active = max_active;
wq 4276 kernel/workqueue.c mutex_init(&wq->mutex);
wq 4277 kernel/workqueue.c atomic_set(&wq->nr_pwqs_to_flush, 0);
wq 4278 kernel/workqueue.c INIT_LIST_HEAD(&wq->pwqs);
wq 4279 kernel/workqueue.c INIT_LIST_HEAD(&wq->flusher_queue);
wq 4280 kernel/workqueue.c INIT_LIST_HEAD(&wq->flusher_overflow);
wq 4281 kernel/workqueue.c INIT_LIST_HEAD(&wq->maydays);
wq 4283 kernel/workqueue.c wq_init_lockdep(wq);
wq 4284 kernel/workqueue.c INIT_LIST_HEAD(&wq->list);
wq 4286 kernel/workqueue.c if (alloc_and_link_pwqs(wq) < 0)
wq 4289 kernel/workqueue.c if (wq_online && init_rescuer(wq) < 0)
wq 4292 kernel/workqueue.c if ((wq->flags & WQ_SYSFS) && workqueue_sysfs_register(wq))
wq 4302 kernel/workqueue.c mutex_lock(&wq->mutex);
wq 4303 kernel/workqueue.c for_each_pwq(pwq, wq)
wq 4305 kernel/workqueue.c mutex_unlock(&wq->mutex);
wq 4307 kernel/workqueue.c list_add_tail_rcu(&wq->list, &workqueues);
wq 4311 kernel/workqueue.c return wq;
wq 4314 kernel/workqueue.c wq_unregister_lockdep(wq);
wq 4315 kernel/workqueue.c wq_free_lockdep(wq);
wq 4317 kernel/workqueue.c free_workqueue_attrs(wq->unbound_attrs);
wq 4318 kernel/workqueue.c kfree(wq);
wq 4321 kernel/workqueue.c destroy_workqueue(wq);
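init_rescuer() (4201) above shows that a rescuer thread is created only when WQ_MEM_RECLAIM is set; that flag is what lets a workqueue guarantee forward progress when memory pressure prevents spawning new workers. A hedged one-liner of the allocation side (demo_* names hypothetical):

    #include <linux/workqueue.h>

    static struct workqueue_struct *demo_reclaim_wq;   /* hypothetical */

    static int demo_reclaim_init(void)
    {
            /* WQ_MEM_RECLAIM attaches a dedicated rescuer thread, so work
             * queued here can still run while the system is reclaiming. */
            demo_reclaim_wq = alloc_workqueue("demo_reclaim", WQ_MEM_RECLAIM, 1);
            return demo_reclaim_wq ? 0 : -ENOMEM;
    }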
wq 4332 kernel/workqueue.c void destroy_workqueue(struct workqueue_struct *wq)
wq 4341 kernel/workqueue.c workqueue_sysfs_unregister(wq);
wq 4344 kernel/workqueue.c drain_workqueue(wq);
wq 4347 kernel/workqueue.c if (wq->rescuer) {
wq 4348 kernel/workqueue.c struct worker *rescuer = wq->rescuer;
wq 4352 kernel/workqueue.c wq->rescuer = NULL;
wq 4361 kernel/workqueue.c mutex_lock(&wq->mutex);
wq 4362 kernel/workqueue.c for_each_pwq(pwq, wq) {
wq 4367 kernel/workqueue.c mutex_unlock(&wq->mutex);
wq 4373 kernel/workqueue.c if (WARN_ON((pwq != wq->dfl_pwq) && (pwq->refcnt > 1)) ||
wq 4376 kernel/workqueue.c mutex_unlock(&wq->mutex);
wq 4381 kernel/workqueue.c mutex_unlock(&wq->mutex);
wq 4388 kernel/workqueue.c list_del_rcu(&wq->list);
wq 4391 kernel/workqueue.c if (!(wq->flags & WQ_UNBOUND)) {
wq 4392 kernel/workqueue.c wq_unregister_lockdep(wq);
wq 4397 kernel/workqueue.c call_rcu(&wq->rcu, rcu_free_wq);
wq 4405 kernel/workqueue.c pwq = rcu_access_pointer(wq->numa_pwq_tbl[node]);
wq 4406 kernel/workqueue.c RCU_INIT_POINTER(wq->numa_pwq_tbl[node], NULL);
wq 4414 kernel/workqueue.c pwq = wq->dfl_pwq;
wq 4415 kernel/workqueue.c wq->dfl_pwq = NULL;
wq 4431 kernel/workqueue.c void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
wq 4436 kernel/workqueue.c if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
wq 4439 kernel/workqueue.c max_active = wq_clamp_max_active(max_active, wq->flags, wq->name);
wq 4441 kernel/workqueue.c mutex_lock(&wq->mutex);
wq 4443 kernel/workqueue.c wq->flags &= ~__WQ_ORDERED;
wq 4444 kernel/workqueue.c wq->saved_max_active = max_active;
wq 4446 kernel/workqueue.c for_each_pwq(pwq, wq)
wq 4449 kernel/workqueue.c mutex_unlock(&wq->mutex);
wq 4502 kernel/workqueue.c bool workqueue_congested(int cpu, struct workqueue_struct *wq)
wq 4513 kernel/workqueue.c if (!(wq->flags & WQ_UNBOUND))
wq 4514 kernel/workqueue.c pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
wq 4516 kernel/workqueue.c pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));
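workqueue_set_max_active() (4431) and workqueue_congested() (4502) are the runtime throttling knobs indexed above. A hedged sketch against a hypothetical wq (the clamping behavior comes from wq_clamp_max_active, also indexed here):

    #include <linux/workqueue.h>

    static void demo_throttle(struct workqueue_struct *wq)
    {
            /* Cap concurrently executing work items per pool: */
            workqueue_set_max_active(wq, 4);

            /* Back off if the pwq serving the local CPU has work pending
             * (WORK_CPU_UNBOUND tests the local CPU): */
            if (workqueue_congested(WORK_CPU_UNBOUND, wq))
                    pr_debug("demo: workqueue busy, deferring\n");
    }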
"(RESCUER)" : "", wq 4734 kernel/workqueue.c struct workqueue_struct *wq; wq 4743 kernel/workqueue.c list_for_each_entry_rcu(wq, &workqueues, list) { wq 4747 kernel/workqueue.c for_each_pwq(pwq, wq) { wq 4756 kernel/workqueue.c pr_info("workqueue %s: flags=0x%x\n", wq->name, wq->flags); wq 4758 kernel/workqueue.c for_each_pwq(pwq, wq) { wq 5025 kernel/workqueue.c struct workqueue_struct *wq; wq 5042 kernel/workqueue.c list_for_each_entry(wq, &workqueues, list) wq 5043 kernel/workqueue.c wq_update_unbound_numa(wq, cpu, true); wq 5051 kernel/workqueue.c struct workqueue_struct *wq; wq 5061 kernel/workqueue.c list_for_each_entry(wq, &workqueues, list) wq 5062 kernel/workqueue.c wq_update_unbound_numa(wq, cpu, false); wq 5143 kernel/workqueue.c struct workqueue_struct *wq; wq 5151 kernel/workqueue.c list_for_each_entry(wq, &workqueues, list) { wq 5152 kernel/workqueue.c mutex_lock(&wq->mutex); wq 5153 kernel/workqueue.c for_each_pwq(pwq, wq) wq 5155 kernel/workqueue.c mutex_unlock(&wq->mutex); wq 5177 kernel/workqueue.c struct workqueue_struct *wq; wq 5184 kernel/workqueue.c list_for_each_entry(wq, &workqueues, list) { wq 5185 kernel/workqueue.c if (!(wq->flags & WQ_FREEZABLE)) wq 5192 kernel/workqueue.c for_each_pwq(pwq, wq) { wq 5218 kernel/workqueue.c struct workqueue_struct *wq; wq 5229 kernel/workqueue.c list_for_each_entry(wq, &workqueues, list) { wq 5230 kernel/workqueue.c mutex_lock(&wq->mutex); wq 5231 kernel/workqueue.c for_each_pwq(pwq, wq) wq 5233 kernel/workqueue.c mutex_unlock(&wq->mutex); wq 5245 kernel/workqueue.c struct workqueue_struct *wq; wq 5250 kernel/workqueue.c list_for_each_entry(wq, &workqueues, list) { wq 5251 kernel/workqueue.c if (!(wq->flags & WQ_UNBOUND)) wq 5254 kernel/workqueue.c if (wq->flags & __WQ_ORDERED) wq 5257 kernel/workqueue.c ctx = apply_wqattrs_prepare(wq, wq->unbound_attrs); wq 5338 kernel/workqueue.c struct workqueue_struct *wq; wq 5346 kernel/workqueue.c return wq_dev->wq; wq 5352 kernel/workqueue.c struct workqueue_struct *wq = dev_to_wq(dev); wq 5354 kernel/workqueue.c return scnprintf(buf, PAGE_SIZE, "%d\n", (bool)!(wq->flags & WQ_UNBOUND)); wq 5361 kernel/workqueue.c struct workqueue_struct *wq = dev_to_wq(dev); wq 5363 kernel/workqueue.c return scnprintf(buf, PAGE_SIZE, "%d\n", wq->saved_max_active); wq 5370 kernel/workqueue.c struct workqueue_struct *wq = dev_to_wq(dev); wq 5376 kernel/workqueue.c workqueue_set_max_active(wq, val); wq 5391 kernel/workqueue.c struct workqueue_struct *wq = dev_to_wq(dev); wq 5400 kernel/workqueue.c unbound_pwq_by_node(wq, node)->pool->id); wq 5413 kernel/workqueue.c struct workqueue_struct *wq = dev_to_wq(dev); wq 5416 kernel/workqueue.c mutex_lock(&wq->mutex); wq 5417 kernel/workqueue.c written = scnprintf(buf, PAGE_SIZE, "%d\n", wq->unbound_attrs->nice); wq 5418 kernel/workqueue.c mutex_unlock(&wq->mutex); wq 5424 kernel/workqueue.c static struct workqueue_attrs *wq_sysfs_prep_attrs(struct workqueue_struct *wq) wq 5434 kernel/workqueue.c copy_workqueue_attrs(attrs, wq->unbound_attrs); wq 5441 kernel/workqueue.c struct workqueue_struct *wq = dev_to_wq(dev); wq 5447 kernel/workqueue.c attrs = wq_sysfs_prep_attrs(wq); wq 5453 kernel/workqueue.c ret = apply_workqueue_attrs_locked(wq, attrs); wq 5466 kernel/workqueue.c struct workqueue_struct *wq = dev_to_wq(dev); wq 5469 kernel/workqueue.c mutex_lock(&wq->mutex); wq 5471 kernel/workqueue.c cpumask_pr_args(wq->unbound_attrs->cpumask)); wq 5472 kernel/workqueue.c mutex_unlock(&wq->mutex); wq 5480 kernel/workqueue.c struct workqueue_struct *wq = dev_to_wq(dev); wq 
wq 5973 kernel/workqueue.c struct workqueue_struct *wq;
wq 5996 kernel/workqueue.c list_for_each_entry(wq, &workqueues, list) {
wq 5997 kernel/workqueue.c wq_update_unbound_numa(wq, smp_processor_id(), true);
wq 5998 kernel/workqueue.c WARN(init_rescuer(wq),
wq 6000 kernel/workqueue.c wq->name);
wq 37 mm/hmm.c init_waitqueue_head(&hmm->wq);
wq 92 mm/hmm.c wake_up_all(&hmm->wq);
wq 47 mm/vmalloc.c struct work_struct wq;
wq 55 mm/vmalloc.c struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
wq 1921 mm/vmalloc.c INIT_WORK(&p->wq, free_work);
wq 2280 mm/vmalloc.c schedule_work(&p->wq);
wq 286 net/9p/client.c init_waitqueue_head(&req->wq);
wq 430 net/9p/client.c wake_up(&req->wq);
wq 757 net/9p/client.c err = wait_event_killable(req->wq, req->status >= REQ_STATUS_RCVD);
wq 130 net/9p/trans_fd.c struct work_struct wq;
wq 443 net/9p/trans_fd.c m = container_of(work, struct p9_conn, wq);
wq 507 net/9p/trans_fd.c schedule_work(&m->wq);
wq 589 net/9p/trans_fd.c INIT_WORK(&m->wq, p9_write_work);
wq 640 net/9p/trans_fd.c schedule_work(&m->wq);
wq 678 net/9p/trans_fd.c schedule_work(&m->wq);
wq 868 net/9p/trans_fd.c cancel_work_sync(&m->wq);
wq 500 net/9p/trans_virtio.c err = wait_event_killable(req->wq, req->status >= REQ_STATUS_RCVD);
wq 69 net/9p/trans_xen.c wait_queue_head_t wq;
wq 161 net/9p/trans_xen.c while (wait_event_killable(ring->wq,
wq 262 net/9p/trans_xen.c wake_up_interruptible(&ring->wq);
wq 332 net/9p/trans_xen.c init_waitqueue_head(&ring->wq);
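The 9p hits above show a request sleeping until its transport marks the reply received; wait_event_killable() lets only fatal signals interrupt the sleep, which is why the transports use it for RPC-style waits. A hedged sketch of the same pattern (all demo_* names and DEMO_STATUS_RCVD are hypothetical):

    #include <linux/wait.h>
    #include <linux/sched.h>

    struct demo_req {                      /* hypothetical request */
            wait_queue_head_t wq;
            int status;                    /* written by the transport */
    };
    #define DEMO_STATUS_RCVD 2

    static int demo_wait_reply(struct demo_req *req)
    {
            /* Only fatal signals (e.g. SIGKILL) abort the sleep: */
            return wait_event_killable(req->wq,
                                       req->status >= DEMO_STATUS_RCVD);
    }

    static void demo_complete(struct demo_req *req, int status)
    {
            req->status = status;
            wake_up(&req->wq);
    }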
wq 91 net/atm/common.c struct socket_wq *wq;
wq 94 net/atm/common.c wq = rcu_dereference(sk->sk_wq);
wq 95 net/atm/common.c if (skwq_has_sleeper(wq))
wq 96 net/atm/common.c wake_up(&wq->wait);
wq 110 net/atm/common.c struct socket_wq *wq;
wq 115 net/atm/common.c wq = rcu_dereference(sk->sk_wq);
wq 116 net/atm/common.c if (skwq_has_sleeper(wq))
wq 117 net/atm/common.c wake_up_interruptible(&wq->wait);
wq 383 net/core/page_pool.c static void page_pool_release_retry(struct work_struct *wq)
wq 385 net/core/page_pool.c struct delayed_work *dwq = to_delayed_work(wq);
wq 2415 net/core/sock.c prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait,
wq 2423 net/core/sock.c finish_wait(&sk->sk_lock.wq, &wait);
wq 2773 net/core/sock.c struct socket_wq *wq;
wq 2776 net/core/sock.c wq = rcu_dereference(sk->sk_wq);
wq 2777 net/core/sock.c if (skwq_has_sleeper(wq))
wq 2778 net/core/sock.c wake_up_interruptible_all(&wq->wait);
wq 2784 net/core/sock.c struct socket_wq *wq;
wq 2787 net/core/sock.c wq = rcu_dereference(sk->sk_wq);
wq 2788 net/core/sock.c if (skwq_has_sleeper(wq))
wq 2789 net/core/sock.c wake_up_interruptible_poll(&wq->wait, EPOLLERR);
wq 2796 net/core/sock.c struct socket_wq *wq;
wq 2799 net/core/sock.c wq = rcu_dereference(sk->sk_wq);
wq 2800 net/core/sock.c if (skwq_has_sleeper(wq))
wq 2801 net/core/sock.c wake_up_interruptible_sync_poll(&wq->wait, EPOLLIN | EPOLLPRI |
wq 2809 net/core/sock.c struct socket_wq *wq;
wq 2817 net/core/sock.c wq = rcu_dereference(sk->sk_wq);
wq 2818 net/core/sock.c if (skwq_has_sleeper(wq))
wq 2819 net/core/sock.c wake_up_interruptible_sync_poll(&wq->wait, EPOLLOUT |
wq 2874 net/core/sock.c RCU_INIT_POINTER(sk->sk_wq, &sock->wq);
wq 2967 net/core/sock.c if (waitqueue_active(&sk->sk_lock.wq))
wq 2968 net/core/sock.c wake_up(&sk->sk_lock.wq);
wq 33 net/core/stream.c struct socket_wq *wq;
wq 39 net/core/stream.c wq = rcu_dereference(sk->sk_wq);
wq 40 net/core/stream.c if (skwq_has_sleeper(wq))
wq 41 net/core/stream.c wake_up_interruptible_poll(&wq->wait, EPOLLOUT |
wq 43 net/core/stream.c if (wq && wq->fasync_list && !(sk->sk_shutdown & SEND_SHUTDOWN))
wq 44 net/core/stream.c sock_wake_async(wq, SOCK_WAKE_SPACE, POLL_OUT);
wq 197 net/dccp/output.c struct socket_wq *wq;
wq 200 net/dccp/output.c wq = rcu_dereference(sk->sk_wq);
wq 201 net/dccp/output.c if (skwq_has_sleeper(wq))
wq 202 net/dccp/output.c wake_up_interruptible(&wq->wait);
wq 304 net/iucv/af_iucv.c struct socket_wq *wq;
wq 307 net/iucv/af_iucv.c wq = rcu_dereference(sk->sk_wq);
wq 308 net/iucv/af_iucv.c if (skwq_has_sleeper(wq))
wq 309 net/iucv/af_iucv.c wake_up_interruptible_all(&wq->wait);
wq 52 net/nfc/hci/command.c wake_up(hcp_ew->wq);
wq 61 net/nfc/hci/command.c hcp_ew.wq = &ew_wq;
wq 27 net/nfc/hci/hci.h wait_queue_head_t *wq;
wq 64 net/rxrpc/af_rxrpc.c struct socket_wq *wq = rcu_dereference(sk->sk_wq);
wq 66 net/rxrpc/af_rxrpc.c if (skwq_has_sleeper(wq))
wq 67 net/rxrpc/af_rxrpc.c wake_up_interruptible(&wq->wait);
wq 8918 net/sctp/socket.c struct socket_wq *wq;
wq 8921 net/sctp/socket.c wq = rcu_dereference(sk->sk_wq);
wq 8922 net/sctp/socket.c if (wq) {
wq 8923 net/sctp/socket.c if (waitqueue_active(&wq->wait))
wq 8924 net/sctp/socket.c wake_up_interruptible(&wq->wait);
wq 8931 net/sctp/socket.c sock_wake_async(wq, SOCK_WAKE_SPACE, POLL_OUT);
wq 9110 net/sctp/socket.c struct socket_wq *wq;
wq 9113 net/sctp/socket.c wq = rcu_dereference(sk->sk_wq);
wq 9114 net/sctp/socket.c if (skwq_has_sleeper(wq))
wq 9115 net/sctp/socket.c wake_up_interruptible_sync_poll(&wq->wait, EPOLLIN |
wq 470 net/smc/af_smc.c smc->clcsock->wq.fasync_list =
wq 471 net/smc/af_smc.c smc->sk.sk_socket->wq.fasync_list;
wq 30 net/smc/smc_rx.c struct socket_wq *wq;
wq 35 net/smc/smc_rx.c wq = rcu_dereference(sk->sk_wq);
wq 36 net/smc/smc_rx.c if (skwq_has_sleeper(wq))
wq 37 net/smc/smc_rx.c wake_up_interruptible_sync_poll(&wq->wait, EPOLLIN | EPOLLPRI |
wq 44 net/smc/smc_tx.c struct socket_wq *wq;
wq 50 net/smc/smc_tx.c wq = rcu_dereference(sk->sk_wq);
wq 51 net/smc/smc_tx.c if (skwq_has_sleeper(wq))
wq 52 net/smc/smc_tx.c wake_up_interruptible_poll(&wq->wait,
wq 55 net/smc/smc_tx.c if (wq && wq->fasync_list && !(sk->sk_shutdown & SEND_SHUTDOWN))
wq 56 net/smc/smc_tx.c sock_wake_async(wq, SOCK_WAKE_SPACE, POLL_OUT);
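Nearly every networking hit above repeats one idiom: dereference the RCU-protected sk->sk_wq, gate the wakeup on skwq_has_sleeper() (which pairs a barrier with the wait side), and wake with the poll mask that matches the readiness change. A hedged sketch of that callback shape, modeled on the net/core/stream.c entries (demo_write_space is hypothetical):

    #include <net/sock.h>

    static void demo_write_space(struct sock *sk)
    {
            struct socket_wq *wq;

            rcu_read_lock();
            wq = rcu_dereference(sk->sk_wq);
            /* skwq_has_sleeper() checks for waiters with the memory
             * barrier the sleeping side relies on: */
            if (skwq_has_sleeper(wq))
                    wake_up_interruptible_poll(&wq->wait,
                                               EPOLLOUT | EPOLLWRNORM);
            rcu_read_unlock();
    }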
wq 242 net/socket.c init_waitqueue_head(&ei->socket.wq.wait);
wq 243 net/socket.c ei->socket.wq.fasync_list = NULL;
wq 244 net/socket.c ei->socket.wq.flags = 0;
wq 598 net/socket.c if (sock->wq.fasync_list)
wq 1287 net/socket.c struct socket_wq *wq = &sock->wq;
wq 1293 net/socket.c fasync_helper(fd, filp, on, &wq->fasync_list);
wq 1295 net/socket.c if (!wq->fasync_list)
wq 1306 net/socket.c int sock_wake_async(struct socket_wq *wq, int how, int band)
wq 1308 net/socket.c if (!wq || !wq->fasync_list)
wq 1313 net/socket.c if (test_bit(SOCKWQ_ASYNC_WAITDATA, &wq->flags))
wq 1317 net/socket.c if (!test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &wq->flags))
wq 1322 net/socket.c kill_fasync(&wq->fasync_list, SIGIO, band);
wq 1325 net/socket.c kill_fasync(&wq->fasync_list, SIGURG, band);
wq 321 net/sunrpc/sched.c wait_queue_head_t *wq = bit_waitqueue(m, RPC_TASK_ACTIVE);
wq 328 net/sunrpc/sched.c spin_lock_irqsave(&wq->lock, flags);
wq 331 net/sunrpc/sched.c if (waitqueue_active(wq))
wq 332 net/sunrpc/sched.c __wake_up_locked_key(wq, TASK_NORMAL, &k);
wq 333 net/sunrpc/sched.c spin_unlock_irqrestore(&wq->lock, flags);
wq 364 net/sunrpc/sched.c static void rpc_make_runnable(struct workqueue_struct *wq,
wq 374 net/sunrpc/sched.c queue_work(wq, &task->u.tk_work);
wq 502 net/sunrpc/sched.c static void __rpc_do_wake_up_task_on_wq(struct workqueue_struct *wq,
wq 519 net/sunrpc/sched.c rpc_make_runnable(wq, task);
wq 528 net/sunrpc/sched.c rpc_wake_up_task_on_wq_queue_action_locked(struct workqueue_struct *wq,
wq 536 net/sunrpc/sched.c __rpc_do_wake_up_task_on_wq(wq, queue, task);
wq 653 net/sunrpc/sched.c struct rpc_task *rpc_wake_up_first_on_wq(struct workqueue_struct *wq,
wq 664 net/sunrpc/sched.c task = rpc_wake_up_task_on_wq_queue_action_locked(wq, queue,
wq 1237 net/sunrpc/sched.c struct workqueue_struct *wq;
wq 1243 net/sunrpc/sched.c wq = alloc_workqueue("rpciod", WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
wq 1244 net/sunrpc/sched.c if (!wq)
wq 1246 net/sunrpc/sched.c rpciod_workqueue = wq;
wq 1248 net/sunrpc/sched.c wq = alloc_workqueue("xprtiod", WQ_UNBOUND|WQ_MEM_RECLAIM|WQ_HIGHPRI, 0);
wq 1249 net/sunrpc/sched.c if (!wq)
wq 1251 net/sunrpc/sched.c xprtiod_workqueue = wq;
wq 1254 net/sunrpc/sched.c wq = rpciod_workqueue;
wq 1256 net/sunrpc/sched.c destroy_workqueue(wq);
wq 1263 net/sunrpc/sched.c struct workqueue_struct *wq = NULL;
wq 1269 net/sunrpc/sched.c wq = rpciod_workqueue;
wq 1271 net/sunrpc/sched.c destroy_workqueue(wq);
wq 1272 net/sunrpc/sched.c wq = xprtiod_workqueue;
wq 1274 net/sunrpc/sched.c destroy_workqueue(wq);
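The sunrpc entries at 1237-1274 create a pair of workqueues (rpciod, xprtiod) with unwinding if the second allocation fails, and tear both down on exit. A hedged sketch of that init/teardown pairing, reusing the same flag combinations shown above (demo_* names hypothetical):

    #include <linux/workqueue.h>

    static struct workqueue_struct *demo_a_wq, *demo_b_wq;  /* hypothetical */

    static int demo_queues_start(void)
    {
            demo_a_wq = alloc_workqueue("demo_a",
                                        WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
            if (!demo_a_wq)
                    return -ENOMEM;

            demo_b_wq = alloc_workqueue("demo_b",
                                        WQ_UNBOUND | WQ_MEM_RECLAIM |
                                        WQ_HIGHPRI, 0);
            if (!demo_b_wq) {
                    destroy_workqueue(demo_a_wq);   /* unwind first queue */
                    demo_a_wq = NULL;
                    return -ENOMEM;
            }
            return 0;
    }

    static void demo_queues_stop(void)
    {
            destroy_workqueue(demo_a_wq);
            destroy_workqueue(demo_b_wq);
            demo_a_wq = demo_b_wq = NULL;
    }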
wq 903 net/sunrpc/xprtsock.c struct socket_wq *wq;
wq 906 net/sunrpc/xprtsock.c wq = rcu_dereference(sk->sk_wq);
wq 907 net/sunrpc/xprtsock.c set_bit(SOCKWQ_ASYNC_NOSPACE, &wq->flags);
wq 1577 net/sunrpc/xprtsock.c struct socket_wq *wq;
wq 1589 net/sunrpc/xprtsock.c wq = rcu_dereference(sk->sk_wq);
wq 1590 net/sunrpc/xprtsock.c if (!wq || test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &wq->flags) == 0)
wq 1947 net/tipc/socket.c struct socket_wq *wq;
wq 1950 net/tipc/socket.c wq = rcu_dereference(sk->sk_wq);
wq 1951 net/tipc/socket.c if (skwq_has_sleeper(wq))
wq 1952 net/tipc/socket.c wake_up_interruptible_sync_poll(&wq->wait, EPOLLOUT |
wq 1964 net/tipc/socket.c struct socket_wq *wq;
wq 1967 net/tipc/socket.c wq = rcu_dereference(sk->sk_wq);
wq 1968 net/tipc/socket.c if (skwq_has_sleeper(wq))
wq 1969 net/tipc/socket.c wake_up_interruptible_sync_poll(&wq->wait, EPOLLIN |
wq 454 net/unix/af_unix.c struct socket_wq *wq;
wq 458 net/unix/af_unix.c wq = rcu_dereference(sk->sk_wq);
wq 459 net/unix/af_unix.c if (skwq_has_sleeper(wq))
wq 460 net/unix/af_unix.c wake_up_interruptible_sync_poll(&wq->wait,
wq 2268 sound/soc/codecs/cs43130.c queue_work(cs43130->wq, &cs43130->work);
wq 2321 sound/soc/codecs/cs43130.c cs43130->wq = create_singlethread_workqueue("cs43130_hp");
wq 2322 sound/soc/codecs/cs43130.c if (!cs43130->wq)
wq 2598 sound/soc/codecs/cs43130.c flush_workqueue(cs43130->wq);
wq 532 sound/soc/codecs/cs43130.h struct workqueue_struct *wq;
wq 44 tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/workqueues.h struct workqueue_struct *wq;
wq 61 tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/workqueues.h static inline bool queue_work(struct workqueue_struct *wq,
wq 68 tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/workqueues.h static inline bool queue_delayed_work(struct workqueue_struct *wq,
wq 631 virt/kvm/arm/arm.c struct swait_queue_head *wq = kvm_arch_vcpu_wq(vcpu);
wq 633 virt/kvm/arm/arm.c swait_event_interruptible_exclusive(*wq, ((!vcpu->arch.power_off) &&
wq 97 virt/kvm/async_pf.c if (swq_has_sleeper(&vcpu->wq))
wq 98 virt/kvm/async_pf.c swake_up_one(&vcpu->wq);
wq 334 virt/kvm/kvm_main.c init_swait_queue_head(&vcpu->wq);
wq 2499 virt/kvm/kvm_main.c prepare_to_swait_exclusive(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);
wq 2508 virt/kvm/kvm_main.c finish_swait(&vcpu->wq, &wait);
wq 2685 virt/kvm/kvm_main.c if (swait_active(&vcpu->wq) && !vcpu_dy_runnable(vcpu))
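The kvm_main.c hits close the listing with the open-coded form of an swait sleep: prepare_to_swait_exclusive() queues the task before the condition is rechecked, so a concurrent swake_up_one() cannot be lost. A hedged sketch of that halt-style loop (demo_block and should_run are hypothetical):

    #include <linux/swait.h>
    #include <linux/sched/signal.h>

    static void demo_block(struct swait_queue_head *wq,
                           bool (*should_run)(void))
    {
            DECLARE_SWAITQUEUE(wait);

            for (;;) {
                    /* Queue first, then recheck, so a wakeup racing with
                     * the check still leaves us runnable: */
                    prepare_to_swait_exclusive(wq, &wait, TASK_INTERRUPTIBLE);
                    if (should_run() || signal_pending(current))
                            break;
                    schedule();
            }
            finish_swait(wq, &wait);
    }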