/linux-4.4.14/drivers/net/ethernet/cisco/enic/ |
D | vnic_wq.c | 31 static int vnic_wq_alloc_bufs(struct vnic_wq *wq) in vnic_wq_alloc_bufs() argument 34 unsigned int i, j, count = wq->ring.desc_count; in vnic_wq_alloc_bufs() 38 wq->bufs[i] = kzalloc(VNIC_WQ_BUF_BLK_SZ(count), GFP_ATOMIC); in vnic_wq_alloc_bufs() 39 if (!wq->bufs[i]) in vnic_wq_alloc_bufs() 44 buf = wq->bufs[i]; in vnic_wq_alloc_bufs() 47 buf->desc = (u8 *)wq->ring.descs + in vnic_wq_alloc_bufs() 48 wq->ring.desc_size * buf->index; in vnic_wq_alloc_bufs() 50 buf->next = wq->bufs[0]; in vnic_wq_alloc_bufs() 54 buf->next = wq->bufs[i + 1]; in vnic_wq_alloc_bufs() 64 wq->to_use = wq->to_clean = wq->bufs[0]; in vnic_wq_alloc_bufs() [all …]
|
D | vnic_wq.h | 99 struct vnic_wq wq; member 103 static inline unsigned int vnic_wq_desc_avail(struct vnic_wq *wq) in vnic_wq_desc_avail() argument 106 return wq->ring.desc_avail; in vnic_wq_desc_avail() 109 static inline unsigned int vnic_wq_desc_used(struct vnic_wq *wq) in vnic_wq_desc_used() argument 112 return wq->ring.desc_count - wq->ring.desc_avail - 1; in vnic_wq_desc_used() 115 static inline void *vnic_wq_next_desc(struct vnic_wq *wq) in vnic_wq_next_desc() argument 117 return wq->to_use->desc; in vnic_wq_next_desc() 120 static inline void vnic_wq_doorbell(struct vnic_wq *wq) in vnic_wq_doorbell() argument 128 iowrite32(wq->to_use->index, &wq->ctrl->posted_index); in vnic_wq_doorbell() 131 static inline void vnic_wq_post(struct vnic_wq *wq, in vnic_wq_post() argument [all …]
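The enic helpers above encode the usual one-slot-reserved ring arithmetic: with desc_count slots, at most desc_count - 1 may ever be in use, so vnic_wq_desc_used() computes desc_count - desc_avail - 1. A minimal userspace model of that invariant (a sketch; the struct and field names mirror the driver, but the program itself is invented):

    #include <assert.h>
    #include <stdio.h>

    /* Cut-down model of the vnic_wq descriptor accounting. */
    struct ring {
            unsigned int desc_count;   /* total slots in the ring      */
            unsigned int desc_avail;   /* slots still free for posting */
    };

    static unsigned int ring_used(const struct ring *r)
    {
            /* Mirrors vnic_wq_desc_used(): one slot is always kept
             * unused so a full ring can be told from an empty one. */
            return r->desc_count - r->desc_avail - 1;
    }

    int main(void)
    {
            struct ring r = { .desc_count = 8, .desc_avail = 7 };

            assert(ring_used(&r) == 0);   /* freshly initialized: empty */
            r.desc_avail -= 3;            /* post three descriptors     */
            assert(ring_used(&r) == 3);
            printf("used=%u avail=%u\n", ring_used(&r), r.desc_avail);
            return 0;
    }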
|
D | enic_res.h | 43 static inline void enic_queue_wq_desc_ex(struct vnic_wq *wq, in enic_queue_wq_desc_ex() argument 49 struct wq_enet_desc *desc = vnic_wq_next_desc(wq); in enic_queue_wq_desc_ex() 65 vnic_wq_post(wq, os_buf, dma_addr, len, sop, eop, desc_skip_cnt, in enic_queue_wq_desc_ex() 69 static inline void enic_queue_wq_desc_cont(struct vnic_wq *wq, in enic_queue_wq_desc_cont() argument 73 enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len, in enic_queue_wq_desc_cont() 78 static inline void enic_queue_wq_desc(struct vnic_wq *wq, void *os_buf, in enic_queue_wq_desc() argument 82 enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len, in enic_queue_wq_desc() 88 static inline void enic_queue_wq_desc_csum(struct vnic_wq *wq, in enic_queue_wq_desc_csum() argument 93 enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len, in enic_queue_wq_desc_csum() 100 static inline void enic_queue_wq_desc_csum_l4(struct vnic_wq *wq, in enic_queue_wq_desc_csum_l4() argument [all …]
|
D | enic.h | 169 ____cacheline_aligned struct vnic_wq wq[ENIC_WQ_MAX]; member 225 static inline unsigned int enic_cq_wq(struct enic *enic, unsigned int wq) in enic_cq_wq() argument 227 return enic->rq_count + wq; in enic_cq_wq() 252 unsigned int wq) in enic_msix_wq_intr() argument 254 return enic->cq[enic_cq_wq(enic, wq)].interrupt_offset; in enic_msix_wq_intr()
|
D | enic_main.c | 205 static void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf) in enic_free_wq_buf() argument 207 struct enic *enic = vnic_dev_priv(wq->vdev); in enic_free_wq_buf() 220 static void enic_wq_free_buf(struct vnic_wq *wq, in enic_wq_free_buf() argument 223 enic_free_wq_buf(wq, buf); in enic_wq_free_buf() 233 vnic_wq_service(&enic->wq[q_number], cq_desc, in enic_wq_service() 238 vnic_wq_desc_avail(&enic->wq[q_number]) >= in enic_wq_service() 254 error_status = vnic_wq_error_status(&enic->wq[i]); in enic_log_q_error() 425 static int enic_queue_wq_skb_cont(struct enic *enic, struct vnic_wq *wq, in enic_queue_wq_skb_cont() argument 440 enic_queue_wq_desc_cont(wq, skb, dma_addr, skb_frag_size(frag), in enic_queue_wq_skb_cont() 448 static int enic_queue_wq_skb_vlan(struct enic *enic, struct vnic_wq *wq, in enic_queue_wq_skb_vlan() argument [all …]
|
D | vnic_dev.c | 397 err = enic_wq_devcmd2_alloc(vdev, &vdev->devcmd2->wq, DEVCMD2_RING_SIZE, in vnic_dev_init_devcmd2() 402 fetch_index = ioread32(&vdev->devcmd2->wq.ctrl->fetch_index); in vnic_dev_init_devcmd2() 409 enic_wq_init_start(&vdev->devcmd2->wq, 0, fetch_index, fetch_index, 0, in vnic_dev_init_devcmd2() 412 vnic_wq_enable(&vdev->devcmd2->wq); in vnic_dev_init_devcmd2() 420 vdev->devcmd2->cmd_ring = vdev->devcmd2->wq.ring.descs; in vnic_dev_init_devcmd2() 421 vdev->devcmd2->wq_ctrl = vdev->devcmd2->wq.ctrl; in vnic_dev_init_devcmd2() 437 vnic_wq_disable(&vdev->devcmd2->wq); in vnic_dev_init_devcmd2() 438 vnic_wq_free(&vdev->devcmd2->wq); in vnic_dev_init_devcmd2() 449 vnic_wq_disable(&vdev->devcmd2->wq); in vnic_dev_deinit_devcmd2() 450 vnic_wq_free(&vdev->devcmd2->wq); in vnic_dev_deinit_devcmd2()
|
D | enic_res.c | 187 vnic_wq_free(&enic->wq[i]); in enic_free_vnic_resources() 252 vnic_wq_init(&enic->wq[i], in enic_init_vnic_resources() 333 err = vnic_wq_alloc(enic->vdev, &enic->wq[i], i, in enic_alloc_vnic_resources()
|
/linux-4.4.14/drivers/scsi/snic/ |
D | vnic_wq.c | 26 static inline int vnic_wq_get_ctrl(struct vnic_dev *vdev, struct vnic_wq *wq, in vnic_wq_get_ctrl() argument 29 wq->ctrl = svnic_dev_get_res(vdev, res_type, index); in vnic_wq_get_ctrl() 30 if (!wq->ctrl) in vnic_wq_get_ctrl() 36 static inline int vnic_wq_alloc_ring(struct vnic_dev *vdev, struct vnic_wq *wq, in vnic_wq_alloc_ring() argument 39 return svnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, in vnic_wq_alloc_ring() 43 static int vnic_wq_alloc_bufs(struct vnic_wq *wq) in vnic_wq_alloc_bufs() argument 46 unsigned int i, j, count = wq->ring.desc_count; in vnic_wq_alloc_bufs() 50 wq->bufs[i] = kzalloc(VNIC_WQ_BUF_BLK_SZ, GFP_ATOMIC); in vnic_wq_alloc_bufs() 51 if (!wq->bufs[i]) { in vnic_wq_alloc_bufs() 59 buf = wq->bufs[i]; in vnic_wq_alloc_bufs() [all …]
|
D | vnic_wq.h | 85 static inline unsigned int svnic_wq_desc_avail(struct vnic_wq *wq) in svnic_wq_desc_avail() argument 88 return wq->ring.desc_avail; in svnic_wq_desc_avail() 91 static inline unsigned int svnic_wq_desc_used(struct vnic_wq *wq) in svnic_wq_desc_used() argument 94 return wq->ring.desc_count - wq->ring.desc_avail - 1; in svnic_wq_desc_used() 97 static inline void *svnic_wq_next_desc(struct vnic_wq *wq) in svnic_wq_next_desc() argument 99 return wq->to_use->desc; in svnic_wq_next_desc() 102 static inline void svnic_wq_post(struct vnic_wq *wq, in svnic_wq_post() argument 106 struct vnic_wq_buf *buf = wq->to_use; in svnic_wq_post() 121 iowrite32(buf->index, &wq->ctrl->posted_index); in svnic_wq_post() 123 wq->to_use = buf; in svnic_wq_post() [all …]
|
D | vnic_dev.c | 37 struct vnic_wq wq; member 364 &dc2c->wq, in svnic_dev_init_devcmd2() 370 fetch_idx = ioread32(&dc2c->wq.ctrl->fetch_index); in svnic_dev_init_devcmd2() 381 vnic_wq_init_start(&dc2c->wq, 0, fetch_idx, fetch_idx, 0, 0); in svnic_dev_init_devcmd2() 382 svnic_wq_enable(&dc2c->wq); in svnic_dev_init_devcmd2() 391 dc2c->cmd_ring = (struct vnic_devcmd2 *) dc2c->wq.ring.descs; in svnic_dev_init_devcmd2() 392 dc2c->wq_ctrl = dc2c->wq.ctrl; in svnic_dev_init_devcmd2() 409 svnic_wq_disable(&dc2c->wq); in svnic_dev_init_devcmd2() 410 svnic_wq_free(&dc2c->wq); in svnic_dev_init_devcmd2() 427 svnic_wq_disable(&dc2c->wq); in vnic_dev_deinit_devcmd2() [all …]
|
D | snic_res.h | 63 snic_queue_wq_eth_desc(struct vnic_wq *wq, in snic_queue_wq_eth_desc() argument 71 struct wq_enet_desc *desc = svnic_wq_next_desc(wq); in snic_queue_wq_eth_desc() 86 svnic_wq_post(wq, os_buf, dma_addr, len, 1, 1); in snic_queue_wq_eth_desc()
|
D | snic_io.c | 34 snic_wq_cmpl_frame_send(struct vnic_wq *wq, in snic_wq_cmpl_frame_send() argument 39 struct snic *snic = svnic_dev_priv(wq->vdev); in snic_wq_cmpl_frame_send() 69 svnic_wq_service(&snic->wq[q_num], in snic_wq_cmpl_handler_cont() 97 snic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf) in snic_free_wq_buf() argument 101 struct snic *snic = svnic_dev_priv(wq->vdev); in snic_free_wq_buf() 162 if (!svnic_wq_desc_avail(snic->wq)) { in snic_queue_wq_desc() 171 snic_queue_wq_eth_desc(&snic->wq[q_num], os_buf, pa, len, 0, 0, 1); in snic_queue_wq_desc()
|
D | snic_res.c | 137 svnic_wq_free(&snic->wq[i]); in snic_free_vnic_res() 178 &snic->wq[i], in snic_alloc_vnic_res() 224 svnic_wq_init(&snic->wq[i], in snic_alloc_vnic_res() 288 err_status = ioread32(&snic->wq[i].ctrl->error_status); in snic_log_q_error()
|
D | snic_isr.c | 159 unsigned int n = ARRAY_SIZE(snic->wq); in snic_set_intr_mode() 168 BUILD_BUG_ON((ARRAY_SIZE(snic->wq) + SNIC_CQ_IO_CMPL_MAX) > in snic_set_intr_mode()
|
D | snic_main.c | 232 ret = svnic_wq_disable(&snic->wq[i]); in snic_cleanup() 244 svnic_wq_clean(&snic->wq[i], snic_free_wq_buf); in snic_cleanup() 651 svnic_wq_enable(&snic->wq[i]); in snic_probe() 711 rc = svnic_wq_disable(&snic->wq[i]); in snic_probe()
|
/linux-4.4.14/drivers/scsi/fnic/ |
D | vnic_wq.c | 27 static int vnic_wq_alloc_bufs(struct vnic_wq *wq) in vnic_wq_alloc_bufs() argument 31 unsigned int i, j, count = wq->ring.desc_count; in vnic_wq_alloc_bufs() 34 vdev = wq->vdev; in vnic_wq_alloc_bufs() 37 wq->bufs[i] = kzalloc(VNIC_WQ_BUF_BLK_SZ, GFP_ATOMIC); in vnic_wq_alloc_bufs() 38 if (!wq->bufs[i]) { in vnic_wq_alloc_bufs() 45 buf = wq->bufs[i]; in vnic_wq_alloc_bufs() 48 buf->desc = (u8 *)wq->ring.descs + in vnic_wq_alloc_bufs() 49 wq->ring.desc_size * buf->index; in vnic_wq_alloc_bufs() 51 buf->next = wq->bufs[0]; in vnic_wq_alloc_bufs() 54 buf->next = wq->bufs[i + 1]; in vnic_wq_alloc_bufs() [all …]
|
D | vnic_wq_copy.h | 36 static inline unsigned int vnic_wq_copy_desc_avail(struct vnic_wq_copy *wq) in vnic_wq_copy_desc_avail() argument 38 return wq->ring.desc_avail; in vnic_wq_copy_desc_avail() 41 static inline unsigned int vnic_wq_copy_desc_in_use(struct vnic_wq_copy *wq) in vnic_wq_copy_desc_in_use() argument 43 return wq->ring.desc_count - 1 - wq->ring.desc_avail; in vnic_wq_copy_desc_in_use() 46 static inline void *vnic_wq_copy_next_desc(struct vnic_wq_copy *wq) in vnic_wq_copy_next_desc() argument 48 struct fcpio_host_req *desc = wq->ring.descs; in vnic_wq_copy_next_desc() 49 return &desc[wq->to_use_index]; in vnic_wq_copy_next_desc() 52 static inline void vnic_wq_copy_post(struct vnic_wq_copy *wq) in vnic_wq_copy_post() argument 55 ((wq->to_use_index + 1) == wq->ring.desc_count) ? in vnic_wq_copy_post() 56 (wq->to_use_index = 0) : (wq->to_use_index++); in vnic_wq_copy_post() [all …]
|
D | vnic_wq_copy.c | 25 void vnic_wq_copy_enable(struct vnic_wq_copy *wq) in vnic_wq_copy_enable() argument 27 iowrite32(1, &wq->ctrl->enable); in vnic_wq_copy_enable() 30 int vnic_wq_copy_disable(struct vnic_wq_copy *wq) in vnic_wq_copy_disable() argument 34 iowrite32(0, &wq->ctrl->enable); in vnic_wq_copy_disable() 38 if (!(ioread32(&wq->ctrl->running))) in vnic_wq_copy_disable() 45 wq->index, ioread32(&wq->ctrl->fetch_index), in vnic_wq_copy_disable() 46 ioread32(&wq->ctrl->posted_index)); in vnic_wq_copy_disable() 51 void vnic_wq_copy_clean(struct vnic_wq_copy *wq, in vnic_wq_copy_clean() argument 52 void (*q_clean)(struct vnic_wq_copy *wq, in vnic_wq_copy_clean() argument 55 BUG_ON(ioread32(&wq->ctrl->enable)); in vnic_wq_copy_clean() [all …]
|
D | vnic_wq.h | 96 static inline unsigned int vnic_wq_desc_avail(struct vnic_wq *wq) in vnic_wq_desc_avail() argument 99 return wq->ring.desc_avail; in vnic_wq_desc_avail() 102 static inline unsigned int vnic_wq_desc_used(struct vnic_wq *wq) in vnic_wq_desc_used() argument 105 return wq->ring.desc_count - wq->ring.desc_avail - 1; in vnic_wq_desc_used() 108 static inline void *vnic_wq_next_desc(struct vnic_wq *wq) in vnic_wq_next_desc() argument 110 return wq->to_use->desc; in vnic_wq_next_desc() 113 static inline void vnic_wq_post(struct vnic_wq *wq, in vnic_wq_post() argument 117 struct vnic_wq_buf *buf = wq->to_use; in vnic_wq_post() 132 iowrite32(buf->index, &wq->ctrl->posted_index); in vnic_wq_post() 134 wq->to_use = buf; in vnic_wq_post() [all …]
|
D | fnic_res.h | 30 static inline void fnic_queue_wq_desc(struct vnic_wq *wq, in fnic_queue_wq_desc() argument 37 struct wq_enet_desc *desc = vnic_wq_next_desc(wq); in fnic_queue_wq_desc() 51 vnic_wq_post(wq, os_buf, dma_addr, len, sop, eop); in fnic_queue_wq_desc() 54 static inline void fnic_queue_wq_eth_desc(struct vnic_wq *wq, in fnic_queue_wq_eth_desc() argument 61 struct wq_enet_desc *desc = vnic_wq_next_desc(wq); in fnic_queue_wq_eth_desc() 76 vnic_wq_post(wq, os_buf, dma_addr, len, 1, 1); in fnic_queue_wq_eth_desc() 79 static inline void fnic_queue_wq_copy_desc_icmnd_16(struct vnic_wq_copy *wq, in fnic_queue_wq_copy_desc_icmnd_16() argument 91 struct fcpio_host_req *desc = vnic_wq_copy_next_desc(wq); in fnic_queue_wq_copy_desc_icmnd_16() 121 vnic_wq_copy_post(wq); in fnic_queue_wq_copy_desc_icmnd_16() 124 static inline void fnic_queue_wq_copy_desc_itmf(struct vnic_wq_copy *wq, in fnic_queue_wq_copy_desc_itmf() argument [all …]
|
D | fnic_scsi.c | 143 static int free_wq_copy_descs(struct fnic *fnic, struct vnic_wq_copy *wq) in free_wq_copy_descs() argument 153 if (wq->to_clean_index <= fnic->fw_ack_index[0]) in free_wq_copy_descs() 154 wq->ring.desc_avail += (fnic->fw_ack_index[0] in free_wq_copy_descs() 155 - wq->to_clean_index + 1); in free_wq_copy_descs() 157 wq->ring.desc_avail += (wq->ring.desc_count in free_wq_copy_descs() 158 - wq->to_clean_index in free_wq_copy_descs() 166 wq->to_clean_index = in free_wq_copy_descs() 167 (fnic->fw_ack_index[0] + 1) % wq->ring.desc_count; in free_wq_copy_descs() 208 struct vnic_wq_copy *wq = &fnic->wq_copy[0]; in fnic_fw_reset_handler() local 224 if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0]) in fnic_fw_reset_handler() [all …]
|
D | fnic_fcs.c | 991 struct vnic_wq *wq = &fnic->wq[0]; in fnic_eth_send() local 1026 if (!vnic_wq_desc_avail(wq)) in fnic_eth_send() 1029 fnic_queue_wq_eth_desc(wq, skb, pa, skb->len, in fnic_eth_send() 1047 struct vnic_wq *wq = &fnic->wq[0]; in fnic_send_frame() local 1109 if (!vnic_wq_desc_avail(wq)) { in fnic_send_frame() 1116 fnic_queue_wq_desc(wq, skb, pa, tot_len, fr_eof(fp), in fnic_send_frame() 1218 static void fnic_wq_complete_frame_send(struct vnic_wq *wq, in fnic_wq_complete_frame_send() argument 1224 struct fnic *fnic = vnic_dev_priv(wq->vdev); in fnic_wq_complete_frame_send() 1241 vnic_wq_service(&fnic->wq[q_number], cq_desc, completed_index, in fnic_wq_cmpl_handler_cont() 1264 void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf) in fnic_free_wq_buf() argument [all …]
|
D | fnic.h | 305 ____cacheline_aligned struct vnic_wq wq[FNIC_WQ_MAX]; member 330 void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf); 355 void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq,
|
D | fnic_res.c | 215 vnic_wq_free(&fnic->wq[i]); in fnic_free_vnic_resources() 256 err = vnic_wq_alloc(fnic->vdev, &fnic->wq[i], i, in fnic_alloc_vnic_resources() 365 vnic_wq_init(&fnic->wq[i], in fnic_alloc_vnic_resources()
|
/linux-4.4.14/drivers/net/ethernet/mellanox/mlx5/core/ |
D | wq.h | 78 void *wqc, struct mlx5_wq_cyc *wq, 80 u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq); 83 void *cqc, struct mlx5_cqwq *wq, 85 u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq); 88 void *wqc, struct mlx5_wq_ll *wq, 90 u32 mlx5_wq_ll_get_size(struct mlx5_wq_ll *wq); 94 static inline u16 mlx5_wq_cyc_ctr2ix(struct mlx5_wq_cyc *wq, u16 ctr) in mlx5_wq_cyc_ctr2ix() argument 96 return ctr & wq->sz_m1; in mlx5_wq_cyc_ctr2ix() 99 static inline void *mlx5_wq_cyc_get_wqe(struct mlx5_wq_cyc *wq, u16 ix) in mlx5_wq_cyc_get_wqe() argument 101 return wq->buf + (ix << wq->log_stride); in mlx5_wq_cyc_get_wqe() [all …]
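mlx5_wq_cyc_ctr2ix() and mlx5_wq_cyc_get_wqe() above rely on the queue size being a power of two: a free-running counter is reduced with ctr & sz_m1, and a WQE address is computed as buf + (ix << log_stride). A standalone sketch of the same indexing (field names kept from the driver; the buffer and counter values are invented):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Cut-down model of struct mlx5_wq_cyc: size and stride are
     * powers of two, stored as a mask and a shift. */
    struct wq_cyc {
            void    *buf;
            uint16_t sz_m1;       /* size - 1, size a power of two */
            uint8_t  log_stride;  /* log2(bytes per WQE)           */
    };

    static uint16_t ctr2ix(const struct wq_cyc *wq, uint16_t ctr)
    {
            return ctr & wq->sz_m1;            /* cheap modulo */
    }

    static void *get_wqe(const struct wq_cyc *wq, uint16_t ix)
    {
            return (char *)wq->buf + ((size_t)ix << wq->log_stride);
    }

    int main(void)
    {
            struct wq_cyc wq = {
                    .buf = calloc(16, 64), .sz_m1 = 15, .log_stride = 6,
            };
            uint16_t pc = 4711;            /* free-running producer counter */
            uint16_t ix = ctr2ix(&wq, pc); /* 4711 & 15 == 7 */

            printf("ctr %u -> index %u -> byte offset %zu\n", pc, ix,
                   (size_t)((char *)get_wqe(&wq, ix) - (char *)wq.buf));
            free(wq.buf);
            return 0;
    }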
|
D | wq.c | 37 u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq) in mlx5_wq_cyc_get_size() argument 39 return (u32)wq->sz_m1 + 1; in mlx5_wq_cyc_get_size() 42 u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq) in mlx5_cqwq_get_size() argument 44 return wq->sz_m1 + 1; in mlx5_cqwq_get_size() 47 u32 mlx5_wq_ll_get_size(struct mlx5_wq_ll *wq) in mlx5_wq_ll_get_size() argument 49 return (u32)wq->sz_m1 + 1; in mlx5_wq_ll_get_size() 52 static u32 mlx5_wq_cyc_get_byte_size(struct mlx5_wq_cyc *wq) in mlx5_wq_cyc_get_byte_size() argument 54 return mlx5_wq_cyc_get_size(wq) << wq->log_stride; in mlx5_wq_cyc_get_byte_size() 57 static u32 mlx5_cqwq_get_byte_size(struct mlx5_cqwq *wq) in mlx5_cqwq_get_byte_size() argument 59 return mlx5_cqwq_get_size(wq) << wq->log_stride; in mlx5_cqwq_get_byte_size() [all …]
|
D | srq.c | 83 void *wq = MLX5_ADDR_OF(rmpc, rmpc, wq); in rmpc_srqc_reformat() local 99 MLX5_SET(wq, wq, wq_signature, MLX5_GET(srqc, srqc, wq_signature)); in rmpc_srqc_reformat() 100 MLX5_SET(wq, wq, log_wq_pg_sz, MLX5_GET(srqc, srqc, log_page_size)); in rmpc_srqc_reformat() 101 MLX5_SET(wq, wq, log_wq_stride, MLX5_GET(srqc, srqc, log_rq_stride) + 4); in rmpc_srqc_reformat() 102 MLX5_SET(wq, wq, log_wq_sz, MLX5_GET(srqc, srqc, log_srq_size)); in rmpc_srqc_reformat() 103 MLX5_SET(wq, wq, page_offset, MLX5_GET(srqc, srqc, page_offset)); in rmpc_srqc_reformat() 104 MLX5_SET(wq, wq, lwm, MLX5_GET(srqc, srqc, lwm)); in rmpc_srqc_reformat() 105 MLX5_SET(wq, wq, pd, MLX5_GET(srqc, srqc, pd)); in rmpc_srqc_reformat() 106 MLX5_SET64(wq, wq, dbr_addr, MLX5_GET64(srqc, srqc, dbr_addr)); in rmpc_srqc_reformat() 123 MLX5_SET(srqc, srqc, wq_signature, MLX5_GET(wq, wq, wq_signature)); in rmpc_srqc_reformat() [all …]
|
D | en_rx.c | 75 struct mlx5_wq_ll *wq = &rq->wq; in mlx5e_post_rx_wqes() local 80 while (!mlx5_wq_ll_is_full(wq)) { in mlx5e_post_rx_wqes() 81 struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(wq, wq->head); in mlx5e_post_rx_wqes() 83 if (unlikely(mlx5e_alloc_rx_wqe(rq, wqe, wq->head))) in mlx5e_post_rx_wqes() 86 mlx5_wq_ll_push(wq, be16_to_cpu(wqe->next.next_wqe_index)); in mlx5e_post_rx_wqes() 92 mlx5_wq_ll_update_db_record(wq); in mlx5e_post_rx_wqes() 94 return !mlx5_wq_ll_is_full(wq); in mlx5e_post_rx_wqes() 238 mlx5_cqwq_pop(&cq->wq); in mlx5e_poll_rx_cq() 242 wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter); in mlx5e_poll_rx_cq() 263 mlx5_wq_ll_pop(&rq->wq, wqe_counter_be, in mlx5e_poll_rx_cq() [all …]
|
D | en_main.c | 38 struct mlx5_wq_param wq; member 43 struct mlx5_wq_param wq; member 49 struct mlx5_wq_param wq; member 316 void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq); in mlx5e_create_rq() 321 param->wq.db_numa_node = cpu_to_node(c->cpu); in mlx5e_create_rq() 323 err = mlx5_wq_ll_create(mdev, ¶m->wq, rqc_wq, &rq->wq, in mlx5e_create_rq() 328 rq->wq.db = &rq->wq.db[MLX5_RCV_DBR]; in mlx5e_create_rq() 330 wq_sz = mlx5_wq_ll_get_size(&rq->wq); in mlx5e_create_rq() 343 struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, i); in mlx5e_create_rq() 378 void *wq; in mlx5e_enable_rq() local [all …]
|
D | en_txrx.c | 37 struct mlx5_cqwq *wq = &cq->wq; in mlx5e_get_cqe() local 38 u32 ci = mlx5_cqwq_get_ci(wq); in mlx5e_get_cqe() 39 struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(wq, ci); in mlx5e_get_cqe() 41 int sw_ownership_val = mlx5_cqwq_get_wrap_cnt(wq) & 1; in mlx5e_get_cqe()
|
D | en_tx.c | 43 struct mlx5_wq_cyc *wq = &sq->wq; in mlx5e_send_nop() local 45 u16 pi = sq->pc & wq->sz_m1; in mlx5e_send_nop() 46 struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi); in mlx5e_send_nop() 159 struct mlx5_wq_cyc *wq = &sq->wq; in mlx5e_sq_xmit() local 161 u16 pi = sq->pc & wq->sz_m1; in mlx5e_sq_xmit() 162 struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi); in mlx5e_sq_xmit() 290 while ((sq->pc & wq->sz_m1) > sq->edge) in mlx5e_sq_xmit() 350 mlx5_cqwq_pop(&cq->wq); in mlx5e_poll_tx_cq() 361 ci = sqcc & sq->wq.sz_m1; in mlx5e_poll_tx_cq() 384 mlx5_cqwq_update_db_record(&cq->wq); in mlx5e_poll_tx_cq()
|
D | en.h | 295 struct mlx5_cqwq wq; member 310 struct mlx5_wq_ll wq; member 374 struct mlx5_wq_cyc wq; member 396 return (((sq->wq.sz_m1 & (sq->cc - sq->pc)) >= n) || in mlx5e_sq_has_room_for() 598 *sq->wq.db = cpu_to_be32(sq->pc); in mlx5e_tx_notify_hw() 623 mlx5_cq_arm(mcq, MLX5_CQ_DB_REQ_NOT, mcq->uar->map, NULL, cq->wq.cc); in mlx5e_cq_arm()
|
D | health.c | 270 queue_work(health->wq, &health->work); in poll_health() 299 destroy_workqueue(health->wq); in mlx5_health_cleanup() 314 health->wq = create_singlethread_workqueue(name); in mlx5_health_init() 316 if (!health->wq) in mlx5_health_init()
|
D | transobj.c | 276 void *wq; in mlx5_core_arm_rmp() local 286 wq = MLX5_ADDR_OF(rmpc, rmpc, wq); in mlx5_core_arm_rmp() 290 MLX5_SET(wq, wq, lwm, lwm); in mlx5_core_arm_rmp()
|
D | Makefile | 6 mlx5_core-$(CONFIG_MLX5_CORE_EN) += wq.o flow_table.o \
|
/linux-4.4.14/fs/autofs4/ |
D | waitq.c | 29 struct autofs_wait_queue *wq, *nwq; in autofs4_catatonic_mode() local 40 wq = sbi->queues; in autofs4_catatonic_mode() 42 while (wq) { in autofs4_catatonic_mode() 43 nwq = wq->next; in autofs4_catatonic_mode() 44 wq->status = -ENOENT; /* Magic is gone - report failure */ in autofs4_catatonic_mode() 45 kfree(wq->name.name); in autofs4_catatonic_mode() 46 wq->name.name = NULL; in autofs4_catatonic_mode() 47 wq->wait_ctr--; in autofs4_catatonic_mode() 48 wake_up_interruptible(&wq->queue); in autofs4_catatonic_mode() 49 wq = nwq; in autofs4_catatonic_mode() [all …]
|
/linux-4.4.14/fs/btrfs/ |
D | async-thread.c | 144 __btrfs_destroy_workqueue(struct __btrfs_workqueue *wq); 180 static inline void thresh_queue_hook(struct __btrfs_workqueue *wq) in thresh_queue_hook() argument 182 if (wq->thresh == NO_THRESHOLD) in thresh_queue_hook() 184 atomic_inc(&wq->pending); in thresh_queue_hook() 192 static inline void thresh_exec_hook(struct __btrfs_workqueue *wq) in thresh_exec_hook() argument 198 if (wq->thresh == NO_THRESHOLD) in thresh_exec_hook() 201 atomic_dec(&wq->pending); in thresh_exec_hook() 202 spin_lock(&wq->thres_lock); in thresh_exec_hook() 207 wq->count++; in thresh_exec_hook() 208 wq->count %= (wq->thresh / 4); in thresh_exec_hook() [all …]
|
D | async-thread.h | 39 struct __btrfs_workqueue *wq; member 78 void btrfs_queue_work(struct btrfs_workqueue *wq, 80 void btrfs_destroy_workqueue(struct btrfs_workqueue *wq); 81 void btrfs_workqueue_set_max(struct btrfs_workqueue *wq, int max);
|
/linux-4.4.14/include/linux/ |
D | wait.h | 212 #define ___wait_event(wq, condition, state, exclusive, ret, cmd) \ argument 225 long __int = prepare_to_wait_event(&wq, &__wait, state);\ 233 abort_exclusive_wait(&wq, &__wait, \ 242 finish_wait(&wq, &__wait); \ 246 #define __wait_event(wq, condition) \ argument 247 (void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0, \ 262 #define wait_event(wq, condition) \ argument 267 __wait_event(wq, condition); \ 270 #define __io_wait_event(wq, condition) \ argument 271 (void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0, \ [all …]
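___wait_event() above is the template behind every wait_event*() flavour: loop, prepare_to_wait_event(), re-test the condition, schedule(), finish_wait(). A minimal sketch of the caller-side pairing, assuming an invented flag and waitqueue:

    #include <linux/sched.h>
    #include <linux/wait.h>

    static DECLARE_WAIT_QUEUE_HEAD(demo_waitq);   /* invented */
    static int demo_ready;

    static void demo_producer(void)
    {
            /* Set the condition before waking; the sleeper re-checks
             * demo_ready inside ___wait_event() after each wakeup. */
            demo_ready = 1;
            wake_up(&demo_waitq);
    }

    static int demo_consumer(void)
    {
            /* Sleeps in TASK_INTERRUPTIBLE until demo_ready is nonzero;
             * returns -ERESTARTSYS if a signal arrives first. */
            return wait_event_interruptible(demo_waitq, demo_ready);
    }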
|
D | workqueue.h | 118 struct workqueue_struct *wq; member 421 extern void destroy_workqueue(struct workqueue_struct *wq); 425 int apply_workqueue_attrs(struct workqueue_struct *wq, 429 extern bool queue_work_on(int cpu, struct workqueue_struct *wq, 431 extern bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq, 433 extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq, 436 extern void flush_workqueue(struct workqueue_struct *wq); 437 extern void drain_workqueue(struct workqueue_struct *wq); 450 extern void workqueue_set_max_active(struct workqueue_struct *wq, 453 extern bool workqueue_congested(int cpu, struct workqueue_struct *wq); [all …]
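These are the public workqueue entry points. A minimal module-style sketch of deferring work with queue_delayed_work(); the queue name and handler are illustrative, not taken from any driver above:

    #include <linux/jiffies.h>
    #include <linux/module.h>
    #include <linux/workqueue.h>

    static struct workqueue_struct *demo_wq;      /* hypothetical */
    static struct delayed_work demo_dwork;

    static void demo_fn(struct work_struct *work)
    {
            pr_info("demo: delayed work ran\n");
    }

    static int __init demo_init(void)
    {
            demo_wq = alloc_workqueue("demo_wq", WQ_UNBOUND, 0);
            if (!demo_wq)
                    return -ENOMEM;
            INIT_DELAYED_WORK(&demo_dwork, demo_fn);
            queue_delayed_work(demo_wq, &demo_dwork, msecs_to_jiffies(100));
            return 0;
    }

    static void __exit demo_exit(void)
    {
            cancel_delayed_work_sync(&demo_dwork);   /* before teardown */
            destroy_workqueue(demo_wq);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");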
|
D | freezer.h | 250 #define wait_event_freezekillable_unsafe(wq, condition) \ argument 254 __retval = wait_event_killable(wq, (condition)); \ 296 #define wait_event_freezekillable_unsafe(wq, condition) \ argument 297 wait_event_killable(wq, condition)
|
D | padata.h | 155 struct workqueue_struct *wq; member 168 struct workqueue_struct *wq); 169 extern struct padata_instance *padata_alloc(struct workqueue_struct *wq,
|
/linux-4.4.14/drivers/infiniband/hw/cxgb4/ |
D | t4.h | 344 static inline int t4_rqes_posted(struct t4_wq *wq) in t4_rqes_posted() argument 346 return wq->rq.in_use; in t4_rqes_posted() 349 static inline int t4_rq_empty(struct t4_wq *wq) in t4_rq_empty() argument 351 return wq->rq.in_use == 0; in t4_rq_empty() 354 static inline int t4_rq_full(struct t4_wq *wq) in t4_rq_full() argument 356 return wq->rq.in_use == (wq->rq.size - 1); in t4_rq_full() 359 static inline u32 t4_rq_avail(struct t4_wq *wq) in t4_rq_avail() argument 361 return wq->rq.size - 1 - wq->rq.in_use; in t4_rq_avail() 364 static inline void t4_rq_produce(struct t4_wq *wq, u8 len16) in t4_rq_produce() argument 366 wq->rq.in_use++; in t4_rq_produce() [all …]
|
D | cq.c | 183 static void insert_recv_cqe(struct t4_wq *wq, struct t4_cq *cq) in insert_recv_cqe() argument 188 wq, cq, cq->sw_cidx, cq->sw_pidx); in insert_recv_cqe() 194 CQE_QPID_V(wq->sq.qid)); in insert_recv_cqe() 200 int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count) in c4iw_flush_rq() argument 203 int in_use = wq->rq.in_use - count; in c4iw_flush_rq() 207 wq, cq, wq->rq.in_use, count); in c4iw_flush_rq() 209 insert_recv_cqe(wq, cq); in c4iw_flush_rq() 215 static void insert_sq_cqe(struct t4_wq *wq, struct t4_cq *cq, in insert_sq_cqe() argument 221 wq, cq, cq->sw_cidx, cq->sw_pidx); in insert_sq_cqe() 227 CQE_QPID_V(wq->sq.qid)); in insert_sq_cqe() [all …]
|
D | qp.c | 149 static int destroy_qp(struct c4iw_rdev *rdev, struct t4_wq *wq, in destroy_qp() argument 157 wq->rq.memsize, wq->rq.queue, in destroy_qp() 158 dma_unmap_addr(&wq->rq, mapping)); in destroy_qp() 159 dealloc_sq(rdev, &wq->sq); in destroy_qp() 160 c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size); in destroy_qp() 161 kfree(wq->rq.sw_rq); in destroy_qp() 162 kfree(wq->sq.sw_sq); in destroy_qp() 163 c4iw_put_qpid(rdev, wq->rq.qid, uctx); in destroy_qp() 164 c4iw_put_qpid(rdev, wq->sq.qid, uctx); in destroy_qp() 195 static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq, in create_qp() argument [all …]
|
D | device.c | 117 void c4iw_log_wr_stats(struct t4_wq *wq, struct t4_cqe *cqe) in c4iw_log_wr_stats() argument 122 if (!wq->rdev->wr_log) in c4iw_log_wr_stats() 125 idx = (atomic_inc_return(&wq->rdev->wr_log_idx) - 1) & in c4iw_log_wr_stats() 126 (wq->rdev->wr_log_size - 1); in c4iw_log_wr_stats() 127 le.poll_sge_ts = cxgb4_read_sge_timestamp(wq->rdev->lldi.ports[0]); in c4iw_log_wr_stats() 132 le.qid = wq->sq.qid; in c4iw_log_wr_stats() 134 le.post_host_ts = wq->sq.sw_sq[wq->sq.cidx].host_ts; in c4iw_log_wr_stats() 135 le.post_sge_ts = wq->sq.sw_sq[wq->sq.cidx].sge_ts; in c4iw_log_wr_stats() 138 le.qid = wq->rq.qid; in c4iw_log_wr_stats() 140 le.post_host_ts = wq->rq.sw_rq[wq->rq.cidx].host_ts; in c4iw_log_wr_stats() [all …]
|
/linux-4.4.14/kernel/ |
D | workqueue.c | 200 struct workqueue_struct *wq; /* I: the owning workqueue */ member 335 static void workqueue_sysfs_unregister(struct workqueue_struct *wq); 345 #define assert_rcu_or_wq_mutex(wq) \ argument 347 !lockdep_is_held(&wq->mutex), \ 350 #define assert_rcu_or_wq_mutex_or_pool_mutex(wq) \ argument 352 !lockdep_is_held(&wq->mutex) && \ 405 #define for_each_pwq(pwq, wq) \ argument 406 list_for_each_entry_rcu((pwq), &(wq)->pwqs, pwqs_node) \ 407 if (({ assert_rcu_or_wq_mutex(wq); false; })) { } \ 567 static struct pool_workqueue *unbound_pwq_by_node(struct workqueue_struct *wq, in unbound_pwq_by_node() argument [all …]
|
D | cpu.c | 64 wait_queue_head_t wq; member 78 .wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq), 116 if (refcount <= 0 && waitqueue_active(&cpu_hotplug.wq)) in put_online_cpus() 117 wake_up(&cpu_hotplug.wq); in put_online_cpus() 155 prepare_to_wait(&cpu_hotplug.wq, &wait, TASK_UNINTERRUPTIBLE); in cpu_hotplug_begin() 161 finish_wait(&cpu_hotplug.wq, &wait); in cpu_hotplug_begin()
|
D | padata.c | 143 queue_work_on(target_cpu, pinst->wq, &queue->work); in padata_do_parallel() 266 queue_work_on(cb_cpu, pinst->wq, &squeue->work); in padata_reorder() 1022 struct padata_instance *padata_alloc_possible(struct workqueue_struct *wq) in padata_alloc_possible() argument 1024 return padata_alloc(wq, cpu_possible_mask, cpu_possible_mask); in padata_alloc_possible() 1036 struct padata_instance *padata_alloc(struct workqueue_struct *wq, in padata_alloc() argument 1064 pinst->wq = wq; in padata_alloc()
|
/linux-4.4.14/drivers/infiniband/hw/cxgb3/ |
D | cxio_hal.c | 275 struct t3_wq *wq, struct cxio_ucontext *uctx) in cxio_create_qp() argument 277 int depth = 1UL << wq->size_log2; in cxio_create_qp() 278 int rqsize = 1UL << wq->rq_size_log2; in cxio_create_qp() 280 wq->qpid = get_qpid(rdev_p, uctx); in cxio_create_qp() 281 if (!wq->qpid) in cxio_create_qp() 284 wq->rq = kzalloc(depth * sizeof(struct t3_swrq), GFP_KERNEL); in cxio_create_qp() 285 if (!wq->rq) in cxio_create_qp() 288 wq->rq_addr = cxio_hal_rqtpool_alloc(rdev_p, rqsize); in cxio_create_qp() 289 if (!wq->rq_addr) in cxio_create_qp() 292 wq->sq = kzalloc(depth * sizeof(struct t3_swsq), GFP_KERNEL); in cxio_create_qp() [all …]
|
D | iwch_qp.c | 150 u8 *flit_cnt, int *wr_cnt, struct t3_wq *wq) in build_memreg() argument 175 wqe = (union t3_wr *)(wq->queue + in build_memreg() 176 Q_PTR2IDX((wq->wptr+1), wq->size_log2)); in build_memreg() 178 Q_GENBIT(wq->wptr + 1, wq->size_log2), in build_memreg() 281 qhp->wq.rq[Q_PTR2IDX(qhp->wq.rq_wptr, in build_rdma_recv() 282 qhp->wq.rq_size_log2)].wr_id = wr->wr_id; in build_rdma_recv() 283 qhp->wq.rq[Q_PTR2IDX(qhp->wq.rq_wptr, in build_rdma_recv() 284 qhp->wq.rq_size_log2)].pbl_addr = 0; in build_rdma_recv() 344 qhp->wq.rq[Q_PTR2IDX(qhp->wq.rq_wptr, in build_zero_stag_recv() 345 qhp->wq.rq_size_log2)].wr_id = wr->wr_id; in build_zero_stag_recv() [all …]
|
D | iwch_cq.c | 49 struct t3_wq *wq; in iwch_poll_cq_one() local 62 wq = NULL; in iwch_poll_cq_one() 65 wq = &(qhp->wq); in iwch_poll_cq_one() 67 ret = cxio_poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie, in iwch_poll_cq_one() 190 if (wq) in iwch_poll_cq_one()
|
D | cxio_hal.h | 165 int cxio_create_qp(struct cxio_rdev *rdev, u32 kernel_domain, struct t3_wq *wq, 167 int cxio_destroy_qp(struct cxio_rdev *rdev, struct t3_wq *wq, 190 int cxio_flush_rq(struct t3_wq *wq, struct t3_cq *cq, int count); 191 int cxio_flush_sq(struct t3_wq *wq, struct t3_cq *cq, int count); 192 void cxio_count_rcqes(struct t3_cq *cq, struct t3_wq *wq, int *count); 193 void cxio_count_scqes(struct t3_cq *cq, struct t3_wq *wq, int *count); 195 int cxio_poll_cq(struct t3_wq *wq, struct t3_cq *cq, struct t3_cqe *cqe,
|
D | iwch_ev.c | 66 qhp->attr.state, qhp->wq.qpid, CQE_STATUS(rsp_msg->cqe)); in post_qp_event() 141 __func__, qhp->wq.qpid, qhp->ep); in iwch_ev_dispatch() 145 qhp->wq.qpid); in iwch_ev_dispatch() 222 CQE_STATUS(rsp_msg->cqe), qhp->wq.qpid); in iwch_ev_dispatch()
|
D | iwch_provider.c | 901 remove_handle(rhp, &rhp->qpidr, qhp->wq.qpid); in iwch_destroy_qp() 908 cxio_destroy_qp(&rhp->rdev, &qhp->wq, in iwch_destroy_qp() 912 ib_qp, qhp->wq.qpid, qhp); in iwch_destroy_qp() 976 qhp->wq.size_log2 = ilog2(wqsize); in iwch_create_qp() 977 qhp->wq.rq_size_log2 = ilog2(rqsize); in iwch_create_qp() 978 qhp->wq.sq_size_log2 = ilog2(sqsize); in iwch_create_qp() 979 if (cxio_create_qp(&rhp->rdev, !udata, &qhp->wq, in iwch_create_qp() 1016 if (insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.qpid)) { in iwch_create_qp() 1017 cxio_destroy_qp(&rhp->rdev, &qhp->wq, in iwch_create_qp() 1040 uresp.qpid = qhp->wq.qpid; in iwch_create_qp() [all …]
|
D | cxio_wr.h | 747 static inline void cxio_set_wq_in_error(struct t3_wq *wq) in cxio_set_wq_in_error() argument 749 wq->queue->wq_in_err.err |= 1; in cxio_set_wq_in_error() 752 static inline void cxio_disable_wq_db(struct t3_wq *wq) in cxio_disable_wq_db() argument 754 wq->queue->wq_in_err.err |= 2; in cxio_disable_wq_db() 757 static inline void cxio_enable_wq_db(struct t3_wq *wq) in cxio_enable_wq_db() argument 759 wq->queue->wq_in_err.err &= ~2; in cxio_enable_wq_db() 762 static inline int cxio_wq_db_enabled(struct t3_wq *wq) in cxio_wq_db_enabled() argument 764 return !(wq->queue->wq_in_err.err & 2); in cxio_wq_db_enabled()
|
D | iwch.c | 70 cxio_disable_wq_db(&qhp->wq); in disable_qp_db() 79 ring_doorbell(qhp->rhp->rdev.ctrl_qp.doorbell, qhp->wq.qpid); in enable_qp_db() 80 cxio_enable_wq_db(&qhp->wq); in enable_qp_db()
|
/linux-4.4.14/drivers/staging/rdma/hfi1/ |
D | srq.c | 69 struct hfi1_rwq *wq; in hfi1_post_srq_receive() local 85 wq = srq->rq.wq; in hfi1_post_srq_receive() 86 next = wq->head + 1; in hfi1_post_srq_receive() 89 if (next == wq->tail) { in hfi1_post_srq_receive() 96 wqe = get_rwqe_ptr(&srq->rq, wq->head); in hfi1_post_srq_receive() 103 wq->head = next; in hfi1_post_srq_receive() 153 srq->rq.wq = vmalloc_user(sizeof(struct hfi1_rwq) + srq->rq.size * sz); in hfi1_create_srq() 154 if (!srq->rq.wq) { in hfi1_create_srq() 169 srq->rq.wq); in hfi1_create_srq() 188 srq->rq.wq->head = 0; in hfi1_create_srq() [all …]
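hfi1_post_srq_receive() above advances wq->head and treats head + 1 == tail as full — the classic single-producer ring test that deliberately leaves one slot empty. A userspace model of the same check (simplified; the real code also copies SGEs and takes rq.lock):

    #include <stdbool.h>
    #include <stdio.h>

    struct rwq {                  /* model of head/tail ring indices */
            unsigned int size;    /* number of slots                 */
            unsigned int head;    /* producer writes here            */
            unsigned int tail;    /* consumer reads here             */
    };

    static bool rwq_post(struct rwq *q)
    {
            unsigned int next = q->head + 1;

            if (next >= q->size)
                    next = 0;             /* wrap, as in the driver     */
            if (next == q->tail)
                    return false;         /* full: one slot stays empty */
            q->head = next;
            return true;
    }

    int main(void)
    {
            struct rwq q = { .size = 4 };
            int posted = 0;

            while (rwq_post(&q))
                    posted++;
            printf("posted %d of %u slots\n", posted, q.size); /* 3 of 4 */
            return 0;
    }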
|
D | qp.c | 396 if (qp->r_rq.wq) { in reset_qp() 397 qp->r_rq.wq->head = 0; in reset_qp() 398 qp->r_rq.wq->tail = 0; in reset_qp() 513 if (qp->r_rq.wq) { in hfi1_error_qp() 514 struct hfi1_rwq *wq; in hfi1_error_qp() local 521 wq = qp->r_rq.wq; in hfi1_error_qp() 522 head = wq->head; in hfi1_error_qp() 525 tail = wq->tail; in hfi1_error_qp() 534 wq->tail = tail; in hfi1_error_qp() 960 struct hfi1_rwq *wq = qp->r_rq.wq; in hfi1_compute_aeth() local [all …]
|
D | iowait.h | 156 struct workqueue_struct *wq) in iowait_schedule() argument 158 queue_work(wq, &wait->iowork); in iowait_schedule()
|
D | ruc.c | 161 struct hfi1_rwq *wq; in hfi1_get_rwqe() local 184 wq = rq->wq; in hfi1_get_rwqe() 185 tail = wq->tail; in hfi1_get_rwqe() 189 if (unlikely(tail == wq->head)) { in hfi1_get_rwqe() 203 wq->tail = tail; in hfi1_get_rwqe() 219 n = wq->head; in hfi1_get_rwqe()
|
/linux-4.4.14/drivers/infiniband/hw/qib/ |
D | qib_srq.c | 52 struct qib_rwq *wq; in qib_post_srq_receive() local 68 wq = srq->rq.wq; in qib_post_srq_receive() 69 next = wq->head + 1; in qib_post_srq_receive() 72 if (next == wq->tail) { in qib_post_srq_receive() 79 wqe = get_rwqe_ptr(&srq->rq, wq->head); in qib_post_srq_receive() 86 wq->head = next; in qib_post_srq_receive() 136 srq->rq.wq = vmalloc_user(sizeof(struct qib_rwq) + srq->rq.size * sz); in qib_create_srq() 137 if (!srq->rq.wq) { in qib_create_srq() 152 srq->rq.wq); in qib_create_srq() 171 srq->rq.wq->head = 0; in qib_create_srq() [all …]
|
D | qib_qp.c | 411 if (qp->r_rq.wq) { in qib_reset_qp() 412 qp->r_rq.wq->head = 0; in qib_reset_qp() 413 qp->r_rq.wq->tail = 0; in qib_reset_qp() 529 if (qp->r_rq.wq) { in qib_error_qp() 530 struct qib_rwq *wq; in qib_error_qp() local 537 wq = qp->r_rq.wq; in qib_error_qp() 538 head = wq->head; in qib_error_qp() 541 tail = wq->tail; in qib_error_qp() 550 wq->tail = tail; in qib_error_qp() 924 struct qib_rwq *wq = qp->r_rq.wq; in qib_compute_aeth() local [all …]
|
D | qib_ruc.c | 143 struct qib_rwq *wq; in qib_get_rwqe() local 166 wq = rq->wq; in qib_get_rwqe() 167 tail = wq->tail; in qib_get_rwqe() 171 if (unlikely(tail == wq->head)) { in qib_get_rwqe() 185 wq->tail = tail; in qib_get_rwqe() 201 n = wq->head; in qib_get_rwqe()
|
/linux-4.4.14/drivers/staging/rdma/ipath/ |
D | ipath_srq.c | 52 struct ipath_rwq *wq; in ipath_post_srq_receive() local 68 wq = srq->rq.wq; in ipath_post_srq_receive() 69 next = wq->head + 1; in ipath_post_srq_receive() 72 if (next == wq->tail) { in ipath_post_srq_receive() 79 wqe = get_rwqe_ptr(&srq->rq, wq->head); in ipath_post_srq_receive() 86 wq->head = next; in ipath_post_srq_receive() 139 srq->rq.wq = vmalloc_user(sizeof(struct ipath_rwq) + srq->rq.size * sz); in ipath_create_srq() 140 if (!srq->rq.wq) { in ipath_create_srq() 156 srq->rq.wq); in ipath_create_srq() 175 srq->rq.wq->head = 0; in ipath_create_srq() [all …]
|
D | ipath_qp.c | 359 if (qp->r_rq.wq) { in ipath_reset_qp() 360 qp->r_rq.wq->head = 0; in ipath_reset_qp() 361 qp->r_rq.wq->tail = 0; in ipath_reset_qp() 409 if (qp->r_rq.wq) { in ipath_error_qp() 410 struct ipath_rwq *wq; in ipath_error_qp() local 417 wq = qp->r_rq.wq; in ipath_error_qp() 418 head = wq->head; in ipath_error_qp() 421 tail = wq->tail; in ipath_error_qp() 430 wq->tail = tail; in ipath_error_qp() 689 struct ipath_rwq *wq = qp->r_rq.wq; in ipath_compute_aeth() local [all …]
|
D | ipath_ud.c | 59 struct ipath_rwq *wq; in ipath_ud_loopback() local 122 wq = rq->wq; in ipath_ud_loopback() 123 tail = wq->tail; in ipath_ud_loopback() 127 if (unlikely(tail == wq->head)) { in ipath_ud_loopback() 147 wq->tail = tail; in ipath_ud_loopback() 156 n = wq->head; in ipath_ud_loopback()
|
D | ipath_ruc.c | 169 struct ipath_rwq *wq; in ipath_get_rwqe() local 192 wq = rq->wq; in ipath_get_rwqe() 193 tail = wq->tail; in ipath_get_rwqe() 198 if (unlikely(tail == wq->head)) { in ipath_get_rwqe() 212 wq->tail = tail; in ipath_get_rwqe() 223 n = wq->head; in ipath_get_rwqe()
|
D | ipath_verbs.h | 316 struct ipath_rwqe wq[0]; member 320 struct ipath_rwq *wq; member 503 ((char *) rq->wq->wq + in get_rwqe_ptr()
|
/linux-4.4.14/lib/raid6/ |
D | neon.uc | 59 register unative_t wd$$, wq$$, wp$$, w1$$, w2$$; 67 wq$$ = wp$$ = vld1q_u8(&dptr[z0][d+$$*NSIZE]); 71 w2$$ = MASK(wq$$); 72 w1$$ = SHLBYTE(wq$$); 76 wq$$ = veorq_u8(w1$$, wd$$); 79 vst1q_u8(&q[d+NSIZE*$$], wq$$); 90 register unative_t wd$$, wq$$, wp$$, w1$$, w2$$; 98 wq$$ = vld1q_u8(&dptr[z0][d+$$*NSIZE]); 99 wp$$ = veorq_u8(vld1q_u8(&p[d+$$*NSIZE]), wq$$); 105 w2$$ = MASK(wq$$); [all …]
|
D | int.uc | 88 unative_t wd$$, wq$$, wp$$, w1$$, w2$$; 95 wq$$ = wp$$ = *(unative_t *)&dptr[z0][d+$$*NSIZE]; 99 w2$$ = MASK(wq$$); 100 w1$$ = SHLBYTE(wq$$); 103 wq$$ = w1$$ ^ wd$$; 106 *(unative_t *)&q[d+NSIZE*$$] = wq$$; 117 unative_t wd$$, wq$$, wp$$, w1$$, w2$$; 125 wq$$ = wp$$ = *(unative_t *)&dptr[z0][d+$$*NSIZE]; 129 w2$$ = MASK(wq$$); 130 w1$$ = SHLBYTE(wq$$); [all …]
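The unrolled loops in these .uc templates compute the RAID-6 Q syndrome: at each step, Q is multiplied by 2 in GF(2^8) using the SHLBYTE/MASK idiom (shift every byte left by one, then XOR in 0x1d wherever the old top bit was set) and XORed with the next data disk. A scalar demonstration of that multiply, using the same 0x11d polynomial as lib/raid6 (the disk bytes are invented):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Multiply by 2 in GF(2^8) with polynomial x^8+x^4+x^3+x^2+1. */
    static uint8_t gf2_mul2(uint8_t v)
    {
            uint8_t mask = (v & 0x80) ? 0x1d : 0x00; /* MASK(): top bit -> 0x1d */
            return (uint8_t)(v << 1) ^ mask;         /* SHLBYTE(), then fold    */
    }

    int main(void)
    {
            /* One Q-syndrome pass over three "disks" of one byte each. */
            uint8_t d[3] = { 0x53, 0xca, 0x01 };
            uint8_t q = 0;

            for (int z = 0; z < 3; z++)
                    q = gf2_mul2(q) ^ d[z];   /* q = 2*q + d, Horner's rule */

            assert(gf2_mul2(0x80) == 0x1d);   /* overflow folds the polynomial */
            printf("q = 0x%02x\n", q);
            return 0;
    }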
|
D | tilegx.uc | 56 u64 wd$$, wq$$, wp$$, w1$$, w2$$; 66 wq$$ = wp$$ = *z0ptr++; 70 w2$$ = MASK(wq$$); 71 w1$$ = SHLBYTE(wq$$); 74 wq$$ = w1$$ ^ wd$$; 77 *q++ = wq$$;
|
D | altivec.uc | 74 unative_t wd$$, wq$$, wp$$, w1$$, w2$$; 82 wq$$ = wp$$ = *(unative_t *)&dptr[z0][d+$$*NSIZE]; 86 w2$$ = MASK(wq$$); 87 w1$$ = SHLBYTE(wq$$); 90 wq$$ = vec_xor(w1$$, wd$$); 93 *(unative_t *)&q[d+NSIZE*$$] = wq$$;
|
/linux-4.4.14/Documentation/ |
D | workqueue.txt | 21 is needed and the workqueue (wq) API is the most commonly used 37 In the original wq implementation, a multi threaded (MT) wq had one 38 worker thread per CPU and a single threaded (ST) wq had one worker 39 thread system-wide. A single MT wq needed to keep around the same 41 wq users over the years and with the number of CPU cores continuously 45 Although MT wq wasted a lot of resource, the level of concurrency 47 MT wq albeit less severe on MT. Each wq maintained its own separate 48 worker pool. A MT wq could provide only one execution context per CPU 49 while a ST wq one for the whole system. Work items had to compete for 55 choosing to use ST wq for polling PIOs and accepting an unnecessary [all …]
|
/linux-4.4.14/drivers/md/bcache/ |
D | closure.h | 151 struct workqueue_struct *wq; member 239 struct workqueue_struct *wq) in set_closure_fn() argument 244 cl->wq = wq; in set_closure_fn() 251 struct workqueue_struct *wq = cl->wq; in closure_queue() local 252 if (wq) { in closure_queue() 254 BUG_ON(!queue_work(wq, &cl->work)); in closure_queue() 376 struct workqueue_struct *wq, in closure_call() argument 380 continue_at_nobarrier(cl, fn, wq); in closure_call()
|
D | movinggc.c | 116 continue_at(cl, write_moving_finish, op->wq); in write_moving() 126 continue_at(cl, write_moving, io->op.wq); in read_moving_submit() 161 io->op.wq = c->moving_gc_wq; in read_moving()
|
D | request.h | 8 struct workqueue_struct *wq; member
|
D | request.c | 92 continue_at(cl, bch_data_insert_start, op->wq); in bch_data_insert_keys() 143 continue_at(cl, bch_data_insert_keys, op->wq); in bch_data_invalidate() 186 set_closure_fn(cl, bch_data_insert_error, op->wq); in bch_data_insert_endio() 222 continue_at(cl, bch_data_insert_keys, op->wq); in bch_data_insert_start() 261 continue_at(cl, bch_data_insert_keys, op->wq); in bch_data_insert_start() 291 continue_at(cl, bch_data_insert_keys, op->wq); in bch_data_insert_start() 672 s->iop.wq = bcache_wq; in search_alloc()
|
/linux-4.4.14/kernel/sched/ |
D | wait.c | 387 __wait_on_bit(wait_queue_head_t *wq, struct wait_bit_queue *q, in __wait_on_bit() argument 393 prepare_to_wait(wq, &q->wait, mode); in __wait_on_bit() 397 finish_wait(wq, &q->wait); in __wait_on_bit() 405 wait_queue_head_t *wq = bit_waitqueue(word, bit); in out_of_line_wait_on_bit() local 408 return __wait_on_bit(wq, &wait, action, mode); in out_of_line_wait_on_bit() 416 wait_queue_head_t *wq = bit_waitqueue(word, bit); in out_of_line_wait_on_bit_timeout() local 420 return __wait_on_bit(wq, &wait, action, mode); in out_of_line_wait_on_bit_timeout() 425 __wait_on_bit_lock(wait_queue_head_t *wq, struct wait_bit_queue *q, in __wait_on_bit_lock() argument 431 prepare_to_wait_exclusive(wq, &q->wait, mode); in __wait_on_bit_lock() 437 abort_exclusive_wait(wq, &q->wait, mode, &q->key); in __wait_on_bit_lock() [all …]
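__wait_on_bit() and bit_waitqueue() above back the wait_on_bit() family, which lets many flag bits share a small hash of waitqueues instead of embedding one per object. A usage sketch, with an invented flag word and bit:

    #include <linux/bitops.h>
    #include <linux/sched.h>
    #include <linux/wait.h>

    #define DEMO_BUSY 0                   /* bit number, illustrative */
    static unsigned long demo_flags;

    static int demo_wait_idle(void)
    {
            /* Sleep until bit DEMO_BUSY is clear in demo_flags. */
            return wait_on_bit(&demo_flags, DEMO_BUSY, TASK_UNINTERRUPTIBLE);
    }

    static void demo_finish(void)
    {
            clear_bit_unlock(DEMO_BUSY, &demo_flags);
            smp_mb__after_atomic();       /* order clear before the wake */
            wake_up_bit(&demo_flags, DEMO_BUSY);
    }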
|
/linux-4.4.14/drivers/staging/lustre/lustre/include/ |
D | lustre_lib.h | 528 #define __l_wait_event(wq, condition, info, ret, l_add_wait) \ argument 540 l_add_wait(&wq, &__wait); \ 601 remove_wait_queue(&wq, &__wait); \ 604 #define l_wait_event(wq, condition, info) \ argument 609 __l_wait_event(wq, condition, __info, \ 614 #define l_wait_event_exclusive(wq, condition, info) \ argument 619 __l_wait_event(wq, condition, __info, \ 624 #define l_wait_event_exclusive_head(wq, condition, info) \ argument 629 __l_wait_event(wq, condition, __info, \ 634 #define l_wait_condition(wq, condition) \ argument [all …]
|
/linux-4.4.14/net/core/ |
D | stream.c | 31 struct socket_wq *wq; in sk_stream_write_space() local 37 wq = rcu_dereference(sk->sk_wq); in sk_stream_write_space() 38 if (wq_has_sleeper(wq)) in sk_stream_write_space() 39 wake_up_interruptible_poll(&wq->wait, POLLOUT | in sk_stream_write_space() 41 if (wq && wq->fasync_list && !(sk->sk_shutdown & SEND_SHUTDOWN)) in sk_stream_write_space() 42 sock_wake_async(wq, SOCK_WAKE_SPACE, POLL_OUT); in sk_stream_write_space()
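sk_stream_write_space() above shows the lockless wake-up pattern used throughout net/core: the waitqueue is reached through RCU and checked with wq_has_sleeper() before waking, which issues the barrier that pairs with the sleeper's prepare_to_wait(). A condensed restatement (struct and helper names as in 4.4; the surrounding function is hypothetical):

    #include <linux/poll.h>
    #include <net/sock.h>

    static void demo_write_space(struct sock *sk)
    {
            struct socket_wq *wq;

            rcu_read_lock();
            wq = rcu_dereference(sk->sk_wq);
            /* wq_has_sleeper() = full barrier + waitqueue_active(),
             * so a sleeper that raced with us still sees the state. */
            if (wq_has_sleeper(wq))
                    wake_up_interruptible_poll(&wq->wait, POLLOUT);
            rcu_read_unlock();
    }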
|
D | sock.c | 1982 prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait, in __lock_sock() 1990 finish_wait(&sk->sk_lock.wq, &wait); in __lock_sock() 2280 struct socket_wq *wq; in sock_def_wakeup() local 2283 wq = rcu_dereference(sk->sk_wq); in sock_def_wakeup() 2284 if (wq_has_sleeper(wq)) in sock_def_wakeup() 2285 wake_up_interruptible_all(&wq->wait); in sock_def_wakeup() 2291 struct socket_wq *wq; in sock_def_error_report() local 2294 wq = rcu_dereference(sk->sk_wq); in sock_def_error_report() 2295 if (wq_has_sleeper(wq)) in sock_def_error_report() 2296 wake_up_interruptible_poll(&wq->wait, POLLERR); in sock_def_error_report() [all …]
|
/linux-4.4.14/drivers/net/ethernet/cavium/liquidio/ |
D | response_manager.c | 58 oct->dma_comp_wq.wq = create_workqueue("dma-comp"); in octeon_setup_response_list() 59 if (!oct->dma_comp_wq.wq) { in octeon_setup_response_list() 67 queue_delayed_work(cwq->wq, &cwq->wk.work, msecs_to_jiffies(100)); in octeon_setup_response_list() 75 flush_workqueue(oct->dma_comp_wq.wq); in octeon_delete_response_list() 76 destroy_workqueue(oct->dma_comp_wq.wq); in octeon_delete_response_list() 177 queue_delayed_work(cwq->wq, &cwq->wk.work, msecs_to_jiffies(100)); in oct_poll_req_completion()
|
D | request_manager.c | 147 oct->check_db_wq[iq_no].wq = create_workqueue("check_iq_db"); in octeon_init_instr_queue() 148 if (!oct->check_db_wq[iq_no].wq) { in octeon_init_instr_queue() 160 queue_delayed_work(db_wq->wq, &db_wq->wk.work, msecs_to_jiffies(1)); in octeon_init_instr_queue() 171 flush_workqueue(oct->check_db_wq[iq_no].wq); in octeon_delete_instr_queue() 172 destroy_workqueue(oct->check_db_wq[iq_no].wq); in octeon_delete_instr_queue() 515 queue_delayed_work(db_wq->wq, &db_wq->wk.work, msecs_to_jiffies(1)); in check_db_timeout()
|
/linux-4.4.14/drivers/gpu/drm/radeon/ |
D | radeon_sa.c | 56 init_waitqueue_head(&sa_manager->wq); in radeon_sa_bo_manager_init() 333 spin_lock(&sa_manager->wq.lock); in radeon_sa_bo_new() 345 spin_unlock(&sa_manager->wq.lock); in radeon_sa_bo_new() 355 spin_unlock(&sa_manager->wq.lock); in radeon_sa_bo_new() 359 spin_lock(&sa_manager->wq.lock); in radeon_sa_bo_new() 363 sa_manager->wq, in radeon_sa_bo_new() 370 spin_unlock(&sa_manager->wq.lock); in radeon_sa_bo_new() 386 spin_lock(&sa_manager->wq.lock); in radeon_sa_bo_free() 394 wake_up_all_locked(&sa_manager->wq); in radeon_sa_bo_free() 395 spin_unlock(&sa_manager->wq.lock); in radeon_sa_bo_free() [all …]
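radeon_sa reuses its waitqueue's internal spinlock (sa_manager->wq.lock) as the allocator lock and wakes with wake_up_all_locked() while still holding it. A compact sketch of that locked-waitqueue pattern (the queue, counter, and functions are invented):

    #include <linux/spinlock.h>
    #include <linux/wait.h>

    static DECLARE_WAIT_QUEUE_HEAD(demo_wq);  /* its .lock doubles as ours */
    static int demo_free_slots;

    static void demo_release(void)
    {
            spin_lock(&demo_wq.lock);
            demo_free_slots++;
            wake_up_all_locked(&demo_wq);     /* lock already held */
            spin_unlock(&demo_wq.lock);
    }

    static int demo_claim(void)
    {
            int ret;

            spin_lock(&demo_wq.lock);
            /* Drops and retakes demo_wq.lock internally while sleeping. */
            ret = wait_event_interruptible_locked(demo_wq,
                                                  demo_free_slots > 0);
            if (!ret)
                    demo_free_slots--;
            spin_unlock(&demo_wq.lock);
            return ret;
    }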
|
/linux-4.4.14/drivers/power/ |
D | ipaq_micro_battery.c | 43 struct workqueue_struct *wq; member 91 queue_delayed_work(mb->wq, &mb->update, msecs_to_jiffies(BATT_PERIOD)); in micro_battery_work() 238 mb->wq = create_singlethread_workqueue("ipaq-battery-wq"); in micro_batt_probe() 239 if (!mb->wq) in micro_batt_probe() 244 queue_delayed_work(mb->wq, &mb->update, 1); in micro_batt_probe() 267 destroy_workqueue(mb->wq); in micro_batt_probe() 279 destroy_workqueue(mb->wq); in micro_batt_remove() 296 queue_delayed_work(mb->wq, &mb->update, msecs_to_jiffies(BATT_PERIOD)); in micro_batt_resume()
|
/linux-4.4.14/drivers/hid/ |
D | hid-elo.c | 35 static struct workqueue_struct *wq; variable 177 queue_delayed_work(wq, &priv->work, ELO_PERIODIC_READ_INTERVAL); in elo_work() 250 queue_delayed_work(wq, &priv->work, ELO_PERIODIC_READ_INTERVAL); in elo_probe() 264 flush_workqueue(wq); in elo_remove() 288 wq = create_singlethread_workqueue("elousb"); in elo_driver_init() 289 if (!wq) in elo_driver_init() 294 destroy_workqueue(wq); in elo_driver_init() 303 destroy_workqueue(wq); in elo_driver_exit()
|
/linux-4.4.14/drivers/gpu/drm/amd/amdgpu/ |
D | amdgpu_sa.c | 56 init_waitqueue_head(&sa_manager->wq); in amdgpu_sa_bo_manager_init() 336 spin_lock(&sa_manager->wq.lock); in amdgpu_sa_bo_new() 348 spin_unlock(&sa_manager->wq.lock); in amdgpu_sa_bo_new() 360 spin_unlock(&sa_manager->wq.lock); in amdgpu_sa_bo_new() 367 spin_lock(&sa_manager->wq.lock); in amdgpu_sa_bo_new() 371 sa_manager->wq, in amdgpu_sa_bo_new() 378 spin_unlock(&sa_manager->wq.lock); in amdgpu_sa_bo_new() 394 spin_lock(&sa_manager->wq.lock); in amdgpu_sa_bo_free() 403 wake_up_all_locked(&sa_manager->wq); in amdgpu_sa_bo_free() 404 spin_unlock(&sa_manager->wq.lock); in amdgpu_sa_bo_free() [all …]
|
/linux-4.4.14/fs/jfs/ |
D | jfs_lock.h | 35 #define __SLEEP_COND(wq, cond, lock_cmd, unlock_cmd) \ argument 39 add_wait_queue(&wq, &__wait); \ 49 remove_wait_queue(&wq, &__wait); \
|
/linux-4.4.14/fs/ |
D | userfaultfd.c | 64 wait_queue_t wq; member 73 static int userfaultfd_wake_function(wait_queue_t *wq, unsigned mode, in userfaultfd_wake_function() argument 81 uwq = container_of(wq, struct userfaultfd_wait_queue, wq); in userfaultfd_wake_function() 89 ret = wake_up_state(wq->private, mode); in userfaultfd_wake_function() 105 list_del_init(&wq->task_list); in userfaultfd_wake_function() 333 init_waitqueue_func_entry(&uwq.wq, userfaultfd_wake_function); in handle_userfault() 334 uwq.wq.private = current; in handle_userfault() 346 __add_wait_queue(&ctx->fault_pending_wqh, &uwq.wq); in handle_userfault() 406 if (!list_empty_careful(&uwq.wq.task_list)) { in handle_userfault() 412 list_del(&uwq.wq.task_list); in handle_userfault() [all …]
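userfaultfd above embeds a wait_queue_t with a custom callback via init_waitqueue_func_entry(), so a wake-up can filter by key and dequeue the entry itself. A minimal sketch of that idiom (the containing struct and match logic are invented; the wake path mirrors userfaultfd_wake_function()):

    #include <linux/kernel.h>
    #include <linux/list.h>
    #include <linux/sched.h>
    #include <linux/wait.h>

    struct demo_waiter {                 /* hypothetical container */
            unsigned long key;
            wait_queue_t wq;
    };

    static int demo_wake_fn(wait_queue_t *wq, unsigned mode,
                            int sync, void *key)
    {
            struct demo_waiter *w = container_of(wq, struct demo_waiter, wq);

            if (w->key != (unsigned long)key)
                    return 0;                    /* not ours, keep sleeping */
            list_del_init(&wq->task_list);       /* dequeue ourselves       */
            return wake_up_state(wq->private, mode);
    }

    static void demo_prepare(struct demo_waiter *w, unsigned long key)
    {
            w->key = key;
            init_waitqueue_func_entry(&w->wq, demo_wake_fn);
            w->wq.private = current;             /* task to wake */
    }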
|
D | eventpoll.c | 193 wait_queue_head_t wq; member 506 static void ep_poll_safewake(wait_queue_head_t *wq) in ep_poll_safewake() argument 511 ep_poll_wakeup_proc, NULL, wq, (void *) (long) this_cpu); in ep_poll_safewake() 665 if (waitqueue_active(&ep->wq)) in ep_scan_ready_list() 666 wake_up_locked(&ep->wq); in ep_scan_ready_list() 949 init_waitqueue_head(&ep->wq); in ep_alloc() 1069 if (waitqueue_active(&ep->wq)) in ep_poll_callback() 1070 wake_up_locked(&ep->wq); in ep_poll_callback() 1343 if (waitqueue_active(&ep->wq)) in ep_insert() 1344 wake_up_locked(&ep->wq); in ep_insert() [all …]
|
/linux-4.4.14/drivers/usb/chipidea/ |
D | otg.c | 161 ci->wq = create_freezable_workqueue("ci_otg"); in ci_hdrc_otg_init() 162 if (!ci->wq) { in ci_hdrc_otg_init() 179 if (ci->wq) { in ci_hdrc_otg_destroy() 180 flush_workqueue(ci->wq); in ci_hdrc_otg_destroy() 181 destroy_workqueue(ci->wq); in ci_hdrc_otg_destroy()
|
D | otg.h | 23 queue_work(ci->wq, &ci->work); in ci_otg_queue_work()
|
/linux-4.4.14/drivers/infiniband/hw/mlx5/ |
D | cq.c | 103 static enum ib_wc_opcode get_umr_comp(struct mlx5_ib_wq *wq, int idx) in get_umr_comp() argument 105 switch (wq->wr_data[idx]) { in get_umr_comp() 122 struct mlx5_ib_wq *wq, int idx) in handle_good_req() argument 161 wc->opcode = get_umr_comp(wq, idx); in handle_good_req() 176 struct mlx5_ib_wq *wq; in handle_responder() local 198 wq = &qp->rq; in handle_responder() 199 wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)]; in handle_responder() 200 ++wq->tail; in handle_responder() 418 struct mlx5_ib_wq *wq; in mlx5_poll_one() local 474 wq = &(*cur_qp)->sq; in mlx5_poll_one() [all …]
|
D | mr.c | 372 queue_delayed_work(cache->wq, &ent->dwork, in __cache_work_func() 377 queue_delayed_work(cache->wq, &ent->dwork, in __cache_work_func() 380 queue_work(cache->wq, &ent->work); in __cache_work_func() 400 queue_work(cache->wq, &ent->work); in __cache_work_func() 402 queue_delayed_work(cache->wq, &ent->dwork, 300 * HZ); in __cache_work_func() 450 queue_work(cache->wq, &ent->work); in alloc_cached_mr() 455 queue_work(cache->wq, &ent->work); in alloc_cached_mr() 485 queue_work(cache->wq, &ent->work); in free_cached_mr() 582 cache->wq = create_singlethread_workqueue("mkey_cache"); in mlx5_mr_cache_init() 583 if (!cache->wq) { in mlx5_mr_cache_init() [all …]
|
D | qp.c | 121 struct mlx5_ib_wq *wq = send ? &qp->sq : &qp->rq; in mlx5_ib_read_user_wqe() local 129 if (wq->wqe_cnt == 0) { in mlx5_ib_read_user_wqe() 135 offset = wq->offset + ((wqe_index % wq->wqe_cnt) << wq->wqe_shift); in mlx5_ib_read_user_wqe() 136 wq_end = wq->offset + (wq->wqe_cnt << wq->wqe_shift); in mlx5_ib_read_user_wqe() 156 wqe_length = 1 << wq->wqe_shift; in mlx5_ib_read_user_wqe() 162 ret = ib_umem_copy_from(buffer + first_copy_length, umem, wq->offset, in mlx5_ib_read_user_wqe() 1813 static int mlx5_wq_overflow(struct mlx5_ib_wq *wq, int nreq, struct ib_cq *ib_cq) in mlx5_wq_overflow() argument 1818 cur = wq->head - wq->tail; in mlx5_wq_overflow() 1819 if (likely(cur + nreq < wq->max_post)) in mlx5_wq_overflow() 1824 cur = wq->head - wq->tail; in mlx5_wq_overflow() [all …]
|
/linux-4.4.14/drivers/i2c/busses/ |
D | i2c-taos-evm.c | 38 static DECLARE_WAIT_QUEUE_HEAD(wq); 112 wait_event_interruptible_timeout(wq, taos->state == TAOS_STATE_IDLE, in taos_smbus_xfer() 163 wake_up_interruptible(&wq); in taos_interrupt() 168 wake_up_interruptible(&wq); in taos_interrupt() 175 wake_up_interruptible(&wq); in taos_interrupt() 228 wait_event_interruptible_timeout(wq, taos->state == TAOS_STATE_IDLE, in taos_connect() 250 wait_event_interruptible_timeout(wq, taos->state == TAOS_STATE_IDLE, in taos_connect()
|
D | i2c-ibm_iic.h | 48 wait_queue_head_t wq; member
|
/linux-4.4.14/drivers/infiniband/ulp/ipoib/ |
D | ipoib_verbs.c | 159 priv->wq = create_singlethread_workqueue("ipoib_wq"); in ipoib_transport_dev_init() 160 if (!priv->wq) { in ipoib_transport_dev_init() 248 destroy_workqueue(priv->wq); in ipoib_transport_dev_init() 249 priv->wq = NULL; in ipoib_transport_dev_init() 277 if (priv->wq) { in ipoib_transport_dev_cleanup() 278 flush_workqueue(priv->wq); in ipoib_transport_dev_cleanup() 279 destroy_workqueue(priv->wq); in ipoib_transport_dev_cleanup() 280 priv->wq = NULL; in ipoib_transport_dev_cleanup()
|
D | ipoib_multicast.c | 97 queue_delayed_work(priv->wq, &priv->mcast_task, 0); in __ipoib_mcast_schedule_join_thread() 104 queue_delayed_work(priv->wq, &priv->mcast_task, HZ); in __ipoib_mcast_schedule_join_thread() 106 queue_delayed_work(priv->wq, &priv->mcast_task, 0); in __ipoib_mcast_schedule_join_thread() 391 queue_work(priv->wq, &priv->carrier_on_task); in ipoib_mcast_join_complete() 649 queue_delayed_work(priv->wq, &priv->mcast_task, in ipoib_mcast_join_task() 685 flush_workqueue(priv->wq); in ipoib_mcast_stop_thread()
|
/linux-4.4.14/drivers/mtd/chips/ |
D | cfi_cmdset_0020.c | 159 init_waitqueue_head(&(cfi->chips[i].wq)); in cfi_cmdset_0020() 297 wake_up(&chip->wq); in do_read_onechip() 352 add_wait_queue(&chip->wq, &wait); in do_read_onechip() 355 remove_wait_queue(&chip->wq, &wait); in do_read_onechip() 377 wake_up(&chip->wq); in do_read_onechip() 485 add_wait_queue(&chip->wq, &wait); in do_write_buffer() 488 remove_wait_queue(&chip->wq, &wait); in do_write_buffer() 542 add_wait_queue(&chip->wq, &wait); in do_write_buffer() 545 remove_wait_queue(&chip->wq, &wait); in do_write_buffer() 595 wake_up(&chip->wq); in do_write_buffer() [all …]
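The CFI chip drivers above still open-code sleeping: set_current_state(), add_wait_queue(), drop the chip lock, schedule(), then remove_wait_queue() and retest. A distilled sketch of that loop (the lock and busy flag are invented; the ordering matches the drivers):

    #include <linux/sched.h>
    #include <linux/spinlock.h>
    #include <linux/wait.h>

    static DECLARE_WAIT_QUEUE_HEAD(demo_chip_wq);
    static DEFINE_SPINLOCK(demo_chip_lock);
    static int demo_chip_busy;

    static void demo_wait_for_chip(void)
    {
            DECLARE_WAITQUEUE(wait, current);

            spin_lock(&demo_chip_lock);
            while (demo_chip_busy) {
                    /* Queue ourselves before dropping the lock, so a
                     * waker that clears busy now still finds us. */
                    set_current_state(TASK_UNINTERRUPTIBLE);
                    add_wait_queue(&demo_chip_wq, &wait);
                    spin_unlock(&demo_chip_lock);
                    schedule();
                    remove_wait_queue(&demo_chip_wq, &wait);
                    spin_lock(&demo_chip_lock);  /* retest under the lock */
            }
            spin_unlock(&demo_chip_lock);
    }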
|
D | cfi_cmdset_0002.c | 666 init_waitqueue_head(&(cfi->chips[i].wq)); in cfi_cmdset_0002() 872 add_wait_queue(&chip->wq, &wait); in get_chip() 875 remove_wait_queue(&chip->wq, &wait); in get_chip() 907 wake_up(&chip->wq); in put_chip() 1022 add_wait_queue(&chip->wq, &wait); in xip_udelay() 1025 remove_wait_queue(&chip->wq, &wait); in xip_udelay() 1222 add_wait_queue(&chip->wq, &wait); in do_read_secsi_onechip() 1227 remove_wait_queue(&chip->wq, &wait); in do_read_secsi_onechip() 1241 wake_up(&chip->wq); in do_read_secsi_onechip() 1619 add_wait_queue(&chip->wq, &wait); in do_write_oneword() [all …]
|
D | cfi_cmdset_0001.c | 573 init_waitqueue_head(&(cfi->chips[i].wq)); in cfi_cmdset_0001() 762 init_waitqueue_head(&chip->wq); in cfi_intelext_partition_fixup() 883 add_wait_queue(&chip->wq, &wait); in chip_ready() 886 remove_wait_queue(&chip->wq, &wait); in chip_ready() 967 add_wait_queue(&chip->wq, &wait); in get_chip() 970 remove_wait_queue(&chip->wq, &wait); in get_chip() 1007 wake_up(&chip->wq); in put_chip() 1021 wake_up(&chip->wq); in put_chip() 1056 wake_up(&chip->wq); in put_chip() 1187 add_wait_queue(&chip->wq, &wait); in xip_wait_for_operation() [all …]
|
/linux-4.4.14/drivers/infiniband/hw/mlx4/ |
D | cq.c | 610 struct mlx4_ib_wq *wq; in mlx4_ib_qp_sw_comp() local 614 wq = is_send ? &qp->sq : &qp->rq; in mlx4_ib_qp_sw_comp() 615 cur = wq->head - wq->tail; in mlx4_ib_qp_sw_comp() 621 wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)]; in mlx4_ib_qp_sw_comp() 624 wq->tail++; in mlx4_ib_qp_sw_comp() 662 struct mlx4_ib_wq *wq; in mlx4_ib_poll_one() local 749 wq = &(*cur_qp)->sq; in mlx4_ib_poll_one() 752 wq->tail += (u16) (wqe_ctr - (u16) wq->tail); in mlx4_ib_poll_one() 754 wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)]; in mlx4_ib_poll_one() 755 ++wq->tail; in mlx4_ib_poll_one() [all …]
|
D | alias_GUID.c | 438 queue_delayed_work(dev->sriov.alias_guid.ports_guid[port_index].wq, in aliasguid_query_handler() 570 queue_delayed_work(dev->sriov.alias_guid.ports_guid[port - 1].wq, in set_guid_rec() 632 queue_delayed_work(dev->sriov.alias_guid.ports_guid[port - 1].wq, in mlx4_ib_invalidate_all_guid_record() 791 queue_delayed_work(dev->sriov.alias_guid.ports_guid[port].wq, in mlx4_ib_init_alias_guid_work() 827 flush_workqueue(dev->sriov.alias_guid.ports_guid[i].wq); in mlx4_ib_destroy_alias_guid_service() 828 destroy_workqueue(dev->sriov.alias_guid.ports_guid[i].wq); in mlx4_ib_destroy_alias_guid_service() 883 dev->sriov.alias_guid.ports_guid[i].wq = in mlx4_ib_init_alias_guid_service() 885 if (!dev->sriov.alias_guid.ports_guid[i].wq) { in mlx4_ib_init_alias_guid_service() 896 destroy_workqueue(dev->sriov.alias_guid.ports_guid[i].wq); in mlx4_ib_init_alias_guid_service() 897 dev->sriov.alias_guid.ports_guid[i].wq = NULL; in mlx4_ib_init_alias_guid_service()
|
D | mad.c | 1131 queue_work(ctx->wq, &ctx->work); in mlx4_ib_tunnel_comp_handler() 1868 ctx->wq = to_mdev(ibdev)->sriov.demux[port - 1].wq; in create_pv_resources() 1879 ctx->wq = NULL; in create_pv_resources() 1916 flush_workqueue(ctx->wq); in destroy_pv_resources() 2013 ctx->wq = create_singlethread_workqueue(name); in mlx4_ib_alloc_demux_ctx() 2014 if (!ctx->wq) { in mlx4_ib_alloc_demux_ctx() 2031 destroy_workqueue(ctx->wq); in mlx4_ib_alloc_demux_ctx() 2032 ctx->wq = NULL; in mlx4_ib_alloc_demux_ctx() 2048 flush_workqueue(sqp_ctx->wq); in mlx4_ib_free_sqp_ctx() 2077 flush_workqueue(ctx->wq); in mlx4_ib_free_demux_ctx() [all …]
|
/linux-4.4.14/drivers/staging/most/aim-cdev/ |
D | cdev.c | 34 wait_queue_head_t wq; member 131 wake_up_interruptible(&channel->wq); in aim_close() 143 wake_up_interruptible(&channel->wq); in aim_close() 177 channel->wq, in aim_write() 238 if (wait_event_interruptible(channel->wq, in aim_read() 285 poll_wait(filp, &c->wq, wait); in aim_poll() 345 wake_up_interruptible(&channel->wq); in aim_disconnect_channel() 373 wake_up_interruptible(&channel->wq); in aim_rx_completion() 400 wake_up_interruptible(&channel->wq); in aim_tx_completion() 460 init_waitqueue_head(&channel->wq); in aim_probe()
|
/linux-4.4.14/include/trace/events/ |
D | btrfs.h | 997 __field( void *, wq ) 1006 __entry->wq = work->wq; 1015 __entry->work, __entry->normal_work, __entry->wq, 1067 TP_PROTO(struct __btrfs_workqueue *wq, const char *name, int high), 1069 TP_ARGS(wq, name, high), 1072 __field( void *, wq ) 1078 __entry->wq = wq; 1086 __entry->wq) 1091 TP_PROTO(struct __btrfs_workqueue *wq, const char *name, int high), 1093 TP_ARGS(wq, name, high) [all …]
|
D | workqueue.h | 55 __entry->workqueue = pwq->wq;
|
/linux-4.4.14/drivers/thunderbolt/ |
D | tb.c | 296 queue_work(tb->wq, &ev->work); in tb_schedule_hotplug_handler() 332 if (tb->wq) { in thunderbolt_shutdown_and_free() 333 flush_workqueue(tb->wq); in thunderbolt_shutdown_and_free() 334 destroy_workqueue(tb->wq); in thunderbolt_shutdown_and_free() 335 tb->wq = NULL; in thunderbolt_shutdown_and_free() 366 tb->wq = alloc_ordered_workqueue("thunderbolt", 0); in thunderbolt_alloc_and_start() 367 if (!tb->wq) in thunderbolt_alloc_and_start()
|
/linux-4.4.14/drivers/usb/misc/ |
D | appledisplay.c | 88 static struct workqueue_struct *wq; variable 125 queue_delayed_work(wq, &pdata->work, 0); in appledisplay_complete() 368 wq = create_singlethread_workqueue("appledisplay"); in appledisplay_init() 369 if (!wq) { in appledisplay_init() 379 flush_workqueue(wq); in appledisplay_exit() 380 destroy_workqueue(wq); in appledisplay_exit()
|
/linux-4.4.14/drivers/infiniband/hw/mthca/ |
D | mthca_cq.c | 489 struct mthca_wq *wq; in mthca_poll_one() local 539 wq = &(*cur_qp)->sq; in mthca_poll_one() 541 >> wq->wqe_shift); in mthca_poll_one() 547 wq = NULL; in mthca_poll_one() 553 wq = &(*cur_qp)->rq; in mthca_poll_one() 555 wqe_index = wqe >> wq->wqe_shift; in mthca_poll_one() 562 wqe_index = wq->max - 1; in mthca_poll_one() 566 if (wq) { in mthca_poll_one() 567 if (wq->last_comp < wqe_index) in mthca_poll_one() 568 wq->tail += wqe_index - wq->last_comp; in mthca_poll_one() [all …]
|
D | mthca_qp.c | 229 static void mthca_wq_reset(struct mthca_wq *wq) in mthca_wq_reset() argument 231 wq->next_ind = 0; in mthca_wq_reset() 232 wq->last_comp = wq->max - 1; in mthca_wq_reset() 233 wq->head = 0; in mthca_wq_reset() 234 wq->tail = 0; in mthca_wq_reset() 1545 static inline int mthca_wq_overflow(struct mthca_wq *wq, int nreq, in mthca_wq_overflow() argument 1551 cur = wq->head - wq->tail; in mthca_wq_overflow() 1552 if (likely(cur + nreq < wq->max)) in mthca_wq_overflow() 1557 cur = wq->head - wq->tail; in mthca_wq_overflow() 1560 return cur + nreq >= wq->max; in mthca_wq_overflow()
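Editor's note: mthca_wq_overflow() is the standard lock-avoidance shape: compute head - tail without the CQ lock first, and only if that fast check fails take the lock and re-read, since completions advance the tail under that lock. A sketch with hypothetical types:

    #include <linux/compiler.h>
    #include <linux/spinlock.h>

    struct demo_wq {
            unsigned int head;
            unsigned int tail;
            unsigned int max;
    };

    static int demo_wq_overflow(struct demo_wq *wq, int nreq,
                                spinlock_t *cq_lock)
    {
            unsigned int cur = wq->head - wq->tail;

            if (likely(cur + nreq < wq->max))
                    return 0;               /* fast path, no locking */

            spin_lock(cq_lock);
            cur = wq->head - wq->tail;      /* re-read: a completion may
                                             * have freed slots meanwhile */
            spin_unlock(cq_lock);

            return cur + nreq >= wq->max;
    }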
|
/linux-4.4.14/net/sunrpc/ |
D | svcsock.c | 402 static bool sunrpc_waitqueue_active(wait_queue_head_t *wq) in sunrpc_waitqueue_active() argument 404 if (!wq) in sunrpc_waitqueue_active() 424 return waitqueue_active(wq); in sunrpc_waitqueue_active() 433 wait_queue_head_t *wq = sk_sleep(sk); in svc_udp_data_ready() local 442 if (sunrpc_waitqueue_active(wq)) in svc_udp_data_ready() 443 wake_up_interruptible(wq); in svc_udp_data_ready() 452 wait_queue_head_t *wq = sk_sleep(sk); in svc_write_space() local 460 if (sunrpc_waitqueue_active(wq)) { in svc_write_space() 463 wake_up_interruptible(wq); in svc_write_space() 791 wait_queue_head_t *wq; in svc_tcp_listen_data_ready() local [all …]
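Editor's note: sunrpc_waitqueue_active() wraps the classic guarded wakeup: skip the wake_up() when nobody sleeps, which is only safe if a memory barrier orders the condition store against the waitqueue_active() read, pairing with the barrier in prepare_to_wait()/wait_event(). A minimal sketch of the safe shape, with a hypothetical condition flag:

    #include <linux/wait.h>

    static void demo_cond_wake(wait_queue_head_t *wq, bool *cond)
    {
            *cond = true;
            smp_mb();       /* order the store above vs. the check below;
                             * pairs with the barrier in prepare_to_wait() */
            if (waitqueue_active(wq))
                    wake_up_interruptible(wq);
    }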
|
D | sched.c | 289 wait_queue_head_t *wq = bit_waitqueue(m, RPC_TASK_ACTIVE); in rpc_complete_task() local 296 spin_lock_irqsave(&wq->lock, flags); in rpc_complete_task() 299 if (waitqueue_active(wq)) in rpc_complete_task() 300 __wake_up_locked_key(wq, TASK_NORMAL, &k); in rpc_complete_task() 301 spin_unlock_irqrestore(&wq->lock, flags); in rpc_complete_task() 1066 struct workqueue_struct *wq; in rpciod_start() local 1073 wq = alloc_workqueue("rpciod", WQ_MEM_RECLAIM | WQ_HIGHPRI, 0); in rpciod_start() 1074 rpciod_workqueue = wq; in rpciod_start() 1080 struct workqueue_struct *wq = NULL; in rpciod_stop() local 1086 wq = rpciod_workqueue; in rpciod_stop() [all …]
|
/linux-4.4.14/drivers/iio/adc/ |
D | berlin2-adc.c | 75 wait_queue_head_t wq; member 126 ret = wait_event_interruptible_timeout(priv->wq, priv->data_available, in berlin2_adc_read() 177 ret = wait_event_interruptible_timeout(priv->wq, priv->data_available, in berlin2_adc_tsen_read() 253 wake_up_interruptible(&priv->wq); in berlin2_adc_irq() 273 wake_up_interruptible(&priv->wq); in berlin2_adc_tsen_irq() 322 init_waitqueue_head(&priv->wq); in berlin2_adc_probe()
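Editor's note: berlin2-adc pairs wait_event_interruptible_timeout() in the read path with wake_up_interruptible() from the IRQ handler; the return value distinguishes timeout (0), signal (<0), and success (>0). A trimmed sketch with hypothetical names:

    #include <linux/errno.h>
    #include <linux/interrupt.h>
    #include <linux/jiffies.h>
    #include <linux/wait.h>

    struct demo_adc {
            wait_queue_head_t wq;
            bool data_available;
            int value;
    };

    static irqreturn_t demo_adc_irq(int irq, void *data)
    {
            struct demo_adc *adc = data;

            adc->value = 42;                /* read from hardware here */
            adc->data_available = true;
            wake_up_interruptible(&adc->wq);
            return IRQ_HANDLED;
    }

    static int demo_adc_read(struct demo_adc *adc, int *val)
    {
            long ret;

            adc->data_available = false;
            /* kick off the conversion here, then wait for the IRQ */
            ret = wait_event_interruptible_timeout(adc->wq,
                            adc->data_available, msecs_to_jiffies(1000));
            if (ret == 0)
                    return -ETIMEDOUT;      /* hardware never answered */
            if (ret < 0)
                    return ret;             /* -ERESTARTSYS: signal */
            *val = adc->value;
            return 0;
    }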
|
/linux-4.4.14/drivers/nfc/ |
D | nfcsim.c | 63 static struct workqueue_struct *wq; variable 211 queue_delayed_work(wq, &dev->poll_work, 0); in nfcsim_start_poll() 326 queue_delayed_work(wq, &peer->recv_work, msecs_to_jiffies(5)); in nfcsim_tx() 431 queue_delayed_work(wq, &dev->poll_work, msecs_to_jiffies(200)); in nfcsim_wq_poll() 491 wq = alloc_ordered_workqueue("nfcsim", 0); in nfcsim_init() 492 if (!wq) { in nfcsim_init() 533 destroy_workqueue(wq); in nfcsim_exit()
|
D | pn533.c | 357 struct workqueue_struct *wq; member 749 queue_work(dev->wq, &dev->cmd_complete_work); in pn533_recv_response() 803 queue_work(dev->wq, &dev->cmd_complete_work); in pn533_recv_ack() 1066 queue_work(dev->wq, &dev->cmd_work); in pn533_wq_cmd_complete() 1651 queue_work(dev->wq, &dev->mi_tm_rx_work); in pn533_tm_get_data_complete() 1733 queue_work(dev->wq, &dev->cmd_work); in pn533_wq_tm_mi_send() 1794 queue_work(dev->wq, &dev->tg_work); in pn533_init_target_complete() 1809 queue_delayed_work(dev->wq, &dev->poll_work, in pn533_listen_mode_timer() 1828 queue_delayed_work(dev->wq, &dev->poll_work, in pn533_rf_complete() 1876 queue_work(dev->wq, &dev->rf_work); in pn533_poll_dep_complete() [all …]
|
/linux-4.4.14/drivers/media/pci/ddbridge/ |
D | ddbridge.h | 85 wait_queue_head_t wq; member 113 wait_queue_head_t wq; member 132 wait_queue_head_t wq; member
|
/linux-4.4.14/drivers/media/i2c/ |
D | saa7110.c | 63 wait_queue_head_t wq; member 199 prepare_to_wait(&decoder->wq, &wait, TASK_UNINTERRUPTIBLE); in determine_norm() 201 finish_wait(&decoder->wq, &wait); in determine_norm() 234 prepare_to_wait(&decoder->wq, &wait, TASK_UNINTERRUPTIBLE); in determine_norm() 236 finish_wait(&decoder->wq, &wait); in determine_norm() 415 init_waitqueue_head(&decoder->wq); in saa7110_probe()
|
D | msp3400-driver.c | 323 wake_up_interruptible(&state->wq); in msp_wake_thread() 330 add_wait_queue(&state->wq, &wait); in msp_sleep() 341 remove_wait_queue(&state->wq, &wait); in msp_sleep() 712 init_waitqueue_head(&state->wq); in msp_probe()
|
/linux-4.4.14/crypto/ |
D | algif_aead.c | 102 struct socket_wq *wq; in aead_wmem_wakeup() local 108 wq = rcu_dereference(sk->sk_wq); in aead_wmem_wakeup() 109 if (wq_has_sleeper(wq)) in aead_wmem_wakeup() 110 wake_up_interruptible_sync_poll(&wq->wait, POLLIN | in aead_wmem_wakeup() 151 struct socket_wq *wq; in aead_data_wakeup() local 159 wq = rcu_dereference(sk->sk_wq); in aead_data_wakeup() 160 if (wq_has_sleeper(wq)) in aead_data_wakeup() 161 wake_up_interruptible_sync_poll(&wq->wait, POLLOUT | in aead_data_wakeup()
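Editor's note: both algif_* hits follow the sk_wq convention: the socket_wq is RCU-managed, so the wakeup path dereferences it inside rcu_read_lock() and tests wq_has_sleeper(), which embeds the needed barrier. A sketch of that shape (poll flags as used by the 4.4-era crypto sockets):

    #include <net/sock.h>

    static void demo_data_ready_wakeup(struct sock *sk)
    {
            struct socket_wq *wq;

            rcu_read_lock();
            wq = rcu_dereference(sk->sk_wq);
            if (wq_has_sleeper(wq))         /* barrier + sleeper check */
                    wake_up_interruptible_sync_poll(&wq->wait,
                                    POLLIN | POLLRDNORM | POLLRDBAND);
            rcu_read_unlock();
    }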
|
D | pcrypt.c | 35 struct workqueue_struct *wq; member 410 pcrypt->wq = alloc_workqueue("%s", WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, in pcrypt_init_padata() 412 if (!pcrypt->wq) in pcrypt_init_padata() 415 pcrypt->pinst = padata_alloc_possible(pcrypt->wq); in pcrypt_init_padata() 451 destroy_workqueue(pcrypt->wq); in pcrypt_init_padata() 465 destroy_workqueue(pcrypt->wq); in pcrypt_fini_padata()
|
D | algif_skcipher.c | 228 struct socket_wq *wq; in skcipher_wmem_wakeup() local 234 wq = rcu_dereference(sk->sk_wq); in skcipher_wmem_wakeup() 235 if (wq_has_sleeper(wq)) in skcipher_wmem_wakeup() 236 wake_up_interruptible_sync_poll(&wq->wait, POLLIN | in skcipher_wmem_wakeup() 278 struct socket_wq *wq; in skcipher_data_wakeup() local 284 wq = rcu_dereference(sk->sk_wq); in skcipher_data_wakeup() 285 if (wq_has_sleeper(wq)) in skcipher_data_wakeup() 286 wake_up_interruptible_sync_poll(&wq->wait, POLLOUT | in skcipher_data_wakeup()
|
/linux-4.4.14/drivers/mtd/lpddr/ |
D | lpddr_cmds.c | 101 init_waitqueue_head(&chip->wq); in lpddr_cmdset() 159 add_wait_queue(&chip->wq, &wait); in wait_for_ready() 162 remove_wait_queue(&chip->wq, &wait); in wait_for_ready() 258 add_wait_queue(&chip->wq, &wait); in get_chip() 261 remove_wait_queue(&chip->wq, &wait); in get_chip() 325 add_wait_queue(&chip->wq, &wait); in chip_ready() 328 remove_wait_queue(&chip->wq, &wait); in chip_ready() 351 wake_up(&chip->wq); in put_chip() 365 wake_up(&chip->wq); in put_chip() 386 wake_up(&chip->wq); in put_chip()
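Editor's note: the lpddr get_chip()/wait_for_ready() hits open-code a sleep that must drop a lock first: mark the task sleeping, enqueue on the waitqueue, release the lock, then schedule(). Setting the task state before the unlock is what closes the lost-wakeup window. A sketch with a hypothetical mutex:

    #include <linux/mutex.h>
    #include <linux/sched.h>
    #include <linux/wait.h>

    static void demo_sleep_unlocked(wait_queue_head_t *wq,
                                    struct mutex *lock)
    {
            DECLARE_WAITQUEUE(wait, current);

            set_current_state(TASK_UNINTERRUPTIBLE);
            add_wait_queue(wq, &wait);
            mutex_unlock(lock);             /* never sleep holding it */
            schedule();                     /* woken by wake_up(wq) */
            remove_wait_queue(wq, &wait);
            mutex_lock(lock);               /* retake before returning */
    }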
|
/linux-4.4.14/drivers/staging/android/ |
D | sync.c | 169 init_waitqueue_head(&fence->wq); in sync_fence_alloc() 187 wake_up_all(&fence->wq); in fence_check_cb_func() 342 spin_lock_irqsave(&fence->wq.lock, flags); in sync_fence_wait_async() 345 __add_wait_queue_tail(&fence->wq, &waiter->work); in sync_fence_wait_async() 346 spin_unlock_irqrestore(&fence->wq.lock, flags); in sync_fence_wait_async() 361 spin_lock_irqsave(&fence->wq.lock, flags); in sync_fence_cancel_async() 366 spin_unlock_irqrestore(&fence->wq.lock, flags); in sync_fence_cancel_async() 384 ret = wait_event_interruptible_timeout(fence->wq, in sync_fence_wait() 549 poll_wait(file, &fence->wq, wait); in sync_fence_poll()
|
D | sync_debug.c | 163 spin_lock_irqsave(&fence->wq.lock, flags); in sync_print_fence() 164 list_for_each_entry(pos, &fence->wq.task_list, task_list) { in sync_print_fence() 174 spin_unlock_irqrestore(&fence->wq.lock, flags); in sync_print_fence()
|
/linux-4.4.14/arch/arm/kvm/ |
D | psci.c | 73 wait_queue_head_t *wq; in kvm_psci_vcpu_on() local 121 wq = kvm_arch_vcpu_wq(vcpu); in kvm_psci_vcpu_on() 122 wake_up_interruptible(wq); in kvm_psci_vcpu_on()
|
D | arm.c | 501 wait_queue_head_t *wq = kvm_arch_vcpu_wq(vcpu); in kvm_arm_resume_guest() local 504 wake_up_interruptible(wq); in kvm_arm_resume_guest() 510 wait_queue_head_t *wq = kvm_arch_vcpu_wq(vcpu); in vcpu_sleep() local 512 wait_event_interruptible(*wq, ((!vcpu->arch.power_off) && in vcpu_sleep()
|
/linux-4.4.14/Documentation/driver-model/ |
D | design-patterns.txt | 73 struct workqueue_struct *wq; 89 queue_work(foo->wq, &foo->offload); 97 foo->wq = create_singlethread_workqueue("foo-wq");
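Editor's note: design-patterns.txt shows the common "offload to a private workqueue" pattern only in fragments; a joined-up, hedged version (hypothetical foo driver, mirroring the fragments above):

    #include <linux/errno.h>
    #include <linux/interrupt.h>
    #include <linux/kernel.h>
    #include <linux/workqueue.h>

    struct foo {
            struct workqueue_struct *wq;
            struct work_struct offload;
            /* ... driver state ... */
    };

    static void foo_work(struct work_struct *work)
    {
            struct foo *foo = container_of(work, struct foo, offload);

            /* the heavy lifting runs here, in process context */
            (void)foo;
    }

    static irqreturn_t foo_handler(int irq, void *arg)
    {
            struct foo *foo = arg;

            queue_work(foo->wq, &foo->offload);   /* defer out of IRQ */
            return IRQ_HANDLED;
    }

    static int foo_probe(struct foo *foo)
    {
            foo->wq = create_singlethread_workqueue("foo-wq");
            if (!foo->wq)
                    return -ENOMEM;
            INIT_WORK(&foo->offload, foo_work);
            return 0;
    }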
|
/linux-4.4.14/fs/nfs/blocklayout/ |
D | rpc_pipefs.c | 62 DECLARE_WAITQUEUE(wq, current); in bl_resolve_deviceid() 87 add_wait_queue(&nn->bl_wq, &wq); in bl_resolve_deviceid() 90 remove_wait_queue(&nn->bl_wq, &wq); in bl_resolve_deviceid() 96 remove_wait_queue(&nn->bl_wq, &wq); in bl_resolve_deviceid()
|
/linux-4.4.14/drivers/net/wireless/cw1200/ |
D | cw1200_spi.c | 43 wait_queue_head_t wq; member 205 add_wait_queue(&self->wq, &wait); in cw1200_spi_lock() 218 remove_wait_queue(&self->wq, &wait); in cw1200_spi_lock() 230 wake_up(&self->wq); in cw1200_spi_unlock() 413 init_waitqueue_head(&self->wq); in cw1200_spi_probe()
|
/linux-4.4.14/arch/x86/kernel/ |
D | kvm.c | 94 wait_queue_head_t wq; member 144 init_waitqueue_head(&n.wq); in kvm_async_pf_task_wait() 150 prepare_to_wait(&n.wq, &wait, TASK_UNINTERRUPTIBLE); in kvm_async_pf_task_wait() 169 finish_wait(&n.wq, &wait); in kvm_async_pf_task_wait() 181 else if (waitqueue_active(&n->wq)) in apf_task_wake_one() 182 wake_up(&n->wq); in apf_task_wake_one() 234 init_waitqueue_head(&n->wq); in kvm_async_pf_task_wake()
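Editor's note: kvm.c uses the lower-level prepare_to_wait()/finish_wait() pair instead of wait_event() because it interleaves other work with the condition check. The canonical loop, as a sketch with a hypothetical predicate:

    #include <linux/sched.h>
    #include <linux/wait.h>

    static void demo_wait_until(wait_queue_head_t *wq,
                                bool (*done)(void *), void *arg)
    {
            DEFINE_WAIT(wait);

            for (;;) {
                    prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
                    if (done(arg))          /* checked after queuing, so
                                             * no wakeup can be lost */
                            break;
                    schedule();
            }
            finish_wait(wq, &wait);
    }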
|
/linux-4.4.14/drivers/target/tcm_fc/ |
D | tfc_conf.c | 236 struct workqueue_struct *wq; in ft_add_tpg() local 267 wq = alloc_workqueue("tcm_fc", 0, 1); in ft_add_tpg() 268 if (!wq) { in ft_add_tpg() 275 destroy_workqueue(wq); in ft_add_tpg() 279 tpg->workqueue = wq; in ft_add_tpg()
|
/linux-4.4.14/drivers/sbus/char/ |
D | bbc_i2c.c | 129 add_wait_queue(&bp->wq, &wait); in wait_for_pin() 134 bp->wq, in wait_for_pin() 143 remove_wait_queue(&bp->wq, &wait); in wait_for_pin() 279 wake_up_interruptible(&bp->wq); in bbc_i2c_interrupt() 317 init_waitqueue_head(&bp->wq); in attach_one_i2c()
|
D | bbc_i2c.h | 61 wait_queue_head_t wq; member
|
/linux-4.4.14/drivers/media/pci/netup_unidvb/ |
D | netup_unidvb.h | 82 wait_queue_head_t wq; member 118 struct workqueue_struct *wq; member
|
D | netup_unidvb_i2c.c | 124 wake_up(&i2c->wq); in netup_i2c_interrupt() 236 if (wait_event_timeout(i2c->wq, in netup_i2c_xfer() 322 init_waitqueue_head(&i2c->wq); in netup_i2c_init()
|
/linux-4.4.14/drivers/staging/rtl8192e/rtl8192e/ |
D | rtl_ps.c | 66 queue_delayed_work_rsl(priv->rtllib->wq, in rtl92e_hw_wakeup() 114 queue_delayed_work_rsl(priv->rtllib->wq, in rtl92e_enter_sleep() 116 queue_delayed_work_rsl(priv->rtllib->wq, in rtl92e_enter_sleep() 206 queue_work_rsl(priv->rtllib->wq, in rtl92e_rtllib_ips_leave_wq()
|
/linux-4.4.14/drivers/gpu/drm/ |
D | drm_flip_work.c | 99 struct workqueue_struct *wq) in drm_flip_work_commit() argument 107 queue_work(wq, &work->worker); in drm_flip_work_commit()
|
/linux-4.4.14/drivers/pci/hotplug/ |
D | shpchp_core.c | 131 slot->wq = alloc_workqueue("shpchp-%d", 0, 0, slot->number); in init_slots() 132 if (!slot->wq) { in init_slots() 168 destroy_workqueue(slot->wq); in init_slots() 189 destroy_workqueue(slot->wq); in cleanup_slots()
|
D | pciehp_ctrl.c | 53 queue_work(p_slot->wq, &info->work); in pciehp_queue_interrupt_event() 222 queue_work(p_slot->wq, &info->work); in pciehp_queue_power_work() 266 queue_delayed_work(p_slot->wq, &p_slot->work, 5*HZ); in handle_button_press_event()
|
/linux-4.4.14/fs/nfs/ |
D | callback.c | 112 DEFINE_WAIT(wq); in nfs41_callback_svc() 120 prepare_to_wait(&serv->sv_cb_waitq, &wq, TASK_INTERRUPTIBLE); in nfs41_callback_svc() 127 finish_wait(&serv->sv_cb_waitq, &wq); in nfs41_callback_svc() 135 finish_wait(&serv->sv_cb_waitq, &wq); in nfs41_callback_svc()
|
/linux-4.4.14/drivers/gpu/host1x/ |
D | intr.c | 124 wait_queue_head_t *wq = waiter->data; in action_wakeup() local 125 wake_up(wq); in action_wakeup() 130 wait_queue_head_t *wq = waiter->data; in action_wakeup_interruptible() local 131 wake_up_interruptible(wq); in action_wakeup_interruptible()
|
D | syncpt.c | 191 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); in host1x_syncpt_wait() 230 &wq, waiter, &ref); in host1x_syncpt_wait() 242 int remain = wait_event_interruptible_timeout(wq, in host1x_syncpt_wait()
|
D | cdma.h | 53 struct delayed_work wq; /* work queue */ member
|
/linux-4.4.14/drivers/iommu/ |
D | amd_iommu_v2.c | 59 wait_queue_head_t wq; /* To wait for count == 0 */ member 74 wait_queue_head_t wq; member 159 wake_up(&dev_state->wq); in put_device_state() 272 wake_up(&pasid_state->wq); in put_pasid_state() 278 wait_event(pasid_state->wq, !atomic_read(&pasid_state->count)); in put_pasid_state_wait() 660 init_waitqueue_head(&pasid_state->wq); in amd_iommu_bind_pasid() 785 init_waitqueue_head(&dev_state->wq); in amd_iommu_init_device() 882 wait_event(dev_state->wq, !atomic_read(&dev_state->count)); in amd_iommu_free_device()
|
/linux-4.4.14/fs/logfs/ |
D | dev_bdev.c | 54 static DECLARE_WAIT_QUEUE_HEAD(wq); 71 wake_up(&wq); in writeseg_end_io() 163 wake_up(&wq); in erase_end_io() 242 wait_event(wq, atomic_read(&super->s_pending_writes) == 0); in bdev_sync()
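Editor's note: dev_bdev.c drains in-flight writes with a file-scope waitqueue plus an atomic pending counter: each end_io decrements and wakes, and the sync path sleeps until the count reaches zero. A compact sketch with hypothetical names:

    #include <linux/atomic.h>
    #include <linux/wait.h>

    static DECLARE_WAIT_QUEUE_HEAD(demo_wq);
    static atomic_t demo_pending = ATOMIC_INIT(0);

    static void demo_submit(void)
    {
            atomic_inc(&demo_pending);
            /* fire off the asynchronous write here */
    }

    static void demo_end_io(void)           /* completion callback */
    {
            atomic_dec(&demo_pending);
            wake_up(&demo_wq);
    }

    static void demo_sync(void)
    {
            wait_event(demo_wq, atomic_read(&demo_pending) == 0);
    }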
|
/linux-4.4.14/net/ |
D | socket.c | 248 struct socket_wq *wq; in sock_alloc_inode() local 253 wq = kmalloc(sizeof(*wq), GFP_KERNEL); in sock_alloc_inode() 254 if (!wq) { in sock_alloc_inode() 258 init_waitqueue_head(&wq->wait); in sock_alloc_inode() 259 wq->fasync_list = NULL; in sock_alloc_inode() 260 wq->flags = 0; in sock_alloc_inode() 261 RCU_INIT_POINTER(ei->socket.wq, wq); in sock_alloc_inode() 275 struct socket_wq *wq; in sock_destroy_inode() local 278 wq = rcu_dereference_protected(ei->socket.wq, 1); in sock_destroy_inode() 279 kfree_rcu(wq, rcu); in sock_destroy_inode() [all …]
|
/linux-4.4.14/drivers/gpu/drm/tilcdc/ |
D | tilcdc_drv.c | 135 flush_workqueue(priv->wq); in tilcdc_unload() 136 destroy_workqueue(priv->wq); in tilcdc_unload() 168 priv->wq = alloc_ordered_workqueue("tilcdc", 0); in tilcdc_load() 169 if (!priv->wq) { in tilcdc_load() 343 flush_workqueue(priv->wq); in tilcdc_load() 344 destroy_workqueue(priv->wq); in tilcdc_load()
|
D | tilcdc_drv.h | 77 struct workqueue_struct *wq; member
|
/linux-4.4.14/drivers/mtd/ubi/ |
D | block.c | 93 struct workqueue_struct *wq; member 333 queue_work(dev->wq, &pdu->work); in ubiblock_queue_rq() 437 dev->wq = alloc_workqueue("%s", 0, 0, gd->disk_name); in ubiblock_create() 438 if (!dev->wq) { in ubiblock_create() 472 destroy_workqueue(dev->wq); in ubiblock_cleanup()
|
/linux-4.4.14/fs/xfs/ |
D | xfs_log_priv.h | 552 static inline void xlog_wait(wait_queue_head_t *wq, spinlock_t *lock) in xlog_wait() argument 556 add_wait_queue_exclusive(wq, &wait); in xlog_wait() 560 remove_wait_queue(wq, &wait); in xlog_wait()
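Editor's note: xlog_wait() is an exclusive-waiter variant of the drop-lock idiom above: add_wait_queue_exclusive() means a wake_up() rouses only one waiter, and the spinlock is released before schedule(). A hypothetical caller (names invented for illustration):

    /* the lock must be held on entry; xlog_wait() returns with it
     * dropped, so retake it if the caller still needs it */
    spin_lock(&demo_lock);
    if (demo_state == DEMO_SYNCING)
            xlog_wait(&demo_waitq, &demo_lock);   /* sleeps, drops lock */
    else
            spin_unlock(&demo_lock);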
|
/linux-4.4.14/drivers/gpu/host1x/hw/ |
D | cdma_hw.c | 243 timeout.wq); in cdma_timeout_handler() 296 INIT_DELAYED_WORK(&cdma->timeout.wq, cdma_timeout_handler); in cdma_timeout_init() 308 cancel_delayed_work(&cdma->timeout.wq); in cdma_timeout_destroy()
|
/linux-4.4.14/fs/jffs2/ |
D | os-linux.h | 40 #define sleep_on_spinunlock(wq, s) \ argument 43 add_wait_queue((wq), &__wait); \ 47 remove_wait_queue((wq), &__wait); \
|
/linux-4.4.14/drivers/scsi/bfa/ |
D | bfad_im.c | 161 wait_queue_head_t *wq; in bfa_cb_tskim_done() local 165 wq = (wait_queue_head_t *) cmnd->SCp.ptr; in bfa_cb_tskim_done() 168 if (wq) in bfa_cb_tskim_done() 169 wake_up(wq); in bfa_cb_tskim_done() 298 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); in bfad_im_reset_lun_handler() 326 cmnd->SCp.ptr = (char *)&wq; in bfad_im_reset_lun_handler() 334 wait_event(wq, test_bit(IO_DONE_BIT, in bfad_im_reset_lun_handler() 361 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); in bfad_im_reset_bus_handler() 368 cmnd->SCp.ptr = (char *)&wq; in bfad_im_reset_bus_handler() 377 wait_event(wq, test_bit(IO_DONE_BIT, in bfad_im_reset_bus_handler()
|
/linux-4.4.14/drivers/scsi/libsas/ |
D | sas_event.c | 59 struct workqueue_struct *wq = ha->core.shost->work_q; in __sas_drain_work() local 67 drain_workqueue(wq); in __sas_drain_work()
|
/linux-4.4.14/net/atm/ |
D | common.c | 95 struct socket_wq *wq; in vcc_def_wakeup() local 98 wq = rcu_dereference(sk->sk_wq); in vcc_def_wakeup() 99 if (wq_has_sleeper(wq)) in vcc_def_wakeup() 100 wake_up(&wq->wait); in vcc_def_wakeup() 114 struct socket_wq *wq; in vcc_write_space() local 119 wq = rcu_dereference(sk->sk_wq); in vcc_write_space() 120 if (wq_has_sleeper(wq)) in vcc_write_space() 121 wake_up_interruptible(&wq->wait); in vcc_write_space()
|
/linux-4.4.14/drivers/net/ethernet/qlogic/qlcnic/ |
D | qlcnic_dcb.c | 289 if (dcb->wq) { in __qlcnic_dcb_free() 290 destroy_workqueue(dcb->wq); in __qlcnic_dcb_free() 291 dcb->wq = NULL; in __qlcnic_dcb_free() 314 dcb->wq = create_singlethread_workqueue("qlcnic-dcb"); in __qlcnic_dcb_attach() 315 if (!dcb->wq) { in __qlcnic_dcb_attach() 339 destroy_workqueue(dcb->wq); in __qlcnic_dcb_attach() 340 dcb->wq = NULL; in __qlcnic_dcb_attach() 539 queue_delayed_work(dcb->wq, &dcb->aen_work, 0); in qlcnic_82xx_dcb_aen_handler() 653 queue_delayed_work(dcb->wq, &dcb->aen_work, 0); in qlcnic_83xx_dcb_aen_handler()
|
/linux-4.4.14/arch/powerpc/platforms/cell/spufs/ |
D | spufs.h | 321 #define spufs_wait(wq, condition) \ argument 326 prepare_to_wait(&(wq), &__wait, TASK_INTERRUPTIBLE); \ 339 finish_wait(&(wq), &__wait); \
|
/linux-4.4.14/include/drm/ |
D | drm_flip_work.h | 87 struct workqueue_struct *wq);
|
/linux-4.4.14/drivers/gpu/drm/atmel-hlcdc/ |
D | atmel_hlcdc_dc.c | 503 dc->wq = alloc_ordered_workqueue("atmel-hlcdc-dc", 0); in atmel_hlcdc_dc_load() 504 if (!dc->wq) in atmel_hlcdc_dc_load() 555 destroy_workqueue(dc->wq); in atmel_hlcdc_dc_load() 566 flush_workqueue(dc->wq); in atmel_hlcdc_dc_unload() 579 destroy_workqueue(dc->wq); in atmel_hlcdc_dc_unload()
|
D | atmel_hlcdc_dc.h | 139 struct workqueue_struct *wq; member
|
/linux-4.4.14/drivers/bluetooth/ |
D | bluecard_cs.c | 283 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); in bluecard_write_wakeup() 306 prepare_to_wait(&wq, &wait, TASK_INTERRUPTIBLE); in bluecard_write_wakeup() 308 finish_wait(&wq, &wait); in bluecard_write_wakeup() 320 prepare_to_wait(&wq, &wait, TASK_INTERRUPTIBLE); in bluecard_write_wakeup() 322 finish_wait(&wq, &wait); in bluecard_write_wakeup()
|
/linux-4.4.14/net/9p/ |
D | trans_fd.c | 144 struct work_struct wq; member 451 m = container_of(work, struct p9_conn, wq); in p9_write_work() 510 schedule_work(&m->wq); in p9_write_work() 592 INIT_WORK(&m->wq, p9_write_work); in p9_conn_create() 644 schedule_work(&m->wq); in p9_poll_mux() 682 schedule_work(&m->wq); in p9_fd_request() 853 cancel_work_sync(&m->wq); in p9_conn_destroy()
|
/linux-4.4.14/include/linux/mtd/ |
D | flashchip.h | 90 wait_queue_head_t wq; /* Wait on here when we're waiting for the chip member
|
D | blktrans.h | 47 struct workqueue_struct *wq; member
|
/linux-4.4.14/virt/kvm/ |
D | async_pf.c | 101 if (waitqueue_active(&vcpu->wq)) in async_pf_execute() 102 wake_up_interruptible(&vcpu->wq); in async_pf_execute()
|
/linux-4.4.14/arch/cris/mm/ |
D | fault.c | 223 DECLARE_WAIT_QUEUE_HEAD(wq); in do_page_fault() 235 wait_event_interruptible(wq, 0 == 1); in do_page_fault()
|
/linux-4.4.14/drivers/char/tpm/ |
D | tpm_ibmvtpm.h | 45 wait_queue_head_t wq; member
|
D | tpm_ibmvtpm.c | 93 sig = wait_event_interruptible(ibmvtpm->wq, ibmvtpm->res_len != 0); in tpm_ibmvtpm_recv() 518 wake_up_interruptible(&ibmvtpm->wq); in ibmvtpm_crq_process() 624 init_waitqueue_head(&ibmvtpm->wq); in tpm_ibmvtpm_probe()
|
/linux-4.4.14/arch/arm/common/ |
D | bL_switcher.c | 263 wait_queue_head_t wq; member 286 wait_event_interruptible(t->wq, in bL_switcher_thread() 372 wake_up(&t->wq); in bL_switch_request_cb() 584 init_waitqueue_head(&t->wq); in bL_switcher_enable()
|
/linux-4.4.14/drivers/uwb/ |
D | uwbd.c | 276 rc->uwbd.wq, in uwbd() 341 wake_up_all(&rc->uwbd.wq); in uwbd_event_queue()
|
/linux-4.4.14/drivers/mtd/ |
D | mtd_blkdevs.c | 184 queue_work(dev->wq, &dev->work); in mtd_blktrans_request() 428 new->wq = alloc_workqueue("%s%d", 0, 0, in add_mtd_blktrans_dev() 430 if (!new->wq) in add_mtd_blktrans_dev() 474 destroy_workqueue(old->wq); in del_mtd_blktrans_dev()
|
/linux-4.4.14/drivers/misc/cxl/ |
D | file.c | 298 poll_wait(file, &ctx->wq, poll); in afu_poll() 341 prepare_to_wait(&ctx->wq, &wait, TASK_INTERRUPTIBLE); in afu_read() 367 finish_wait(&ctx->wq, &wait); in afu_read() 407 finish_wait(&ctx->wq, &wait); in afu_read()
|
D | context.c | 62 init_waitqueue_head(&ctx->wq); in cxl_context_init() 234 wake_up_all(&ctx->wq); in cxl_context_detach()
|
/linux-4.4.14/drivers/net/caif/ |
D | caif_hsi.c | 78 queue_work(cfhsi->wq, &cfhsi->wake_down_work); in cfhsi_inactivity_tout() 988 queue_work(cfhsi->wq, &cfhsi->wake_up_work); in cfhsi_wake_up_cb() 1107 queue_work(cfhsi->wq, &cfhsi->wake_up_work); in cfhsi_xmit() 1204 cfhsi->wq = create_singlethread_workqueue(cfhsi->ndev->name); in cfhsi_open() 1205 if (!cfhsi->wq) { in cfhsi_open() 1251 destroy_workqueue(cfhsi->wq); in cfhsi_open() 1271 flush_workqueue(cfhsi->wq); in cfhsi_close() 1282 destroy_workqueue(cfhsi->wq); in cfhsi_close()
|
D | caif_spi.c | 641 cfspi->wq = create_singlethread_workqueue(dev->name); in cfspi_init() 642 if (!cfspi->wq) { in cfspi_init() 665 queue_work(cfspi->wq, &cfspi->work); in cfspi_init() 692 destroy_workqueue(cfspi->wq); in cfspi_uninit()
|
/linux-4.4.14/Documentation/spi/ |
D | 00-INDEX | 10 - PXA2xx SPI master controller built by spi_message fifo wq
|
/linux-4.4.14/drivers/mtd/nand/ |
D | tmio_nand.c | 175 if (unlikely(!waitqueue_active(&nand_chip->controller->wq))) in tmio_irq() 178 wake_up(&nand_chip->controller->wq); in tmio_irq() 198 timeout = wait_event_timeout(nand_chip->controller->wq, in tmio_nand_wait()
|
/linux-4.4.14/drivers/gpu/drm/via/ |
D | via_dmablit.h | 77 struct work_struct wq; member
|
/linux-4.4.14/drivers/staging/nvec/ |
D | nvec.h | 149 struct workqueue_struct *wq; member
|
/linux-4.4.14/net/dccp/ |
D | output.c | 200 struct socket_wq *wq; in dccp_write_space() local 203 wq = rcu_dereference(sk->sk_wq); in dccp_write_space() 204 if (wq_has_sleeper(wq)) in dccp_write_space() 205 wake_up_interruptible(&wq->wait); in dccp_write_space()
|
/linux-4.4.14/fs/ocfs2/dlm/ |
D | dlmmaster.c | 280 init_waitqueue_head(&mle->wq); in dlm_init_mle() 561 init_waitqueue_head(&res->wq); in dlm_init_lockres() 675 wake_up(&res->wq); in dlm_lockres_drop_inflight_ref() 1010 wake_up(&res->wq); in dlm_get_lock_resource() 1133 (void)wait_event_timeout(mle->wq, in dlm_wait_for_lock_mastery() 1783 wake_up(&res->wq); in dlm_do_assert_master() 1962 wake_up(&mle->wq); in dlm_assert_master_handler() 1983 wake_up(&res->wq); in dlm_assert_master_handler() 2081 wake_up(&res->wq); in dlm_assert_master_post_handler() 2617 ret = wait_event_interruptible_timeout(mle->wq, in dlm_migrate_lockres() [all …]
|
D | dlmconvert.c | 94 wake_up(&res->wq); in dlmconvert_master() 352 wake_up(&res->wq); in dlmconvert_remote() 539 wake_up(&res->wq); in dlm_convert_lock_handler()
|
/linux-4.4.14/drivers/net/can/spi/ |
D | mcp251x.c | 259 struct workqueue_struct *wq; member 531 queue_work(priv->wq, &priv->tx_work); in mcp251x_hard_start_xmit() 548 queue_work(priv->wq, &priv->restart_work); in mcp251x_do_set_mode() 702 destroy_workqueue(priv->wq); in mcp251x_stop() 703 priv->wq = NULL; in mcp251x_stop() 964 priv->wq = create_freezable_workqueue("mcp251x_wq"); in mcp251x_open() 1230 queue_work(priv->wq, &priv->restart_work); in mcp251x_can_resume()
|
/linux-4.4.14/net/sunrpc/xprtrdma/ |
D | frwr_ops.c | 91 struct workqueue_struct *wq; in frwr_destroy_recovery_wq() local 96 wq = frwr_recovery_wq; in frwr_destroy_recovery_wq() 98 destroy_workqueue(wq); in frwr_destroy_recovery_wq()
|
/linux-4.4.14/drivers/ps3/ |
D | ps3av.c | 47 struct workqueue_struct *wq; member 488 queue_work(ps3av->wq, &ps3av->work); in ps3av_set_videomode() 959 ps3av->wq = create_singlethread_workqueue("ps3avd"); in ps3av_probe() 960 if (!ps3av->wq) { in ps3av_probe() 1021 if (ps3av->wq) in ps3av_remove() 1022 destroy_workqueue(ps3av->wq); in ps3av_remove()
|
/linux-4.4.14/drivers/gpu/drm/msm/ |
D | msm_drv.c | 171 queue_work(priv->wq, &vbl_ctrl->work); in vblank_ctrl_queue_work() 206 flush_workqueue(priv->wq); in msm_unload() 207 destroy_workqueue(priv->wq); in msm_unload() 348 priv->wq = alloc_ordered_workqueue("msm", 0); in msm_load() 770 queue_work(priv->wq, &cb->work); in msm_queue_fence_cb() 795 queue_work(priv->wq, &cb->work); in msm_update_fence()
|
D | msm_gpu.c | 242 queue_work(priv->wq, &gpu->inactive_work); in inactive_handler() 324 queue_work(priv->wq, &gpu->recover_work); in hangcheck_handler() 332 queue_work(priv->wq, &gpu->retire_work); in hangcheck_handler() 494 queue_work(priv->wq, &gpu->retire_work); in msm_gpu_retire()
|
/linux-4.4.14/include/linux/power/ |
D | charger-manager.h | 70 struct work_struct wq; member
|
/linux-4.4.14/net/nfc/hci/ |
D | hci.h | 39 wait_queue_head_t *wq; member
|
D | command.c | 64 wake_up(hcp_ew->wq); in nfc_hci_execute_cb() 73 hcp_ew.wq = &ew_wq; in nfc_hci_execute_cmd()
|
/linux-4.4.14/fs/ext4/ |
D | page-io.c | 215 struct workqueue_struct *wq; in ext4_add_complete_io() local 222 wq = sbi->rsv_conversion_wq; in ext4_add_complete_io() 224 queue_work(wq, &ei->i_rsv_conversion_work); in ext4_add_complete_io()
|
/linux-4.4.14/drivers/media/platform/vsp1/ |
D | vsp1_video.h | 69 wait_queue_head_t wq; member
|
/linux-4.4.14/drivers/gpu/drm/i2c/ |
D | adv7511.c | 38 wait_queue_head_t wq; member 448 wake_up_all(&adv7511->wq); in adv7511_irq_process() 472 ret = wait_event_interruptible_timeout(adv7511->wq, in adv7511_wait_for_edid() 917 init_waitqueue_head(&adv7511->wq); in adv7511_probe()
|
/linux-4.4.14/drivers/mfd/ |
D | dln2.c | 87 wait_queue_head_t wq; member 388 ret = wait_event_interruptible(dln2->mod_rx_slots[handle].wq, in alloc_rx_slot() 426 wake_up_interruptible(&rxs->wq); in free_rx_slot() 753 init_waitqueue_head(&dln2->mod_rx_slots[i].wq); in dln2_probe()
|
/linux-4.4.14/drivers/staging/lustre/lustre/ptlrpc/ |
D | niobuf.c | 244 wait_queue_head_t *wq; in ptlrpc_unregister_bulk() local 277 wq = &req->rq_set->set_waitq; in ptlrpc_unregister_bulk() 279 wq = &req->rq_reply_waitq; in ptlrpc_unregister_bulk() 286 rc = l_wait_event(*wq, !ptlrpc_client_bulk_active(req), &lwi); in ptlrpc_unregister_bulk()
|
/linux-4.4.14/drivers/char/ |
D | tlclk.c | 200 static DECLARE_WAIT_QUEUE_HEAD(wq); 253 wait_event_interruptible(wq, got_event); in tlclk_read() 873 wake_up(&wq); in switchover_timeout() 929 wake_up(&wq); in tlclk_interrupt()
|
/linux-4.4.14/drivers/net/wireless/ath/ar5523/ |
D | ar5523.h | 93 struct workqueue_struct *wq; member
|