/linux-4.4.14/drivers/scsi/snic/
vnic_wq.c:
   26  static inline int vnic_wq_get_ctrl(struct vnic_dev *vdev, struct vnic_wq *wq,
   29  wq->ctrl = svnic_dev_get_res(vdev, res_type, index);
   30  if (!wq->ctrl)
   36  static inline int vnic_wq_alloc_ring(struct vnic_dev *vdev, struct vnic_wq *wq,
   39  return svnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count,
   43  static int vnic_wq_alloc_bufs(struct vnic_wq *wq)
   46  unsigned int i, j, count = wq->ring.desc_count;
   50  wq->bufs[i] = kzalloc(VNIC_WQ_BUF_BLK_SZ, GFP_ATOMIC);
   51  if (!wq->bufs[i]) {
   59  buf = wq->bufs[i];
   62  buf->desc = (u8 *)wq->ring.descs +
   63  wq->ring.desc_size * buf->index;
   65  buf->next = wq->bufs[0];
   68  buf->next = wq->bufs[i + 1];
   76  wq->to_use = wq->to_clean = wq->bufs[0];
   81  void svnic_wq_free(struct vnic_wq *wq)
   86  vdev = wq->vdev;
   88  svnic_dev_free_desc_ring(vdev, &wq->ring);
   91  kfree(wq->bufs[i]);
   92  wq->bufs[i] = NULL;
   95  wq->ctrl = NULL;
   99  int vnic_wq_devcmd2_alloc(struct vnic_dev *vdev, struct vnic_wq *wq,
  104  wq->index = 0;
  105  wq->vdev = vdev;
  107  err = vnic_wq_get_ctrl(vdev, wq, 0, RES_TYPE_DEVCMD2);
  114  svnic_wq_disable(wq);
  116  err = vnic_wq_alloc_ring(vdev, wq, 0, desc_count, desc_size);
  123  int svnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq,
  128  wq->index = index;
  129  wq->vdev = vdev;
  131  err = vnic_wq_get_ctrl(vdev, wq, index, RES_TYPE_WQ);
  138  svnic_wq_disable(wq);
  140  err = vnic_wq_alloc_ring(vdev, wq, index, desc_count, desc_size);
  144  err = vnic_wq_alloc_bufs(wq);
  146  svnic_wq_free(wq);
  154  void vnic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index,
  160  unsigned int count = wq->ring.desc_count;
  162  paddr = (u64)wq->ring.base_addr | VNIC_PADDR_TARGET;
  163  writeq(paddr, &wq->ctrl->ring_base);
  164  iowrite32(count, &wq->ctrl->ring_size);
  165  iowrite32(fetch_index, &wq->ctrl->fetch_index);
  166  iowrite32(posted_index, &wq->ctrl->posted_index);
  167  iowrite32(cq_index, &wq->ctrl->cq_index);
  168  iowrite32(error_interrupt_enable, &wq->ctrl->error_interrupt_enable);
  169  iowrite32(error_interrupt_offset, &wq->ctrl->error_interrupt_offset);
  170  iowrite32(0, &wq->ctrl->error_status);
  172  wq->to_use = wq->to_clean =
  173  &wq->bufs[fetch_index / VNIC_WQ_BUF_BLK_ENTRIES(count)]
  177  void svnic_wq_init(struct vnic_wq *wq, unsigned int cq_index,
  181  vnic_wq_init_start(wq, cq_index, 0, 0, error_interrupt_enable,
  185  unsigned int svnic_wq_error_status(struct vnic_wq *wq)
  187  return ioread32(&wq->ctrl->error_status);
  190  void svnic_wq_enable(struct vnic_wq *wq)
  192  iowrite32(1, &wq->ctrl->enable);
  195  int svnic_wq_disable(struct vnic_wq *wq)
  199  iowrite32(0, &wq->ctrl->enable);
  203  if (!(ioread32(&wq->ctrl->running)))
  208  pr_err("Failed to disable WQ[%d]\n", wq->index);
  213  void svnic_wq_clean(struct vnic_wq *wq,
  214  void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf))
  218  BUG_ON(ioread32(&wq->ctrl->enable));
  220  buf = wq->to_clean;
  222  while (svnic_wq_desc_used(wq) > 0) {
  224  (*buf_clean)(wq, buf);
  226  buf = wq->to_clean = buf->next;
  227  wq->ring.desc_avail++;
  230  wq->to_use = wq->to_clean = wq->bufs[0];
  232  iowrite32(0, &wq->ctrl->fetch_index);
  233  iowrite32(0, &wq->ctrl->posted_index);
  234  iowrite32(0, &wq->ctrl->error_status);
  236  svnic_dev_clear_desc_ring(&wq->ring);

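The vnic_wq_alloc_bufs() fragments above build the driver's software view of the ring: buffer-control entries are allocated in blocks, each entry points at its descriptor slot, and the last entry links back to the first so to_use and to_clean can chase each other around a circle. A minimal, self-contained sketch of that linking idea (plain calloc instead of kzalloc/GFP_ATOMIC; the block size is an invented stand-in for the VNIC_WQ_BUF_BLK_* constants):

    #include <stdio.h>
    #include <stdlib.h>

    #define BLK_ENTRIES 4          /* entries per allocation block (illustrative) */

    struct wq_buf {
        unsigned int index;        /* descriptor slot this entry describes */
        struct wq_buf *next;       /* circular link */
    };

    /* Link 'count' entries, allocated in blocks, into one circular list. */
    static struct wq_buf *alloc_buf_ring(struct wq_buf *blocks[], unsigned int count)
    {
        unsigned int nblks = (count + BLK_ENTRIES - 1) / BLK_ENTRIES;
        for (unsigned int i = 0; i < nblks; i++)
            blocks[i] = calloc(BLK_ENTRIES, sizeof(struct wq_buf));
        for (unsigned int i = 0; i < count; i++) {
            struct wq_buf *buf = &blocks[i / BLK_ENTRIES][i % BLK_ENTRIES];
            buf->index = i;
            /* the last entry wraps to the first, closing the ring */
            buf->next = (i + 1 == count)
                    ? &blocks[0][0]
                    : &blocks[(i + 1) / BLK_ENTRIES][(i + 1) % BLK_ENTRIES];
        }
        return &blocks[0][0];      /* both to_use and to_clean start here */
    }

    int main(void)
    {
        struct wq_buf *blocks[8];
        struct wq_buf *buf = alloc_buf_ring(blocks, 10);
        for (int i = 0; i < 12; i++, buf = buf->next)
            printf("%u ", buf->index);   /* prints 0..9, then wraps to 0 1 */
        putchar('\n');
        return 0;
    }
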
vnic_wq.h:
   85  static inline unsigned int svnic_wq_desc_avail(struct vnic_wq *wq)
   88  return wq->ring.desc_avail;
   91  static inline unsigned int svnic_wq_desc_used(struct vnic_wq *wq)
   94  return wq->ring.desc_count - wq->ring.desc_avail - 1;
   97  static inline void *svnic_wq_next_desc(struct vnic_wq *wq)
   99  return wq->to_use->desc;
  102  static inline void svnic_wq_post(struct vnic_wq *wq,
  106  struct vnic_wq_buf *buf = wq->to_use;
  121  iowrite32(buf->index, &wq->ctrl->posted_index);
  123  wq->to_use = buf;
  125  wq->ring.desc_avail--;
  128  static inline void svnic_wq_service(struct vnic_wq *wq,
  130  void (*buf_service)(struct vnic_wq *wq,
  136  buf = wq->to_clean;
  139  (*buf_service)(wq, cq_desc, buf, opaque);
  141  wq->ring.desc_avail++;
  143  wq->to_clean = buf->next;
  148  buf = wq->to_clean;
  152  void svnic_wq_free(struct vnic_wq *wq);
  153  int svnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq,
  155  int vnic_wq_devcmd2_alloc(struct vnic_dev *vdev, struct vnic_wq *wq,
  157  void vnic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index,
  162  void svnic_wq_init(struct vnic_wq *wq, unsigned int cq_index,
  165  unsigned int svnic_wq_error_status(struct vnic_wq *wq);
  166  void svnic_wq_enable(struct vnic_wq *wq);
  167  int svnic_wq_disable(struct vnic_wq *wq);
  168  void svnic_wq_clean(struct vnic_wq *wq,
  169  void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf));

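Note that svnic_wq_desc_used() above reports desc_count - desc_avail - 1: one descriptor is deliberately never handed out, so a completely full ring and a completely empty ring stay distinguishable by index positions alone. A hedged illustration of the bookkeeping (struct and function names are mine, not the driver's; it assumes a fresh ring starts with desc_avail = desc_count - 1, which matches the formula yielding zero used):

    #include <assert.h>

    struct ring_stats { unsigned int desc_count, desc_avail; };

    static unsigned int desc_used(const struct ring_stats *r)
    {
        /* one slot is reserved, so "used" tops out at desc_count - 1 */
        return r->desc_count - r->desc_avail - 1;
    }

    int main(void)
    {
        struct ring_stats r = { .desc_count = 8, .desc_avail = 7 };
        assert(desc_used(&r) == 0);      /* freshly initialized: nothing posted */
        r.desc_avail = 0;                /* every usable slot consumed */
        assert(desc_used(&r) == 7);      /* an 8-entry ring carries at most 7 */
        return 0;
    }
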
snic_res.c:
   99  SNIC_INFO("vNIC resources wq %d\n", c->wq_enet_desc_count);
  137  svnic_wq_free(&snic->wq[i]);
  170  SNIC_INFO("wq %d cq %d intr %d\n", snic->wq_count,
  178  &snic->wq[i],
  224  svnic_wq_init(&snic->wq[i],
  288  err_status = ioread32(&snic->wq[i].ctrl->error_status);

snic_res.h:
   63  snic_queue_wq_eth_desc(struct vnic_wq *wq,
   71  struct wq_enet_desc *desc = svnic_wq_next_desc(wq);
   86  svnic_wq_post(wq, os_buf, dma_addr, len, 1, 1);

vnic_dev.c:
   37  struct vnic_wq wq;   /* in struct devcmd2_controller */
  364  &dc2c->wq,
  370  fetch_idx = ioread32(&dc2c->wq.ctrl->fetch_index);
  381  vnic_wq_init_start(&dc2c->wq, 0, fetch_idx, fetch_idx, 0, 0);
  382  svnic_wq_enable(&dc2c->wq);
  391  dc2c->cmd_ring = (struct vnic_devcmd2 *) dc2c->wq.ring.descs;
  392  dc2c->wq_ctrl = dc2c->wq.ctrl;
  409  svnic_wq_disable(&dc2c->wq);
  410  svnic_wq_free(&dc2c->wq);
  427  svnic_wq_disable(&dc2c->wq);
  428  svnic_wq_free(&dc2c->wq);

snic_isr.c:
  119  "%.11s-scsi-wq",
  159  unsigned int n = ARRAY_SIZE(snic->wq);
  168  BUILD_BUG_ON((ARRAY_SIZE(snic->wq) + SNIC_CQ_IO_CMPL_MAX) >

snic_io.c:
   34  snic_wq_cmpl_frame_send(struct vnic_wq *wq,
   39  struct snic *snic = svnic_dev_priv(wq->vdev);
   69  svnic_wq_service(&snic->wq[q_num],
   97  snic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
  101  struct snic *snic = svnic_dev_priv(wq->vdev);
  162  if (!svnic_wq_desc_avail(snic->wq)) {
  171  snic_queue_wq_eth_desc(&snic->wq[q_num], os_buf, pa, len, 0, 0, 1);

snic_main.c:
  232  ret = svnic_wq_disable(&snic->wq[i]);
  244  svnic_wq_clean(&snic->wq[i], snic_free_wq_buf);
  315  SNIC_HOST_ERR(shost, "Failed to Create ScsiHost wq.\n");
  651  svnic_wq_enable(&snic->wq[i]);
  711  rc = svnic_wq_disable(&snic->wq[i]);

/linux-4.4.14/drivers/net/ethernet/cisco/enic/ |
vnic_wq.c:
   31  static int vnic_wq_alloc_bufs(struct vnic_wq *wq)
   34  unsigned int i, j, count = wq->ring.desc_count;
   38  wq->bufs[i] = kzalloc(VNIC_WQ_BUF_BLK_SZ(count), GFP_ATOMIC);
   39  if (!wq->bufs[i])
   44  buf = wq->bufs[i];
   47  buf->desc = (u8 *)wq->ring.descs +
   48  wq->ring.desc_size * buf->index;
   50  buf->next = wq->bufs[0];
   54  buf->next = wq->bufs[i + 1];
   64  wq->to_use = wq->to_clean = wq->bufs[0];
   69  void vnic_wq_free(struct vnic_wq *wq)
   74  vdev = wq->vdev;
   76  vnic_dev_free_desc_ring(vdev, &wq->ring);
   79  if (wq->bufs[i]) {
   80  kfree(wq->bufs[i]);
   81  wq->bufs[i] = NULL;
   85  wq->ctrl = NULL;
   88  int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index,
   93  wq->index = index;
   94  wq->vdev = vdev;
   96  wq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_WQ, index);
   97  if (!wq->ctrl) {
  102  vnic_wq_disable(wq);
  104  err = vnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, desc_size);
  108  err = vnic_wq_alloc_bufs(wq);
  110  vnic_wq_free(wq);
  117  int enic_wq_devcmd2_alloc(struct vnic_dev *vdev, struct vnic_wq *wq,
  122  wq->index = 0;
  123  wq->vdev = vdev;
  125  wq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_DEVCMD2, 0);
  126  if (!wq->ctrl)
  128  vnic_wq_disable(wq);
  129  err = vnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, desc_size);
  134  void enic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index,
  140  unsigned int count = wq->ring.desc_count;
  142  paddr = (u64)wq->ring.base_addr | VNIC_PADDR_TARGET;
  143  writeq(paddr, &wq->ctrl->ring_base);
  144  iowrite32(count, &wq->ctrl->ring_size);
  145  iowrite32(fetch_index, &wq->ctrl->fetch_index);
  146  iowrite32(posted_index, &wq->ctrl->posted_index);
  147  iowrite32(cq_index, &wq->ctrl->cq_index);
  148  iowrite32(error_interrupt_enable, &wq->ctrl->error_interrupt_enable);
  149  iowrite32(error_interrupt_offset, &wq->ctrl->error_interrupt_offset);
  150  iowrite32(0, &wq->ctrl->error_status);
  152  wq->to_use = wq->to_clean =
  153  &wq->bufs[fetch_index / VNIC_WQ_BUF_BLK_ENTRIES(count)]
  157  void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index,
  161  enic_wq_init_start(wq, cq_index, 0, 0,
  166  unsigned int vnic_wq_error_status(struct vnic_wq *wq)
  168  return ioread32(&wq->ctrl->error_status);
  171  void vnic_wq_enable(struct vnic_wq *wq)
  173  iowrite32(1, &wq->ctrl->enable);
  176  int vnic_wq_disable(struct vnic_wq *wq)
  179  struct vnic_dev *vdev = wq->vdev;
  181  iowrite32(0, &wq->ctrl->enable);
  185  if (!(ioread32(&wq->ctrl->running)))
  190  vdev_neterr("Failed to disable WQ[%d]\n", wq->index);
  195  void vnic_wq_clean(struct vnic_wq *wq,
  196  void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf))
  200  buf = wq->to_clean;
  202  while (vnic_wq_desc_used(wq) > 0) {
  204  (*buf_clean)(wq, buf);
  206  buf = wq->to_clean = buf->next;
  207  wq->ring.desc_avail++;
  210  wq->to_use = wq->to_clean = wq->bufs[0];
  212  iowrite32(0, &wq->ctrl->fetch_index);
  213  iowrite32(0, &wq->ctrl->posted_index);
  214  iowrite32(0, &wq->ctrl->error_status);
  216  vnic_dev_clear_desc_ring(&wq->ring);

vnic_wq.h:
   99  struct vnic_wq wq;   /* in struct devcmd2_controller */
  103  static inline unsigned int vnic_wq_desc_avail(struct vnic_wq *wq)
  106  return wq->ring.desc_avail;
  109  static inline unsigned int vnic_wq_desc_used(struct vnic_wq *wq)
  112  return wq->ring.desc_count - wq->ring.desc_avail - 1;
  115  static inline void *vnic_wq_next_desc(struct vnic_wq *wq)
  117  return wq->to_use->desc;
  120  static inline void vnic_wq_doorbell(struct vnic_wq *wq)
  128  iowrite32(wq->to_use->index, &wq->ctrl->posted_index);
  131  static inline void vnic_wq_post(struct vnic_wq *wq,
  137  struct vnic_wq_buf *buf = wq->to_use;
  149  wq->to_use = buf;
  151  wq->ring.desc_avail -= desc_skip_cnt;
  154  static inline void vnic_wq_service(struct vnic_wq *wq,
  156  void (*buf_service)(struct vnic_wq *wq,
  162  buf = wq->to_clean;
  165  (*buf_service)(wq, cq_desc, buf, opaque);
  167  wq->ring.desc_avail++;
  169  wq->to_clean = buf->next;
  174  buf = wq->to_clean;
  178  void vnic_wq_free(struct vnic_wq *wq);
  179  int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index,
  181  void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index,
  184  unsigned int vnic_wq_error_status(struct vnic_wq *wq);
  185  void vnic_wq_enable(struct vnic_wq *wq);
  186  int vnic_wq_disable(struct vnic_wq *wq);
  187  void vnic_wq_clean(struct vnic_wq *wq,
  188  void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf));
  189  int enic_wq_devcmd2_alloc(struct vnic_dev *vdev, struct vnic_wq *wq,
  191  void enic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index,

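Worth noting in the enic variant above: vnic_wq_post() only advances software state, while vnic_wq_doorbell() performs the single MMIO write of posted_index. That split lets the TX path queue every fragment of an skb and pay for one doorbell. A sketch of the pattern (the iowrite32 stand-in just logs; all names and constants here are illustrative):

    #include <stdio.h>

    struct fake_wq {
        unsigned int to_use_index;   /* next free slot (software only) */
        unsigned int desc_count;
    };

    /* stand-in for the MMIO doorbell; the real code does iowrite32() */
    static void doorbell(const struct fake_wq *wq)
    {
        printf("MMIO write: posted_index = %u\n", wq->to_use_index);
    }

    static void post(struct fake_wq *wq)
    {
        /* software bookkeeping only -- no hardware access here */
        wq->to_use_index = (wq->to_use_index + 1) % wq->desc_count;
    }

    int main(void)
    {
        struct fake_wq wq = { .to_use_index = 0, .desc_count = 32 };
        for (int frag = 0; frag < 4; frag++)
            post(&wq);       /* queue 4 descriptors for one packet */
        doorbell(&wq);       /* ...but ring the doorbell exactly once */
        return 0;
    }
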
enic_res.h:
   43  static inline void enic_queue_wq_desc_ex(struct vnic_wq *wq,
   49  struct wq_enet_desc *desc = vnic_wq_next_desc(wq);
   65  vnic_wq_post(wq, os_buf, dma_addr, len, sop, eop, desc_skip_cnt,
   69  static inline void enic_queue_wq_desc_cont(struct vnic_wq *wq,
   73  enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,
   78  static inline void enic_queue_wq_desc(struct vnic_wq *wq, void *os_buf,
   82  enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,
   88  static inline void enic_queue_wq_desc_csum(struct vnic_wq *wq,
   93  enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,
  100  static inline void enic_queue_wq_desc_csum_l4(struct vnic_wq *wq,
  105  enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,
  111  static inline void enic_queue_wq_desc_tso(struct vnic_wq *wq,
  116  enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len,

enic_main.c:
  205  static void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
  207  struct enic *enic = vnic_dev_priv(wq->vdev);
  220  static void enic_wq_free_buf(struct vnic_wq *wq,
  223  enic_free_wq_buf(wq, buf);
  233  vnic_wq_service(&enic->wq[q_number], cq_desc,
  238  vnic_wq_desc_avail(&enic->wq[q_number]) >=
  254  error_status = vnic_wq_error_status(&enic->wq[i]);
  425  static int enic_queue_wq_skb_cont(struct enic *enic, struct vnic_wq *wq,
  440  enic_queue_wq_desc_cont(wq, skb, dma_addr, skb_frag_size(frag),
  448  static int enic_queue_wq_skb_vlan(struct enic *enic, struct vnic_wq *wq,
  468  enic_queue_wq_desc(wq, skb, dma_addr, head_len, vlan_tag_insert,
  472  err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
  477  static int enic_queue_wq_skb_csum_l4(struct enic *enic, struct vnic_wq *wq,
  499  enic_queue_wq_desc_csum_l4(wq, skb, dma_addr, head_len, csum_offset,
  504  err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
  509  static int enic_queue_wq_skb_tso(struct enic *enic, struct vnic_wq *wq,
  546  enic_queue_wq_desc_tso(wq, skb, dma_addr, len, mss, hdr_len,
  572  enic_queue_wq_desc_cont(wq, skb, dma_addr, len,
  584  enic_queue_wq_skb(struct enic *enic, struct vnic_wq *wq, struct sk_buff *skb)
  603  err = enic_queue_wq_skb_tso(enic, wq, skb, mss,
  607  err = enic_queue_wq_skb_csum_l4(enic, wq, skb, vlan_tag_insert,
  610  err = enic_queue_wq_skb_vlan(enic, wq, skb, vlan_tag_insert,
  615  buf = wq->to_use->prev;
  619  while (!buf->os_buf && (buf->next != wq->to_clean)) {
  620  enic_free_wq_buf(wq, buf);
  621  wq->ring.desc_avail++;
  624  wq->to_use = buf->next;
  634  struct vnic_wq *wq;
  644  wq = &enic->wq[txq_map];
  661  if (vnic_wq_desc_avail(wq) <
  670  enic_queue_wq_skb(enic, wq, skb);
  672  if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)
  675  vnic_wq_doorbell(wq);
 1422  struct vnic_wq *wq = &enic->wq[wq_index];
 1429  wq_irq = wq->index;
 1575  int wq = enic_cq_wq(enic, i);
 1582  enic->msix[intr].devid = &enic->napi[wq];
 1739  vnic_wq_enable(&enic->wq[i]);
 1815  err = vnic_wq_disable(&enic->wq[i]);
 1830  vnic_wq_clean(&enic->wq[i], enic_free_wq_buf);
 2686  /* Setup notification timer, HW reset task, and wq locks

enic_res.c:
   98  "vNIC MAC addr %pM wq/rq %d/%d mtu %d\n",
  187  vnic_wq_free(&enic->wq[i]);
  205  "vNIC resources avail: wq %d rq %d cq %d intr %d\n",
  252  vnic_wq_init(&enic->wq[i],
  321  "wq %d rq %d cq %d intr %d intr mode %s\n",
  333  err = vnic_wq_alloc(enic->vdev, &enic->wq[i], i,

enic.h:
  169  ____cacheline_aligned struct vnic_wq wq[ENIC_WQ_MAX];   /* in struct enic */
  225  static inline unsigned int enic_cq_wq(struct enic *enic, unsigned int wq)
  227  return enic->rq_count + wq;
  251  enic_msix_wq_intr(struct enic *enic, unsigned int wq)
  254  return enic->cq[enic_cq_wq(enic, wq)].interrupt_offset;

vnic_dev.c:
  316  vdev_neterr("devcmd2 %d: wq is full. fetch index: %u, posted index: %u\n",
  397  err = enic_wq_devcmd2_alloc(vdev, &vdev->devcmd2->wq, DEVCMD2_RING_SIZE,
  402  fetch_index = ioread32(&vdev->devcmd2->wq.ctrl->fetch_index);
  409  enic_wq_init_start(&vdev->devcmd2->wq, 0, fetch_index, fetch_index, 0,
  412  vnic_wq_enable(&vdev->devcmd2->wq);
  420  vdev->devcmd2->cmd_ring = vdev->devcmd2->wq.ring.descs;
  421  vdev->devcmd2->wq_ctrl = vdev->devcmd2->wq.ctrl;
  437  vnic_wq_disable(&vdev->devcmd2->wq);
  438  vnic_wq_free(&vdev->devcmd2->wq);
  449  vnic_wq_disable(&vdev->devcmd2->wq);
  450  vnic_wq_free(&vdev->devcmd2->wq);

/linux-4.4.14/drivers/scsi/fnic/ |
vnic_wq_copy.h:
   36  static inline unsigned int vnic_wq_copy_desc_avail(struct vnic_wq_copy *wq)
   38  return wq->ring.desc_avail;
   41  static inline unsigned int vnic_wq_copy_desc_in_use(struct vnic_wq_copy *wq)
   43  return wq->ring.desc_count - 1 - wq->ring.desc_avail;
   46  static inline void *vnic_wq_copy_next_desc(struct vnic_wq_copy *wq)
   48  struct fcpio_host_req *desc = wq->ring.descs;
   49  return &desc[wq->to_use_index];
   52  static inline void vnic_wq_copy_post(struct vnic_wq_copy *wq)
   55  ((wq->to_use_index + 1) == wq->ring.desc_count) ?
   56  (wq->to_use_index = 0) : (wq->to_use_index++);
   57  wq->ring.desc_avail--;
   66  iowrite32(wq->to_use_index, &wq->ctrl->posted_index);
   69  static inline void vnic_wq_copy_desc_process(struct vnic_wq_copy *wq, u16 index)
   73  if (wq->to_clean_index <= index)
   74  cnt = (index - wq->to_clean_index) + 1;
   76  cnt = wq->ring.desc_count - wq->to_clean_index + index + 1;
   78  wq->to_clean_index = ((index + 1) % wq->ring.desc_count);
   79  wq->ring.desc_avail += cnt;
   83  static inline void vnic_wq_copy_service(struct vnic_wq_copy *wq,
   85  void (*q_service)(struct vnic_wq_copy *wq,
   88  struct fcpio_host_req *wq_desc = wq->ring.descs;
   94  (*q_service)(wq, &wq_desc[wq->to_clean_index]);
   96  wq->ring.desc_avail++;
   98  curr_index = wq->to_clean_index;
  103  ((wq->to_clean_index + 1) == wq->ring.desc_count) ?
  104  (wq->to_clean_index = 0) : (wq->to_clean_index++);
  111  (wq->to_clean_index == wq->to_use_index))
  116  void vnic_wq_copy_enable(struct vnic_wq_copy *wq);
  117  int vnic_wq_copy_disable(struct vnic_wq_copy *wq);
  118  void vnic_wq_copy_free(struct vnic_wq_copy *wq);
  119  int vnic_wq_copy_alloc(struct vnic_dev *vdev, struct vnic_wq_copy *wq,
  121  void vnic_wq_copy_init(struct vnic_wq_copy *wq, unsigned int cq_index,
  124  void vnic_wq_copy_clean(struct vnic_wq_copy *wq,
  125  void (*q_clean)(struct vnic_wq_copy *wq,

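vnic_wq_copy_desc_process() above turns a completed-descriptor index reported by hardware into a count of reclaimable slots, accounting for wraparound. The same arithmetic, pulled out and exercised on both sides of the wrap (the helper name is mine):

    #include <assert.h>

    /* Slots completed when hw reports 'index', cleaning from 'to_clean'. */
    static unsigned int completed(unsigned int to_clean, unsigned int index,
                                  unsigned int desc_count)
    {
        if (to_clean <= index)
            return (index - to_clean) + 1;            /* no wrap */
        return desc_count - to_clean + index + 1;     /* wrapped past the end */
    }

    int main(void)
    {
        assert(completed(2, 5, 8) == 4);   /* slots 2,3,4,5 */
        assert(completed(6, 1, 8) == 4);   /* slots 6,7,0,1 -- wrapped */
        return 0;
    }
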
vnic_wq.c:
   27  static int vnic_wq_alloc_bufs(struct vnic_wq *wq)
   31  unsigned int i, j, count = wq->ring.desc_count;
   34  vdev = wq->vdev;
   37  wq->bufs[i] = kzalloc(VNIC_WQ_BUF_BLK_SZ, GFP_ATOMIC);
   38  if (!wq->bufs[i]) {
   45  buf = wq->bufs[i];
   48  buf->desc = (u8 *)wq->ring.descs +
   49  wq->ring.desc_size * buf->index;
   51  buf->next = wq->bufs[0];
   54  buf->next = wq->bufs[i + 1];
   62  wq->to_use = wq->to_clean = wq->bufs[0];
   67  void vnic_wq_free(struct vnic_wq *wq)
   72  vdev = wq->vdev;
   74  vnic_dev_free_desc_ring(vdev, &wq->ring);
   77  kfree(wq->bufs[i]);
   78  wq->bufs[i] = NULL;
   81  wq->ctrl = NULL;
   85  int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index,
   90  wq->index = index;
   91  wq->vdev = vdev;
   93  wq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_WQ, index);
   94  if (!wq->ctrl) {
   99  vnic_wq_disable(wq);
  101  err = vnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, desc_size);
  105  err = vnic_wq_alloc_bufs(wq);
  107  vnic_wq_free(wq);
  114  void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index,
  120  paddr = (u64)wq->ring.base_addr | VNIC_PADDR_TARGET;
  121  writeq(paddr, &wq->ctrl->ring_base);
  122  iowrite32(wq->ring.desc_count, &wq->ctrl->ring_size);
  123  iowrite32(0, &wq->ctrl->fetch_index);
  124  iowrite32(0, &wq->ctrl->posted_index);
  125  iowrite32(cq_index, &wq->ctrl->cq_index);
  126  iowrite32(error_interrupt_enable, &wq->ctrl->error_interrupt_enable);
  127  iowrite32(error_interrupt_offset, &wq->ctrl->error_interrupt_offset);
  128  iowrite32(0, &wq->ctrl->error_status);
  131  unsigned int vnic_wq_error_status(struct vnic_wq *wq)
  133  return ioread32(&wq->ctrl->error_status);
  136  void vnic_wq_enable(struct vnic_wq *wq)
  138  iowrite32(1, &wq->ctrl->enable);
  141  int vnic_wq_disable(struct vnic_wq *wq)
  145  iowrite32(0, &wq->ctrl->enable);
  149  if (!(ioread32(&wq->ctrl->running)))
  154  printk(KERN_ERR "Failed to disable WQ[%d]\n", wq->index);
  159  void vnic_wq_clean(struct vnic_wq *wq,
  160  void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf))
  164  BUG_ON(ioread32(&wq->ctrl->enable));
  166  buf = wq->to_clean;
  168  while (vnic_wq_desc_used(wq) > 0) {
  170  (*buf_clean)(wq, buf);
  172  buf = wq->to_clean = buf->next;
  173  wq->ring.desc_avail++;
  176  wq->to_use = wq->to_clean = wq->bufs[0];
  178  iowrite32(0, &wq->ctrl->fetch_index);
  179  iowrite32(0, &wq->ctrl->posted_index);
  180  iowrite32(0, &wq->ctrl->error_status);
  182  vnic_dev_clear_desc_ring(&wq->ring);

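All three vnic_wq_disable() variants in this listing follow the same device-quiesce pattern: clear the enable register, poll the running bit with short delays, and fail loudly if the hardware never stops. A user-space sketch of just the control flow (register reads are simulated; the real code uses ioread32()/udelay() and its own iteration count):

    #include <stdio.h>

    static unsigned int running_reads = 3;  /* simulate hw that stops after 3 polls */

    static unsigned int read_running(void)
    {
        return running_reads ? running_reads-- : 0;
    }

    static int wq_disable(int index)
    {
        /* real code first does: iowrite32(0, &wq->ctrl->enable); */
        for (int wait = 0; wait < 100; wait++) {    /* bounded poll loop */
            if (!read_running())
                return 0;                           /* hardware quiesced */
            /* real code: udelay(10); */
        }
        fprintf(stderr, "Failed to disable WQ[%d]\n", index);
        return -1;
    }

    int main(void) { return wq_disable(0); }
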
vnic_wq_copy.c:
   25  void vnic_wq_copy_enable(struct vnic_wq_copy *wq)
   27  iowrite32(1, &wq->ctrl->enable);
   30  int vnic_wq_copy_disable(struct vnic_wq_copy *wq)
   34  iowrite32(0, &wq->ctrl->enable);
   38  if (!(ioread32(&wq->ctrl->running)))
   45  wq->index, ioread32(&wq->ctrl->fetch_index),
   46  ioread32(&wq->ctrl->posted_index));
   51  void vnic_wq_copy_clean(struct vnic_wq_copy *wq,
   52  void (*q_clean)(struct vnic_wq_copy *wq,
   55  BUG_ON(ioread32(&wq->ctrl->enable));
   57  if (vnic_wq_copy_desc_in_use(wq))
   58  vnic_wq_copy_service(wq, -1, q_clean);
   60  wq->to_use_index = wq->to_clean_index = 0;
   62  iowrite32(0, &wq->ctrl->fetch_index);
   63  iowrite32(0, &wq->ctrl->posted_index);
   64  iowrite32(0, &wq->ctrl->error_status);
   66  vnic_dev_clear_desc_ring(&wq->ring);
   69  void vnic_wq_copy_free(struct vnic_wq_copy *wq)
   73  vdev = wq->vdev;
   74  vnic_dev_free_desc_ring(vdev, &wq->ring);
   75  wq->ctrl = NULL;
   78  int vnic_wq_copy_alloc(struct vnic_dev *vdev, struct vnic_wq_copy *wq,
   84  wq->index = index;
   85  wq->vdev = vdev;
   86  wq->to_use_index = wq->to_clean_index = 0;
   87  wq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_WQ, index);
   88  if (!wq->ctrl) {
   93  vnic_wq_copy_disable(wq);
   95  err = vnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, desc_size);
  102  void vnic_wq_copy_init(struct vnic_wq_copy *wq, unsigned int cq_index,
  108  paddr = (u64)wq->ring.base_addr | VNIC_PADDR_TARGET;
  109  writeq(paddr, &wq->ctrl->ring_base);
  110  iowrite32(wq->ring.desc_count, &wq->ctrl->ring_size);
  111  iowrite32(0, &wq->ctrl->fetch_index);
  112  iowrite32(0, &wq->ctrl->posted_index);
  113  iowrite32(cq_index, &wq->ctrl->cq_index);
  114  iowrite32(error_interrupt_enable, &wq->ctrl->error_interrupt_enable);
  115  iowrite32(error_interrupt_offset, &wq->ctrl->error_interrupt_offset);

vnic_wq.h:
   96  static inline unsigned int vnic_wq_desc_avail(struct vnic_wq *wq)
   99  return wq->ring.desc_avail;
  102  static inline unsigned int vnic_wq_desc_used(struct vnic_wq *wq)
  105  return wq->ring.desc_count - wq->ring.desc_avail - 1;
  108  static inline void *vnic_wq_next_desc(struct vnic_wq *wq)
  110  return wq->to_use->desc;
  113  static inline void vnic_wq_post(struct vnic_wq *wq,
  117  struct vnic_wq_buf *buf = wq->to_use;
  132  iowrite32(buf->index, &wq->ctrl->posted_index);
  134  wq->to_use = buf;
  136  wq->ring.desc_avail--;
  139  static inline void vnic_wq_service(struct vnic_wq *wq,
  141  void (*buf_service)(struct vnic_wq *wq,
  147  buf = wq->to_clean;
  150  (*buf_service)(wq, cq_desc, buf, opaque);
  152  wq->ring.desc_avail++;
  154  wq->to_clean = buf->next;
  159  buf = wq->to_clean;
  163  void vnic_wq_free(struct vnic_wq *wq);
  164  int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index,
  166  void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index,
  169  unsigned int vnic_wq_error_status(struct vnic_wq *wq);
  170  void vnic_wq_enable(struct vnic_wq *wq);
  171  int vnic_wq_disable(struct vnic_wq *wq);
  172  void vnic_wq_clean(struct vnic_wq *wq,
  173  void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf));

fnic_res.h:
   30  static inline void fnic_queue_wq_desc(struct vnic_wq *wq,
   37  struct wq_enet_desc *desc = vnic_wq_next_desc(wq);
   51  vnic_wq_post(wq, os_buf, dma_addr, len, sop, eop);
   54  static inline void fnic_queue_wq_eth_desc(struct vnic_wq *wq,
   61  struct wq_enet_desc *desc = vnic_wq_next_desc(wq);
   76  vnic_wq_post(wq, os_buf, dma_addr, len, 1, 1);
   79  static inline void fnic_queue_wq_copy_desc_icmnd_16(struct vnic_wq_copy *wq,
   91  struct fcpio_host_req *desc = vnic_wq_copy_next_desc(wq);
  121  vnic_wq_copy_post(wq);
  124  static inline void fnic_queue_wq_copy_desc_itmf(struct vnic_wq_copy *wq,
  130  struct fcpio_host_req *desc = vnic_wq_copy_next_desc(wq);
  147  vnic_wq_copy_post(wq);
  150  static inline void fnic_queue_wq_copy_desc_flogi_reg(struct vnic_wq_copy *wq,
  154  struct fcpio_host_req *desc = vnic_wq_copy_next_desc(wq);
  166  vnic_wq_copy_post(wq);
  169  static inline void fnic_queue_wq_copy_desc_fip_reg(struct vnic_wq_copy *wq,
  174  struct fcpio_host_req *desc = vnic_wq_copy_next_desc(wq);
  190  vnic_wq_copy_post(wq);
  193  static inline void fnic_queue_wq_copy_desc_fw_reset(struct vnic_wq_copy *wq,
  196  struct fcpio_host_req *desc = vnic_wq_copy_next_desc(wq);
  203  vnic_wq_copy_post(wq);
  206  static inline void fnic_queue_wq_copy_desc_lunmap(struct vnic_wq_copy *wq,
  210  struct fcpio_host_req *desc = vnic_wq_copy_next_desc(wq);
  220  vnic_wq_copy_post(wq);

fnic_scsi.c:
  143  static int free_wq_copy_descs(struct fnic *fnic, struct vnic_wq_copy *wq)
  153  if (wq->to_clean_index <= fnic->fw_ack_index[0])
  154  wq->ring.desc_avail += (fnic->fw_ack_index[0]
  155  - wq->to_clean_index + 1);
  157  wq->ring.desc_avail += (wq->ring.desc_count
  158  - wq->to_clean_index
  166  wq->to_clean_index =
  167  (fnic->fw_ack_index[0] + 1) % wq->ring.desc_count;
  208  struct vnic_wq_copy *wq = &fnic->wq_copy[0];
  224  if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
  225  free_wq_copy_descs(fnic, wq);
  227  if (!vnic_wq_copy_desc_avail(wq))
  230  fnic_queue_wq_copy_desc_fw_reset(wq, SCSI_NO_TAG);
  261  struct vnic_wq_copy *wq = &fnic->wq_copy[0];
  270  if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
  271  free_wq_copy_descs(fnic, wq);
  273  if (!vnic_wq_copy_desc_avail(wq)) {
  287  fnic_queue_wq_copy_desc_fip_reg(wq, SCSI_NO_TAG,
  295  fnic_queue_wq_copy_desc_flogi_reg(wq, SCSI_NO_TAG,
  315  * Routine to enqueue a wq copy desc
  317  fnic_queue_wq_copy_desc(struct fnic *fnic, struct vnic_wq_copy *wq, struct fnic_io_req *io_req, struct scsi_cmnd *sc, int sg_count)
  377  if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
  378  free_wq_copy_descs(fnic, wq);
  380  if (unlikely(!vnic_wq_copy_desc_avail(wq))) {
  399  fnic_queue_wq_copy_desc_icmnd_16(wq, sc->request->tag,
  436  struct vnic_wq_copy *wq;
  546  /* create copy wq desc and enqueue it */
  547  wq = &fnic->wq_copy[0];
  548  ret = fnic_queue_wq_copy_desc(fnic, wq, io_req, sc, sg_count);
  744  static inline int is_ack_index_in_range(struct vnic_wq_copy *wq,
  747  if (wq->to_clean_index <= wq->to_use_index) {
  749  if (request_out < wq->to_clean_index ||
  750  request_out >= wq->to_use_index)
  754  if (request_out < wq->to_clean_index &&
  755  request_out >= wq->to_use_index)
  773  struct vnic_wq_copy *wq;
  779  wq = &fnic->wq_copy[cq_index - fnic->raw_wq_count - fnic->rq_count];
  783  if (is_ack_index_in_range(wq, request_out)) {
 1242  case FCPIO_ACK: /* fw copied copy wq desc to its queue */
 1275  * Routine to process wq copy
 1377  void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq,
 1381  struct fnic *fnic = vnic_dev_priv(wq->vdev);
 1442  struct vnic_wq_copy *wq = &fnic->wq_copy[0];
 1458  if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
 1459  free_wq_copy_descs(fnic, wq);
 1461  if (!vnic_wq_copy_desc_avail(wq)) {
 1469  fnic_queue_wq_copy_desc_itmf(wq, tag | FNIC_TAG_ABORT,
 1965  struct vnic_wq_copy *wq = &fnic->wq_copy[0];
 1983  if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
 1984  free_wq_copy_descs(fnic, wq);
 1986  if (!vnic_wq_copy_desc_avail(wq)) {
 1997  fnic_queue_wq_copy_desc_itmf(wq, sc->request->tag | FNIC_TAG_DEV_RST,

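is_ack_index_in_range() above sanity-checks an ACK index from firmware: the index must fall inside the window of descriptors currently in flight, which is [to_clean, to_use) either as one contiguous run or as two runs when the window wraps. The predicate in isolation, with the two branches exercised (helper name is mine; logic reproduced from the fragments):

    #include <assert.h>
    #include <stdbool.h>

    static bool ack_in_range(unsigned int to_clean, unsigned int to_use,
                             unsigned int request_out)
    {
        if (to_clean <= to_use)                  /* in-flight region is contiguous */
            return request_out >= to_clean && request_out < to_use;
        /* region wraps: valid indexes are [to_clean, end) plus [0, to_use) */
        return request_out >= to_clean || request_out < to_use;
    }

    int main(void)
    {
        assert( ack_in_range(2, 6, 4));   /* inside the contiguous window */
        assert(!ack_in_range(2, 6, 7));   /* past to_use: stale or bogus */
        assert( ack_in_range(6, 2, 7));   /* wrapped window covers 6,7,0,1 */
        assert(!ack_in_range(6, 2, 4));   /* hole in the middle of the wrap */
        return 0;
    }
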
fnic_res.c:
  148  "wq/wq_copy/rq %d/%d/%d\n",
  215  vnic_wq_free(&fnic->wq[i]);
  250  "wq %d cp_wq %d raw_wq %d rq %d cq %d intr %d\n",
  256  err = vnic_wq_alloc(fnic->vdev, &fnic->wq[i], i,
  338  * Note for copy wq we always initialize with cq_index = 0
  365  vnic_wq_init(&fnic->wq[i],

fnic_fcs.c:
  991  struct vnic_wq *wq = &fnic->wq[0];
 1026  if (!vnic_wq_desc_avail(wq))
 1029  fnic_queue_wq_eth_desc(wq, skb, pa, skb->len,
 1047  struct vnic_wq *wq = &fnic->wq[0];
 1109  if (!vnic_wq_desc_avail(wq)) {
 1116  fnic_queue_wq_desc(wq, skb, pa, tot_len, fr_eof(fp),
 1218  static void fnic_wq_complete_frame_send(struct vnic_wq *wq,
 1224  struct fnic *fnic = vnic_dev_priv(wq->vdev);
 1241  vnic_wq_service(&fnic->wq[q_number], cq_desc, completed_index,
 1264  void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
 1267  struct fnic *fnic = vnic_dev_priv(wq->vdev);

fnic_isr.c:
  197  "%.11s-fcs-wq", fnic->name);
  202  "%.11s-scsi-wq", fnic->name);
  238  unsigned int m = ARRAY_SIZE(fnic->wq);

fnic.h:
  305  ____cacheline_aligned struct vnic_wq wq[FNIC_WQ_MAX];   /* in struct fnic */
  330  void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf);
  355  void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq,

/linux-4.4.14/drivers/net/ethernet/mellanox/mlx5/core/ |
wq.c:
   34  #include "wq.h"
   37  u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq)
   39  return (u32)wq->sz_m1 + 1;
   42  u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq)
   44  return wq->sz_m1 + 1;
   47  u32 mlx5_wq_ll_get_size(struct mlx5_wq_ll *wq)
   49  return (u32)wq->sz_m1 + 1;
   52  static u32 mlx5_wq_cyc_get_byte_size(struct mlx5_wq_cyc *wq)
   54  return mlx5_wq_cyc_get_size(wq) << wq->log_stride;
   57  static u32 mlx5_cqwq_get_byte_size(struct mlx5_cqwq *wq)
   59  return mlx5_cqwq_get_size(wq) << wq->log_stride;
   62  static u32 mlx5_wq_ll_get_byte_size(struct mlx5_wq_ll *wq)
   64  return mlx5_wq_ll_get_size(wq) << wq->log_stride;
   67  mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, void *wqc, struct mlx5_wq_cyc *wq, struct mlx5_wq_ctrl *wq_ctrl)
   73  wq->log_stride = MLX5_GET(wq, wqc, log_wq_stride);
   74  wq->sz_m1 = (1 << MLX5_GET(wq, wqc, log_wq_sz)) - 1;
   82  err = mlx5_buf_alloc_node(mdev, mlx5_wq_cyc_get_byte_size(wq),
   89  wq->buf = wq_ctrl->buf.direct.buf;
   90  wq->db = wq_ctrl->db.db;
  102  mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, void *cqc, struct mlx5_cqwq *wq, struct mlx5_wq_ctrl *wq_ctrl)
  108  wq->log_stride = 6 + MLX5_GET(cqc, cqc, cqe_sz);
  109  wq->log_sz = MLX5_GET(cqc, cqc, log_cq_size);
  110  wq->sz_m1 = (1 << wq->log_sz) - 1;
  118  err = mlx5_buf_alloc_node(mdev, mlx5_cqwq_get_byte_size(wq),
  125  wq->buf = wq_ctrl->buf.direct.buf;
  126  wq->db = wq_ctrl->db.db;
  138  mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, void *wqc, struct mlx5_wq_ll *wq, struct mlx5_wq_ctrl *wq_ctrl)
  146  wq->log_stride = MLX5_GET(wq, wqc, log_wq_stride);
  147  wq->sz_m1 = (1 << MLX5_GET(wq, wqc, log_wq_sz)) - 1;
  155  err = mlx5_buf_alloc(mdev, mlx5_wq_ll_get_byte_size(wq), &wq_ctrl->buf);
  161  wq->buf = wq_ctrl->buf.direct.buf;
  162  wq->db = wq_ctrl->db.db;
  164  for (i = 0; i < wq->sz_m1; i++) {
  165  next_seg = mlx5_wq_ll_get_wqe(wq, i);
  168  next_seg = mlx5_wq_ll_get_wqe(wq, i);
  169  wq->tail_next = &next_seg->next_wqe_index;

wq.h:
   78  void *wqc, struct mlx5_wq_cyc *wq,
   80  u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq);
   83  void *cqc, struct mlx5_cqwq *wq,
   85  u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq);
   88  void *wqc, struct mlx5_wq_ll *wq,
   90  u32 mlx5_wq_ll_get_size(struct mlx5_wq_ll *wq);
   94  static inline u16 mlx5_wq_cyc_ctr2ix(struct mlx5_wq_cyc *wq, u16 ctr)
   96  return ctr & wq->sz_m1;
   99  static inline void *mlx5_wq_cyc_get_wqe(struct mlx5_wq_cyc *wq, u16 ix)
  101  return wq->buf + (ix << wq->log_stride);
  112  static inline u32 mlx5_cqwq_get_ci(struct mlx5_cqwq *wq)
  114  return wq->cc & wq->sz_m1;
  117  static inline void *mlx5_cqwq_get_wqe(struct mlx5_cqwq *wq, u32 ix)
  119  return wq->buf + (ix << wq->log_stride);
  122  static inline u32 mlx5_cqwq_get_wrap_cnt(struct mlx5_cqwq *wq)
  124  return wq->cc >> wq->log_sz;
  127  static inline void mlx5_cqwq_pop(struct mlx5_cqwq *wq)
  129  wq->cc++;
  132  static inline void mlx5_cqwq_update_db_record(struct mlx5_cqwq *wq)
  134  *wq->db = cpu_to_be32(wq->cc & 0xffffff);
  137  static inline int mlx5_wq_ll_is_full(struct mlx5_wq_ll *wq)
  139  return wq->cur_sz == wq->sz_m1;
  142  static inline int mlx5_wq_ll_is_empty(struct mlx5_wq_ll *wq)
  144  return !wq->cur_sz;
  147  static inline void *mlx5_wq_ll_get_wqe(struct mlx5_wq_ll *wq, u16 ix)
  149  return wq->buf + (ix << wq->log_stride);
  152  static inline void mlx5_wq_ll_push(struct mlx5_wq_ll *wq, u16 head_next)
  154  wq->head = head_next;
  155  wq->wqe_ctr++;
  156  wq->cur_sz++;
  159  static inline void mlx5_wq_ll_pop(struct mlx5_wq_ll *wq, __be16 ix,
  162  *wq->tail_next = ix;
  163  wq->tail_next = next_tail_next;
  164  wq->cur_sz--;
  167  static inline void mlx5_wq_ll_update_db_record(struct mlx5_wq_ll *wq)
  169  *wq->db = cpu_to_be32(wq->wqe_ctr);

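The mlx5 helpers above lean on power-of-two ring sizes: sz_m1 is the size minus one, so "ctr & sz_m1" replaces a modulo, and "cc >> log_sz" counts how many times the consumer has lapped the ring. The low bit of that lap count is what en_txrx.c below compares against the CQE ownership bit. A small demonstration of the arithmetic (values invented):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        const uint32_t log_sz = 3;              /* ring of 1 << 3 = 8 entries */
        const uint32_t sz_m1  = (1u << log_sz) - 1;

        for (uint32_t cc = 5; cc < 20; cc += 7) {
            uint32_t ci   = cc & sz_m1;         /* index: cheap cc % 8 */
            uint32_t wrap = cc >> log_sz;       /* completed laps around the ring */
            printf("cc=%2u -> index=%u, wrap=%u, sw_ownership=%u\n",
                   cc, ci, wrap, wrap & 1);
        }
        return 0;
    }
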
Makefile:
    6  mlx5_core-$(CONFIG_MLX5_CORE_EN) += wq.o flow_table.o \

srq.c:
   83  void *wq = MLX5_ADDR_OF(rmpc, rmpc, wq);
   99  MLX5_SET(wq, wq, wq_signature, MLX5_GET(srqc, srqc, wq_signature));
  100  MLX5_SET(wq, wq, log_wq_pg_sz, MLX5_GET(srqc, srqc, log_page_size));
  101  MLX5_SET(wq, wq, log_wq_stride, MLX5_GET(srqc, srqc, log_rq_stride) + 4);
  102  MLX5_SET(wq, wq, log_wq_sz, MLX5_GET(srqc, srqc, log_srq_size));
  103  MLX5_SET(wq, wq, page_offset, MLX5_GET(srqc, srqc, page_offset));
  104  MLX5_SET(wq, wq, lwm, MLX5_GET(srqc, srqc, lwm));
  105  MLX5_SET(wq, wq, pd, MLX5_GET(srqc, srqc, pd));
  106  MLX5_SET64(wq, wq, dbr_addr, MLX5_GET64(srqc, srqc, dbr_addr));
  123  MLX5_SET(srqc, srqc, wq_signature, MLX5_GET(wq, wq, wq_signature));
  124  MLX5_SET(srqc, srqc, log_page_size, MLX5_GET(wq, wq, log_wq_pg_sz));
  125  MLX5_SET(srqc, srqc, log_rq_stride, MLX5_GET(wq, wq, log_wq_stride) - 4);
  126  MLX5_SET(srqc, srqc, log_srq_size, MLX5_GET(wq, wq, log_wq_sz));
  127  MLX5_SET(srqc, srqc, page_offset, MLX5_GET(wq, wq, page_offset));
  128  MLX5_SET(srqc, srqc, lwm, MLX5_GET(wq, wq, lwm));
  129  MLX5_SET(srqc, srqc, pd, MLX5_GET(wq, wq, pd));
  130  MLX5_SET64(srqc, srqc, dbr_addr, MLX5_GET64(wq, wq, dbr_addr));
  349  memcpy(MLX5_ADDR_OF(rmpc, rmpc, wq.pas), in->pas, pas_size);
  370  void *wq;
  380  wq = MLX5_ADDR_OF(rmpc, rmpc, wq);
  384  MLX5_SET(wq, wq, lwm, lwm);

en_main.c:
   38  struct mlx5_wq_param wq;   /* in struct mlx5e_rq_param */
   43  struct mlx5_wq_param wq;   /* in struct mlx5e_sq_param */
   49  struct mlx5_wq_param wq;   /* in struct mlx5e_cq_param */
  316  void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
  321  param->wq.db_numa_node = cpu_to_node(c->cpu);
  323  err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq,
  328  rq->wq.db = &rq->wq.db[MLX5_RCV_DBR];
  330  wq_sz = mlx5_wq_ll_get_size(&rq->wq);
  343  struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, i);
  378  void *wq;
  389  wq = MLX5_ADDR_OF(rqc, rqc, wq);
  396  MLX5_SET(wq, wq, log_wq_pg_sz, rq->wq_ctrl.buf.page_shift -
  398  MLX5_SET64(wq, wq, dbr_addr, rq->wq_ctrl.db.dma);
  401  (__be64 *)MLX5_ADDR_OF(wq, wq, pas));
  448  struct mlx5_wq_ll *wq = &rq->wq;
  451  if (wq->cur_sz >= priv->params.min_rx_wqes)
  497  while (!mlx5_wq_ll_is_empty(&rq->wq))
  515  int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
  541  void *sqc_wq = MLX5_ADDR_OF(sqc, sqc, wq);
  549  param->wq.db_numa_node = cpu_to_node(c->cpu);
  551  err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq,
  556  sq->wq.db = &sq->wq.db[MLX5_SND_DBR];
  573  sq->edge = (sq->wq.sz_m1 + 1) - MLX5_SEND_WQE_MAX_WQEBBS;
  606  void *wq;
  617  wq = MLX5_ADDR_OF(sqc, sqc, wq);
  627  MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
  628  MLX5_SET(wq, wq, uar_page, sq->uar.index);
  629  MLX5_SET(wq, wq, log_wq_pg_sz, sq->wq_ctrl.buf.page_shift -
  631  MLX5_SET64(wq, wq, dbr_addr, sq->wq_ctrl.db.dma);
  634  (__be64 *)MLX5_ADDR_OF(wq, wq, pas));
  753  param->wq.buf_numa_node = cpu_to_node(c->cpu);
  754  param->wq.db_numa_node = cpu_to_node(c->cpu);
  757  err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
  777  for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
  778  struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);
 1043  void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
 1045  MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
 1046  MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
 1047  MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe)));
 1048  MLX5_SET(wq, wq, log_wq_sz, priv->params.log_rq_size);
 1049  MLX5_SET(wq, wq, pd, priv->pdn);
 1051  param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
 1052  param->wq.linear = 1;
 1059  void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
 1061  MLX5_SET(wq, wq, log_wq_sz, priv->params.log_sq_size);
 1062  MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
 1063  MLX5_SET(wq, wq, pd, priv->pdn);
 1065  param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
 1485  void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
 1488  param->wq.db_numa_node = param->wq.buf_numa_node;
 1490  err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq,
 1510  err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,

en_rx.c:
   75  struct mlx5_wq_ll *wq = &rq->wq;
   80  while (!mlx5_wq_ll_is_full(wq)) {
   81  struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(wq, wq->head);
   83  if (unlikely(mlx5e_alloc_rx_wqe(rq, wqe, wq->head)))
   86  mlx5_wq_ll_push(wq, be16_to_cpu(wqe->next.next_wqe_index));
   92  mlx5_wq_ll_update_db_record(wq);
   94  return !mlx5_wq_ll_is_full(wq);
  238  mlx5_cqwq_pop(&cq->wq);
  242  wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter);
  263  mlx5_wq_ll_pop(&rq->wq, wqe_counter_be,
  267  mlx5_cqwq_update_db_record(&cq->wq);

en_txrx.c:
   37  struct mlx5_cqwq *wq = &cq->wq;
   38  u32 ci = mlx5_cqwq_get_ci(wq);
   39  struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(wq, ci);
   41  int sw_ownership_val = mlx5_cqwq_get_wrap_cnt(wq) & 1;

en_tx.c:
   43  struct mlx5_wq_cyc *wq = &sq->wq;
   45  u16 pi = sq->pc & wq->sz_m1;
   46  struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
  159  struct mlx5_wq_cyc *wq = &sq->wq;
  161  u16 pi = sq->pc & wq->sz_m1;
  162  struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
  290  while ((sq->pc & wq->sz_m1) > sq->edge)
  350  mlx5_cqwq_pop(&cq->wq);
  361  ci = sqcc & sq->wq.sz_m1;
  384  mlx5_cqwq_update_db_record(&cq->wq);

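en_tx.c treats the send queue as a free-running counter: sq->pc only ever increments, and "pc & sz_m1" maps it onto a slot. sq->edge (set in en_main.c above) marks the last slot where a maximum-size WQE still fits; past it, the driver posts NOPs until the producer wraps. A sketch of that edge check (sizes invented; the real NOP is a one-WQEBB send WQE):

    #include <stdio.h>
    #include <stdint.h>

    #define LOG_SQ_SIZE 4                           /* 16 slots (illustrative) */
    #define MAX_WQEBBS  4                           /* largest WQE, in basic blocks */

    int main(void)
    {
        uint16_t sz_m1 = (1u << LOG_SQ_SIZE) - 1;
        uint16_t edge  = (sz_m1 + 1) - MAX_WQEBBS;  /* last safe starting slot */
        uint16_t pc    = 14;                        /* producer counter, free-running */

        /* pad with NOPs so a max-size WQE never straddles the ring end */
        while ((pc & sz_m1) > edge) {
            printf("post NOP at slot %u\n", pc & sz_m1);
            pc++;                                   /* a NOP consumes one slot */
        }
        printf("big WQE starts at slot %u\n", pc & sz_m1);
        return 0;
    }
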
en.h:
   39  #include "wq.h"
  295  struct mlx5_cqwq wq;   /* in struct mlx5e_cq */
  310  struct mlx5_wq_ll wq;   /* in struct mlx5e_rq */
  374  struct mlx5_wq_cyc wq;   /* in struct mlx5e_sq */
  396  return (((sq->wq.sz_m1 & (sq->cc - sq->pc)) >= n) ||
  598  *sq->wq.db = cpu_to_be32(sq->pc);
  623  mlx5_cq_arm(mcq, MLX5_CQ_DB_REQ_NOT, mcq->uar->map, NULL, cq->wq.cc);

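Line 396 above is the head of mlx5e_sq_has_room_for(): with a power-of-two ring, (cc - pc) & sz_m1 yields the free-slot count directly from the two free-running counters (the visible fragment is truncated mid-expression; presumably the empty case cc == pc is handled by the rest of it). A worked check of the mask arithmetic, under that assumption:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        const uint16_t sz_m1 = 15;        /* 16-slot ring */
        uint16_t pc = 19, cc = 6;         /* producer ahead by 13 -> 13 in flight */

        uint16_t in_flight = (uint16_t)(pc - cc);
        uint16_t room      = (uint16_t)((uint16_t)(cc - pc) & sz_m1);  /* 16 - 13 */

        assert(in_flight == 13);
        assert(room == 3);                /* space for a 3-slot WQE, no more */
        return 0;
    }
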
health.c:
  270  queue_work(health->wq, &health->work);
  299  destroy_workqueue(health->wq);
  314  health->wq = create_singlethread_workqueue(name);
  316  if (!health->wq)

transobj.c:
  276  void *wq;
  286  wq = MLX5_ADDR_OF(rmpc, rmpc, wq);
  290  MLX5_SET(wq, wq, lwm, lwm);

/linux-4.4.14/fs/btrfs/ |
async-thread.c:
  115  * For threshold-able wq, let its concurrency grow on demand.
  144  __btrfs_destroy_workqueue(struct __btrfs_workqueue *wq);
  180  static inline void thresh_queue_hook(struct __btrfs_workqueue *wq)
  182  if (wq->thresh == NO_THRESHOLD)
  184  atomic_inc(&wq->pending);
  192  static inline void thresh_exec_hook(struct __btrfs_workqueue *wq)
  198  if (wq->thresh == NO_THRESHOLD)
  201  atomic_dec(&wq->pending);
  202  spin_lock(&wq->thres_lock);
  204  * Use wq->count to limit the calling frequency of
  207  wq->count++;
  208  wq->count %= (wq->thresh / 4);
  209  if (!wq->count)
  211  new_current_active = wq->current_active;
  217  pending = atomic_read(&wq->pending);
  218  if (pending > wq->thresh)
  220  if (pending < wq->thresh / 2)
  222  new_current_active = clamp_val(new_current_active, 1, wq->limit_active);
  223  if (new_current_active != wq->current_active) {
  225  wq->current_active = new_current_active;
  228  spin_unlock(&wq->thres_lock);
  231  workqueue_set_max_active(wq->normal_wq, wq->current_active);
  235  static void run_ordered_work(struct __btrfs_workqueue *wq)
  237  struct list_head *list = &wq->ordered_list;
  239  spinlock_t *lock = &wq->list_lock;
  280  struct __btrfs_workqueue *wq;
  293  wq = work->wq;
  296  thresh_exec_hook(wq);
  300  run_ordered_work(wq);
  319  static inline void __btrfs_queue_work(struct __btrfs_workqueue *wq,
  324  work->wq = wq;
  325  thresh_queue_hook(wq);
  327  spin_lock_irqsave(&wq->list_lock, flags);
  328  list_add_tail(&work->ordered_list, &wq->ordered_list);
  329  spin_unlock_irqrestore(&wq->list_lock, flags);
  332  queue_work(wq->normal_wq, &work->normal_work);
  335  void btrfs_queue_work(struct btrfs_workqueue *wq,
  340  if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags) && wq->high)
  341  dest_wq = wq->high;
  343  dest_wq = wq->normal;
  348  __btrfs_destroy_workqueue(struct __btrfs_workqueue *wq)
  350  destroy_workqueue(wq->normal_wq);
  351  trace_btrfs_workqueue_destroy(wq);
  352  kfree(wq);
  355  void btrfs_destroy_workqueue(struct btrfs_workqueue *wq)
  357  if (!wq)
  359  if (wq->high)
  360  __btrfs_destroy_workqueue(wq->high);
  361  __btrfs_destroy_workqueue(wq->normal);
  362  kfree(wq);
  365  void btrfs_workqueue_set_max(struct btrfs_workqueue *wq, int limit_active)
  367  if (!wq)
  369  wq->normal->limit_active = limit_active;
  370  if (wq->high)
  371  wq->high->limit_active = limit_active;

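thresh_exec_hook() above implements a cheap feedback loop: the pending counter is sampled only every thresh/4 executions, and the workqueue's max_active is nudged up when the backlog exceeds the threshold and down when it falls below half of it. A condensed model of that policy (function name and clamp bounds are mine):

    #include <stdio.h>

    #define NO_THRESHOLD 0

    static int adjust_active(int current_active, int pending, int thresh, int limit)
    {
        int next = current_active;
        if (thresh == NO_THRESHOLD)
            return current_active;           /* fixed-concurrency queue */
        if (pending > thresh)
            next++;                          /* backlog growing: add a worker */
        else if (pending < thresh / 2)
            next--;                          /* backlog drained: shed a worker */
        if (next < 1)     next = 1;          /* mirrors clamp_val(next, 1, limit) */
        if (next > limit) next = limit;
        return next;
    }

    int main(void)
    {
        int active = 2;
        active = adjust_active(active, 70, 64, 8);   /* 70 > 64: grow */
        printf("after burst: %d\n", active);         /* -> 3 */
        active = adjust_active(active, 10, 64, 8);   /* 10 < 32: shrink */
        printf("after drain: %d\n", active);         /* -> 2 */
        return 0;
    }
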
async-thread.h:
   39  struct __btrfs_workqueue *wq;   /* in struct btrfs_work */
   78  void btrfs_queue_work(struct btrfs_workqueue *wq,
   80  void btrfs_destroy_workqueue(struct btrfs_workqueue *wq);
   81  void btrfs_workqueue_set_max(struct btrfs_workqueue *wq, int max);

/linux-4.4.14/fs/autofs4/ |
H A D | waitq.c | 29 struct autofs_wait_queue *wq, *nwq; autofs4_catatonic_mode() local 40 wq = sbi->queues; autofs4_catatonic_mode() 42 while (wq) { autofs4_catatonic_mode() 43 nwq = wq->next; autofs4_catatonic_mode() 44 wq->status = -ENOENT; /* Magic is gone - report failure */ autofs4_catatonic_mode() 45 kfree(wq->name.name); autofs4_catatonic_mode() 46 wq->name.name = NULL; autofs4_catatonic_mode() 47 wq->wait_ctr--; autofs4_catatonic_mode() 48 wake_up_interruptible(&wq->queue); autofs4_catatonic_mode() 49 wq = nwq; autofs4_catatonic_mode() 94 struct autofs_wait_queue *wq, autofs4_notify_daemon() 106 (unsigned long) wq->wait_queue_token, wq->name.len, wq->name.name, type); autofs4_notify_daemon() 121 mp->wait_queue_token = wq->wait_queue_token; autofs4_notify_daemon() 122 mp->len = wq->name.len; autofs4_notify_daemon() 123 memcpy(mp->name, wq->name.name, wq->name.len); autofs4_notify_daemon() 124 mp->name[wq->name.len] = '\0'; autofs4_notify_daemon() 133 ep->wait_queue_token = wq->wait_queue_token; autofs4_notify_daemon() 134 ep->len = wq->name.len; autofs4_notify_daemon() 135 memcpy(ep->name, wq->name.name, wq->name.len); autofs4_notify_daemon() 136 ep->name[wq->name.len] = '\0'; autofs4_notify_daemon() 153 packet->wait_queue_token = wq->wait_queue_token; autofs4_notify_daemon() 154 packet->len = wq->name.len; autofs4_notify_daemon() 155 memcpy(packet->name, wq->name.name, wq->name.len); autofs4_notify_daemon() 156 packet->name[wq->name.len] = '\0'; autofs4_notify_daemon() 157 packet->dev = wq->dev; autofs4_notify_daemon() 158 packet->ino = wq->ino; autofs4_notify_daemon() 159 packet->uid = from_kuid_munged(user_ns, wq->uid); autofs4_notify_daemon() 160 packet->gid = from_kgid_munged(user_ns, wq->gid); autofs4_notify_daemon() 161 packet->pid = wq->pid; autofs4_notify_daemon() 162 packet->tgid = wq->tgid; autofs4_notify_daemon() 228 struct autofs_wait_queue *wq; autofs4_find_wait() local 230 for (wq = sbi->queues; wq; wq = wq->next) { autofs4_find_wait() 231 if (wq->name.hash == qstr->hash && autofs4_find_wait() 232 wq->name.len == qstr->len && autofs4_find_wait() 233 wq->name.name && autofs4_find_wait() 234 !memcmp(wq->name.name, qstr->name, qstr->len)) autofs4_find_wait() 237 return wq; autofs4_find_wait() 253 struct autofs_wait_queue *wq; validate_request() local 260 wq = autofs4_find_wait(sbi, qstr); validate_request() 261 if (wq) { validate_request() 262 *wait = wq; validate_request() 294 wq = autofs4_find_wait(sbi, qstr); validate_request() 295 if (wq) { validate_request() 296 *wait = wq; validate_request() 346 struct autofs_wait_queue *wq; autofs4_wait() local 404 ret = validate_request(&wq, sbi, &qstr, dentry, notify); autofs4_wait() 412 if (!wq) { autofs4_wait() 414 wq = kmalloc(sizeof(struct autofs_wait_queue),GFP_KERNEL); autofs4_wait() 415 if (!wq) { autofs4_wait() 421 wq->wait_queue_token = autofs4_next_wait_queue; autofs4_wait() 424 wq->next = sbi->queues; autofs4_wait() 425 sbi->queues = wq; autofs4_wait() 426 init_waitqueue_head(&wq->queue); autofs4_wait() 427 memcpy(&wq->name, &qstr, sizeof(struct qstr)); autofs4_wait() 428 wq->dev = autofs4_get_dev(sbi); autofs4_wait() 429 wq->ino = autofs4_get_ino(sbi); autofs4_wait() 430 wq->uid = current_uid(); autofs4_wait() 431 wq->gid = current_gid(); autofs4_wait() 432 wq->pid = pid; autofs4_wait() 433 wq->tgid = tgid; autofs4_wait() 434 wq->status = -EINTR; /* Status return if interrupted */ autofs4_wait() 435 wq->wait_ctr = 2; autofs4_wait() 454 (unsigned long) wq->wait_queue_token, wq->name.len, autofs4_wait() 455 
wq->name.name, notify); autofs4_wait() 458 autofs4_notify_daemon(sbi, wq, type); autofs4_wait() 460 wq->wait_ctr++; autofs4_wait() 462 (unsigned long) wq->wait_queue_token, wq->name.len, autofs4_wait() 463 wq->name.name, notify); autofs4_wait() 469 * wq->name.name is NULL iff the lock is already released autofs4_wait() 472 if (wq->name.name) { autofs4_wait() 483 wait_event_interruptible(wq->queue, wq->name.name == NULL); autofs4_wait() 493 status = wq->status; autofs4_wait() 520 ino->uid = wq->uid; autofs4_wait() 521 ino->gid = wq->gid; autofs4_wait() 531 if (!--wq->wait_ctr) autofs4_wait() 532 kfree(wq); autofs4_wait() 541 struct autofs_wait_queue *wq, **wql; autofs4_wait_release() local 544 for (wql = &sbi->queues; (wq = *wql) != NULL; wql = &wq->next) { autofs4_wait_release() 545 if (wq->wait_queue_token == wait_queue_token) autofs4_wait_release() 549 if (!wq) { autofs4_wait_release() 554 *wql = wq->next; /* Unlink from chain */ autofs4_wait_release() 555 kfree(wq->name.name); autofs4_wait_release() 556 wq->name.name = NULL; /* Do not wait on this queue */ autofs4_wait_release() 557 wq->status = status; autofs4_wait_release() 558 wake_up_interruptible(&wq->queue); autofs4_wait_release() 559 if (!--wq->wait_ctr) autofs4_wait_release() 560 kfree(wq); autofs4_wait_release() 93 autofs4_notify_daemon(struct autofs_sb_info *sbi, struct autofs_wait_queue *wq, int type) autofs4_notify_daemon() argument
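autofs4_wait_release() above is the answer half of the wait protocol: look the entry up by token, unlink it from the singly linked chain with a pointer-to-pointer walk, free and NULL the name (waiters sleep on wq->name.name == NULL, so this is the wake condition), record the daemon's status, wake the sleepers, and free the entry only when the wait_ctr reference count hits zero. A userspace sketch of the same unlink-and-release bookkeeping, without the locking and wakeup (demo_* names are illustrative):

#include <stdlib.h>

struct demo_wait {
	unsigned long token;
	char *name;		/* NULL means "answered, stop waiting" */
	int status;
	int wait_ctr;		/* one reference per waiter plus the releaser */
	struct demo_wait *next;
};

/* mirrors autofs4_wait_release(): answer the request identified by token */
static int demo_wait_release(struct demo_wait **head,
			     unsigned long token, int status)
{
	struct demo_wait *wq, **wql;

	for (wql = head; (wq = *wql) != NULL; wql = &wq->next)
		if (wq->token == token)
			break;
	if (!wq)
		return -1;		/* unknown token */

	*wql = wq->next;		/* unlink from chain */
	free(wq->name);
	wq->name = NULL;		/* waiters test this as "done" */
	wq->status = status;
	/* real code: wake_up_interruptible(&wq->queue); */
	if (!--wq->wait_ctr)
		free(wq);		/* last reference gone */
	return 0;
}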
|
/linux-4.4.14/drivers/infiniband/hw/cxgb4/ |
H A D | t4.h | 344 static inline int t4_rqes_posted(struct t4_wq *wq) t4_rqes_posted() argument 346 return wq->rq.in_use; t4_rqes_posted() 349 static inline int t4_rq_empty(struct t4_wq *wq) t4_rq_empty() argument 351 return wq->rq.in_use == 0; t4_rq_empty() 354 static inline int t4_rq_full(struct t4_wq *wq) t4_rq_full() argument 356 return wq->rq.in_use == (wq->rq.size - 1); t4_rq_full() 359 static inline u32 t4_rq_avail(struct t4_wq *wq) t4_rq_avail() argument 361 return wq->rq.size - 1 - wq->rq.in_use; t4_rq_avail() 364 static inline void t4_rq_produce(struct t4_wq *wq, u8 len16) t4_rq_produce() argument 366 wq->rq.in_use++; t4_rq_produce() 367 if (++wq->rq.pidx == wq->rq.size) t4_rq_produce() 368 wq->rq.pidx = 0; t4_rq_produce() 369 wq->rq.wq_pidx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE); t4_rq_produce() 370 if (wq->rq.wq_pidx >= wq->rq.size * T4_RQ_NUM_SLOTS) t4_rq_produce() 371 wq->rq.wq_pidx %= wq->rq.size * T4_RQ_NUM_SLOTS; t4_rq_produce() 374 static inline void t4_rq_consume(struct t4_wq *wq) t4_rq_consume() argument 376 wq->rq.in_use--; t4_rq_consume() 377 wq->rq.msn++; t4_rq_consume() 378 if (++wq->rq.cidx == wq->rq.size) t4_rq_consume() 379 wq->rq.cidx = 0; t4_rq_consume() 382 static inline u16 t4_rq_host_wq_pidx(struct t4_wq *wq) t4_rq_host_wq_pidx() argument 384 return wq->rq.queue[wq->rq.size].status.host_wq_pidx; t4_rq_host_wq_pidx() 387 static inline u16 t4_rq_wq_size(struct t4_wq *wq) t4_rq_wq_size() argument 389 return wq->rq.size * T4_RQ_NUM_SLOTS; t4_rq_wq_size() 397 static inline int t4_sq_empty(struct t4_wq *wq) t4_sq_empty() argument 399 return wq->sq.in_use == 0; t4_sq_empty() 402 static inline int t4_sq_full(struct t4_wq *wq) t4_sq_full() argument 404 return wq->sq.in_use == (wq->sq.size - 1); t4_sq_full() 407 static inline u32 t4_sq_avail(struct t4_wq *wq) t4_sq_avail() argument 409 return wq->sq.size - 1 - wq->sq.in_use; t4_sq_avail() 412 static inline void t4_sq_produce(struct t4_wq *wq, u8 len16) t4_sq_produce() argument 414 wq->sq.in_use++; t4_sq_produce() 415 if (++wq->sq.pidx == wq->sq.size) t4_sq_produce() 416 wq->sq.pidx = 0; t4_sq_produce() 417 wq->sq.wq_pidx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE); t4_sq_produce() 418 if (wq->sq.wq_pidx >= wq->sq.size * T4_SQ_NUM_SLOTS) t4_sq_produce() 419 wq->sq.wq_pidx %= wq->sq.size * T4_SQ_NUM_SLOTS; t4_sq_produce() 422 static inline void t4_sq_consume(struct t4_wq *wq) t4_sq_consume() argument 424 BUG_ON(wq->sq.in_use < 1); t4_sq_consume() 425 if (wq->sq.cidx == wq->sq.flush_cidx) t4_sq_consume() 426 wq->sq.flush_cidx = -1; t4_sq_consume() 427 wq->sq.in_use--; t4_sq_consume() 428 if (++wq->sq.cidx == wq->sq.size) t4_sq_consume() 429 wq->sq.cidx = 0; t4_sq_consume() 432 static inline u16 t4_sq_host_wq_pidx(struct t4_wq *wq) t4_sq_host_wq_pidx() argument 434 return wq->sq.queue[wq->sq.size].status.host_wq_pidx; t4_sq_host_wq_pidx() 437 static inline u16 t4_sq_wq_size(struct t4_wq *wq) t4_sq_wq_size() argument 439 return wq->sq.size * T4_SQ_NUM_SLOTS; t4_sq_wq_size() 458 static inline void t4_ring_sq_db(struct t4_wq *wq, u16 inc, union t4_wr *wqe) t4_ring_sq_db() argument 463 if (wq->sq.bar2_va) { t4_ring_sq_db() 464 if (inc == 1 && wq->sq.bar2_qid == 0 && wqe) { t4_ring_sq_db() 465 PDBG("%s: WC wq->sq.pidx = %d\n", t4_ring_sq_db() 466 __func__, wq->sq.pidx); t4_ring_sq_db() 468 (wq->sq.bar2_va + SGE_UDB_WCDOORBELL), t4_ring_sq_db() 471 PDBG("%s: DB wq->sq.pidx = %d\n", t4_ring_sq_db() 472 __func__, wq->sq.pidx); t4_ring_sq_db() 473 writel(PIDX_T5_V(inc) | QID_V(wq->sq.bar2_qid), t4_ring_sq_db() 474 
wq->sq.bar2_va + SGE_UDB_KDOORBELL); t4_ring_sq_db() 481 writel(QID_V(wq->sq.qid) | PIDX_V(inc), wq->db); t4_ring_sq_db() 484 static inline void t4_ring_rq_db(struct t4_wq *wq, u16 inc, t4_ring_rq_db() argument 490 if (wq->rq.bar2_va) { t4_ring_rq_db() 491 if (inc == 1 && wq->rq.bar2_qid == 0 && wqe) { t4_ring_rq_db() 492 PDBG("%s: WC wq->rq.pidx = %d\n", t4_ring_rq_db() 493 __func__, wq->rq.pidx); t4_ring_rq_db() 495 (wq->rq.bar2_va + SGE_UDB_WCDOORBELL), t4_ring_rq_db() 498 PDBG("%s: DB wq->rq.pidx = %d\n", t4_ring_rq_db() 499 __func__, wq->rq.pidx); t4_ring_rq_db() 500 writel(PIDX_T5_V(inc) | QID_V(wq->rq.bar2_qid), t4_ring_rq_db() 501 wq->rq.bar2_va + SGE_UDB_KDOORBELL); t4_ring_rq_db() 508 writel(QID_V(wq->rq.qid) | PIDX_V(inc), wq->db); t4_ring_rq_db() 511 static inline int t4_wq_in_error(struct t4_wq *wq) t4_wq_in_error() argument 513 return wq->rq.queue[wq->rq.size].status.qp_err; t4_wq_in_error() 516 static inline void t4_set_wq_in_error(struct t4_wq *wq) t4_set_wq_in_error() argument 518 wq->rq.queue[wq->rq.size].status.qp_err = 1; t4_set_wq_in_error() 521 static inline void t4_disable_wq_db(struct t4_wq *wq) t4_disable_wq_db() argument 523 wq->rq.queue[wq->rq.size].status.db_off = 1; t4_disable_wq_db() 526 static inline void t4_enable_wq_db(struct t4_wq *wq) t4_enable_wq_db() argument 528 wq->rq.queue[wq->rq.size].status.db_off = 0; t4_enable_wq_db() 531 static inline int t4_wq_db_enabled(struct t4_wq *wq) t4_wq_db_enabled() argument 533 return !wq->rq.queue[wq->rq.size].status.db_off; t4_wq_db_enabled()
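The produce/consume helpers above keep two producer indexes per queue: pidx counts software ring entries and wraps at size, while wq_pidx advances in hardware egress-queue slots, scaled from the work request length given in 16-byte flits; one software slot is always left unused so that full (in_use == size - 1) stays distinguishable from empty. A compilable sketch of the index arithmetic, with DEMO_* constants standing in for the T4 hardware values:

#include <stdint.h>

#define DEMO_EQ_ENTRY_SIZE	64	/* stand-in for T4_EQ_ENTRY_SIZE */
#define DEMO_NUM_SLOTS		2	/* stand-in for T4_RQ_NUM_SLOTS  */

struct demo_ring {
	uint16_t size;		/* software ring entries */
	uint16_t in_use;
	uint16_t pidx;		/* software producer index */
	uint16_t cidx;		/* software consumer index */
	uint16_t wq_pidx;	/* hardware producer index, in EQ slots */
};

static int demo_full(const struct demo_ring *r)
{
	return r->in_use == r->size - 1;	/* one slot kept free */
}

static uint32_t demo_avail(const struct demo_ring *r)
{
	return r->size - 1 - r->in_use;
}

/* mirrors t4_rq_produce(): len16 is the WR length in 16-byte flits */
static void demo_produce(struct demo_ring *r, uint8_t len16)
{
	r->in_use++;
	if (++r->pidx == r->size)
		r->pidx = 0;
	/* open-coded DIV_ROUND_UP(len16 * 16, DEMO_EQ_ENTRY_SIZE) */
	r->wq_pidx += (len16 * 16 + DEMO_EQ_ENTRY_SIZE - 1) / DEMO_EQ_ENTRY_SIZE;
	if (r->wq_pidx >= r->size * DEMO_NUM_SLOTS)
		r->wq_pidx %= r->size * DEMO_NUM_SLOTS;
}

/* mirrors t4_rq_consume() */
static void demo_consume(struct demo_ring *r)
{
	r->in_use--;
	if (++r->cidx == r->size)
		r->cidx = 0;
}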
|
H A D | cq.c | 183 static void insert_recv_cqe(struct t4_wq *wq, struct t4_cq *cq) insert_recv_cqe() argument 187 PDBG("%s wq %p cq %p sw_cidx %u sw_pidx %u\n", __func__, insert_recv_cqe() 188 wq, cq, cq->sw_cidx, cq->sw_pidx); insert_recv_cqe() 194 CQE_QPID_V(wq->sq.qid)); insert_recv_cqe() 200 int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count) c4iw_flush_rq() argument 203 int in_use = wq->rq.in_use - count; c4iw_flush_rq() 206 PDBG("%s wq %p cq %p rq.in_use %u skip count %u\n", __func__, c4iw_flush_rq() 207 wq, cq, wq->rq.in_use, count); c4iw_flush_rq() 209 insert_recv_cqe(wq, cq); c4iw_flush_rq() 215 static void insert_sq_cqe(struct t4_wq *wq, struct t4_cq *cq, insert_sq_cqe() argument 220 PDBG("%s wq %p cq %p sw_cidx %u sw_pidx %u\n", __func__, insert_sq_cqe() 221 wq, cq, cq->sw_cidx, cq->sw_pidx); insert_sq_cqe() 227 CQE_QPID_V(wq->sq.qid)); insert_sq_cqe() 234 static void advance_oldest_read(struct t4_wq *wq); 239 struct t4_wq *wq = &qhp->wq; c4iw_flush_sq() local 245 if (wq->sq.flush_cidx == -1) c4iw_flush_sq() 246 wq->sq.flush_cidx = wq->sq.cidx; c4iw_flush_sq() 247 idx = wq->sq.flush_cidx; c4iw_flush_sq() 248 BUG_ON(idx >= wq->sq.size); c4iw_flush_sq() 249 while (idx != wq->sq.pidx) { c4iw_flush_sq() 250 swsqe = &wq->sq.sw_sq[idx]; c4iw_flush_sq() 253 insert_sq_cqe(wq, cq, swsqe); c4iw_flush_sq() 254 if (wq->sq.oldest_read == swsqe) { c4iw_flush_sq() 256 advance_oldest_read(wq); c4iw_flush_sq() 259 if (++idx == wq->sq.size) c4iw_flush_sq() 262 wq->sq.flush_cidx += flushed; c4iw_flush_sq() 263 if (wq->sq.flush_cidx >= wq->sq.size) c4iw_flush_sq() 264 wq->sq.flush_cidx -= wq->sq.size; c4iw_flush_sq() 268 static void flush_completed_wrs(struct t4_wq *wq, struct t4_cq *cq) flush_completed_wrs() argument 273 if (wq->sq.flush_cidx == -1) flush_completed_wrs() 274 wq->sq.flush_cidx = wq->sq.cidx; flush_completed_wrs() 275 cidx = wq->sq.flush_cidx; flush_completed_wrs() 276 BUG_ON(cidx > wq->sq.size); flush_completed_wrs() 278 while (cidx != wq->sq.pidx) { flush_completed_wrs() 279 swsqe = &wq->sq.sw_sq[cidx]; flush_completed_wrs() 281 if (++cidx == wq->sq.size) flush_completed_wrs() 296 if (++cidx == wq->sq.size) flush_completed_wrs() 298 wq->sq.flush_cidx = cidx; flush_completed_wrs() 304 static void create_read_req_cqe(struct t4_wq *wq, struct t4_cqe *hw_cqe, create_read_req_cqe() argument 307 read_cqe->u.scqe.cidx = wq->sq.oldest_read->idx; create_read_req_cqe() 308 read_cqe->len = htonl(wq->sq.oldest_read->read_len); create_read_req_cqe() 316 static void advance_oldest_read(struct t4_wq *wq) advance_oldest_read() argument 319 u32 rptr = wq->sq.oldest_read - wq->sq.sw_sq + 1; advance_oldest_read() 321 if (rptr == wq->sq.size) advance_oldest_read() 323 while (rptr != wq->sq.pidx) { advance_oldest_read() 324 wq->sq.oldest_read = &wq->sq.sw_sq[rptr]; advance_oldest_read() 326 if (wq->sq.oldest_read->opcode == FW_RI_READ_REQ) advance_oldest_read() 328 if (++rptr == wq->sq.size) advance_oldest_read() 331 wq->sq.oldest_read = NULL; advance_oldest_read() 383 if (!qhp->wq.sq.oldest_read->signaled) { c4iw_flush_hw_cq() 384 advance_oldest_read(&qhp->wq); c4iw_flush_hw_cq() 392 create_read_req_cqe(&qhp->wq, hw_cqe, &read_cqe); c4iw_flush_hw_cq() 394 advance_oldest_read(&qhp->wq); c4iw_flush_hw_cq() 401 swsqe = &qhp->wq.sq.sw_sq[CQE_WRID_SQ_IDX(hw_cqe)]; c4iw_flush_hw_cq() 404 flush_completed_wrs(&qhp->wq, &chp->cq); c4iw_flush_hw_cq() 417 static int cqe_completes_wr(struct t4_cqe *cqe, struct t4_wq *wq) cqe_completes_wr() argument 428 if (CQE_SEND_OPCODE(cqe) && RQ_TYPE(cqe) && 
t4_rq_empty(wq)) cqe_completes_wr() 433 void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count) c4iw_count_rcqes() argument 444 (CQE_QPID(cqe) == wq->sq.qid) && cqe_completes_wr(cqe, wq)) c4iw_count_rcqes() 457 * supply the wq associated with the qpid. 468 static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe, poll_cq() argument 490 if (wq == NULL) { poll_cq() 496 * skip hw cqe's if the wq is flushed. poll_cq() 498 if (wq->flushed && !SW_CQE(hw_cqe)) { poll_cq() 526 t4_set_wq_in_error(wq); poll_cq() 537 t4_set_wq_in_error(wq); poll_cq() 545 if (!wq->sq.oldest_read->signaled) { poll_cq() 546 advance_oldest_read(wq); poll_cq() 555 create_read_req_cqe(wq, hw_cqe, &read_cqe); poll_cq() 557 advance_oldest_read(wq); poll_cq() 560 if (CQE_STATUS(hw_cqe) || t4_wq_in_error(wq)) { poll_cq() 562 t4_set_wq_in_error(wq); poll_cq() 573 * then we complete this with T4_ERR_MSN and mark the wq in poll_cq() 577 if (t4_rq_empty(wq)) { poll_cq() 578 t4_set_wq_in_error(wq); poll_cq() 582 if (unlikely((CQE_WRID_MSN(hw_cqe) != (wq->rq.msn)))) { poll_cq() 583 t4_set_wq_in_error(wq); poll_cq() 601 if (!SW_CQE(hw_cqe) && (CQE_WRID_SQ_IDX(hw_cqe) != wq->sq.cidx)) { poll_cq() 606 swsqe = &wq->sq.sw_sq[CQE_WRID_SQ_IDX(hw_cqe)]; poll_cq() 622 BUG_ON(idx >= wq->sq.size); poll_cq() 632 if (idx < wq->sq.cidx) poll_cq() 633 wq->sq.in_use -= wq->sq.size + idx - wq->sq.cidx; poll_cq() 635 wq->sq.in_use -= idx - wq->sq.cidx; poll_cq() 636 BUG_ON(wq->sq.in_use <= 0 && wq->sq.in_use >= wq->sq.size); poll_cq() 638 wq->sq.cidx = (uint16_t)idx; poll_cq() 639 PDBG("%s completing sq idx %u\n", __func__, wq->sq.cidx); poll_cq() 640 *cookie = wq->sq.sw_sq[wq->sq.cidx].wr_id; poll_cq() 642 c4iw_log_wr_stats(wq, hw_cqe); poll_cq() 643 t4_sq_consume(wq); poll_cq() 645 PDBG("%s completing rq idx %u\n", __func__, wq->rq.cidx); poll_cq() 646 *cookie = wq->rq.sw_rq[wq->rq.cidx].wr_id; poll_cq() 647 BUG_ON(t4_rq_empty(wq)); poll_cq() 649 c4iw_log_wr_stats(wq, hw_cqe); poll_cq() 650 t4_rq_consume(wq); poll_cq() 658 flush_completed_wrs(wq, cq); poll_cq() 686 struct t4_wq *wq; c4iw_poll_cq_one() local 699 wq = NULL; c4iw_poll_cq_one() 702 wq = &(qhp->wq); c4iw_poll_cq_one() 704 ret = poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie, &credit); c4iw_poll_cq_one() 821 if (wq) c4iw_poll_cq_one()
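One detail worth calling out in poll_cq() above is the catch-up path: a hardware completion can retire several send WRs at once, so the driver recomputes sq.in_use from the gap between the completed index and the current cidx, compensating for wraparound. A self-contained sketch of just that adjustment:

#include <stdint.h>

struct demo_sq {
	uint16_t size, cidx, in_use;
};

/* mirrors poll_cq(): the CQE says the SQ consumer is now at idx,
 * so retire everything between the old cidx and idx, with wrap */
static void demo_sq_catch_up(struct demo_sq *sq, uint16_t idx)
{
	if (idx < sq->cidx)
		sq->in_use -= sq->size + idx - sq->cidx;	/* wrapped past 0 */
	else
		sq->in_use -= idx - sq->cidx;
	sq->cidx = idx;
}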
|
H A D | qp.c | 149 static int destroy_qp(struct c4iw_rdev *rdev, struct t4_wq *wq, destroy_qp() argument 157 wq->rq.memsize, wq->rq.queue, destroy_qp() 158 dma_unmap_addr(&wq->rq, mapping)); destroy_qp() 159 dealloc_sq(rdev, &wq->sq); destroy_qp() 160 c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size); destroy_qp() 161 kfree(wq->rq.sw_rq); destroy_qp() 162 kfree(wq->sq.sw_sq); destroy_qp() 163 c4iw_put_qpid(rdev, wq->rq.qid, uctx); destroy_qp() 164 c4iw_put_qpid(rdev, wq->sq.qid, uctx); destroy_qp() 195 static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq, create_qp() argument 208 wq->sq.qid = c4iw_get_qpid(rdev, uctx); create_qp() 209 if (!wq->sq.qid) create_qp() 212 wq->rq.qid = c4iw_get_qpid(rdev, uctx); create_qp() 213 if (!wq->rq.qid) { create_qp() 219 wq->sq.sw_sq = kzalloc(wq->sq.size * sizeof *wq->sq.sw_sq, create_qp() 221 if (!wq->sq.sw_sq) { create_qp() 226 wq->rq.sw_rq = kzalloc(wq->rq.size * sizeof *wq->rq.sw_rq, create_qp() 228 if (!wq->rq.sw_rq) { create_qp() 237 wq->rq.rqt_size = roundup_pow_of_two(max_t(u16, wq->rq.size, 16)); create_qp() 238 wq->rq.rqt_hwaddr = c4iw_rqtpool_alloc(rdev, wq->rq.rqt_size); create_qp() 239 if (!wq->rq.rqt_hwaddr) { create_qp() 244 ret = alloc_sq(rdev, &wq->sq, user); create_qp() 247 memset(wq->sq.queue, 0, wq->sq.memsize); create_qp() 248 dma_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr); create_qp() 250 wq->rq.queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev), create_qp() 251 wq->rq.memsize, &(wq->rq.dma_addr), create_qp() 253 if (!wq->rq.queue) { create_qp() 258 __func__, wq->sq.queue, create_qp() 259 (unsigned long long)virt_to_phys(wq->sq.queue), create_qp() 260 wq->rq.queue, create_qp() 261 (unsigned long long)virt_to_phys(wq->rq.queue)); create_qp() 262 memset(wq->rq.queue, 0, wq->rq.memsize); create_qp() 263 dma_unmap_addr_set(&wq->rq, mapping, wq->rq.dma_addr); create_qp() 265 wq->db = rdev->lldi.db_reg; create_qp() 267 wq->sq.bar2_va = c4iw_bar2_addrs(rdev, wq->sq.qid, T4_BAR2_QTYPE_EGRESS, create_qp() 268 &wq->sq.bar2_qid, create_qp() 269 user ? &wq->sq.bar2_pa : NULL); create_qp() 270 wq->rq.bar2_va = c4iw_bar2_addrs(rdev, wq->rq.qid, T4_BAR2_QTYPE_EGRESS, create_qp() 271 &wq->rq.bar2_qid, create_qp() 272 user ? &wq->rq.bar2_pa : NULL); create_qp() 277 if (user && (!wq->sq.bar2_pa || !wq->rq.bar2_pa)) { create_qp() 279 pci_name(rdev->lldi.pdev), wq->sq.qid, wq->rq.qid); create_qp() 283 wq->rdev = rdev; create_qp() 284 wq->rq.msn = 1; create_qp() 311 eqsize = wq->sq.size * T4_SQ_NUM_SLOTS + create_qp() 318 (t4_sq_onchip(&wq->sq) ? 
FW_RI_RES_WR_ONCHIP_F : 0) | create_qp() 328 res->u.sqrq.eqid = cpu_to_be32(wq->sq.qid); create_qp() 329 res->u.sqrq.eqaddr = cpu_to_be64(wq->sq.dma_addr); create_qp() 337 eqsize = wq->rq.size * T4_RQ_NUM_SLOTS + create_qp() 352 res->u.sqrq.eqid = cpu_to_be32(wq->rq.qid); create_qp() 353 res->u.sqrq.eqaddr = cpu_to_be64(wq->rq.dma_addr); create_qp() 360 ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, wq->sq.qid, __func__); create_qp() 365 __func__, wq->sq.qid, wq->rq.qid, wq->db, create_qp() 366 wq->sq.bar2_va, wq->rq.bar2_va); create_qp() 371 wq->rq.memsize, wq->rq.queue, create_qp() 372 dma_unmap_addr(&wq->rq, mapping)); create_qp() 374 dealloc_sq(rdev, &wq->sq); create_qp() 376 c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size); create_qp() 378 kfree(wq->rq.sw_rq); create_qp() 380 kfree(wq->sq.sw_sq); create_qp() 382 c4iw_put_qpid(rdev, wq->rq.qid, uctx); create_qp() 384 c4iw_put_qpid(rdev, wq->sq.qid, uctx); create_qp() 602 ret = build_isgl((__be64 *)qhp->wq.rq.queue, build_rdma_recv() 603 (__be64 *)&qhp->wq.rq.queue[qhp->wq.rq.size], build_rdma_recv() 712 t4_ring_sq_db(&qhp->wq, inc, NULL); ring_kernel_sq_db() 715 qhp->wq.sq.wq_pidx_inc += inc; ring_kernel_sq_db() 729 t4_ring_rq_db(&qhp->wq, inc, NULL); ring_kernel_rq_db() 732 qhp->wq.rq.wq_pidx_inc += inc; ring_kernel_rq_db() 755 if (t4_wq_in_error(&qhp->wq)) { c4iw_post_send() 759 num_wrs = t4_sq_avail(&qhp->wq); c4iw_post_send() 770 wqe = (union t4_wr *)((u8 *)qhp->wq.sq.queue + c4iw_post_send() 771 qhp->wq.sq.wq_pidx * T4_EQ_ENTRY_SIZE); c4iw_post_send() 778 swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx]; c4iw_post_send() 789 err = build_rdma_send(&qhp->wq.sq, wqe, wr, &len16); c4iw_post_send() 794 err = build_rdma_write(&qhp->wq.sq, wqe, wr, &len16); c4iw_post_send() 808 if (!qhp->wq.sq.oldest_read) c4iw_post_send() 809 qhp->wq.sq.oldest_read = swsqe; c4iw_post_send() 814 err = build_memreg(&qhp->wq.sq, wqe, reg_wr(wr), &len16, c4iw_post_send() 835 swsqe->idx = qhp->wq.sq.pidx; c4iw_post_send() 847 init_wr_hdr(wqe, qhp->wq.sq.pidx, fw_opcode, fw_flags, len16); c4iw_post_send() 850 __func__, (unsigned long long)wr->wr_id, qhp->wq.sq.pidx, c4iw_post_send() 854 t4_sq_produce(&qhp->wq, len16); c4iw_post_send() 858 t4_ring_sq_db(&qhp->wq, idx, wqe); c4iw_post_send() 880 if (t4_wq_in_error(&qhp->wq)) { c4iw_post_receive() 884 num_wrs = t4_rq_avail(&qhp->wq); c4iw_post_receive() 895 wqe = (union t4_recv_wr *)((u8 *)qhp->wq.rq.queue + c4iw_post_receive() 896 qhp->wq.rq.wq_pidx * c4iw_post_receive() 907 qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].wr_id = wr->wr_id; c4iw_post_receive() 909 qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].sge_ts = c4iw_post_receive() 913 &qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].host_ts); c4iw_post_receive() 918 wqe->recv.wrid = qhp->wq.rq.pidx; c4iw_post_receive() 924 (unsigned long long) wr->wr_id, qhp->wq.rq.pidx); c4iw_post_receive() 925 t4_rq_produce(&qhp->wq, len16); c4iw_post_receive() 931 t4_ring_rq_db(&qhp->wq, idx, wqe); c4iw_post_receive() 1088 PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid, post_terminate() 1130 if (qhp->wq.flushed) { __flush_qp() 1135 qhp->wq.flushed = 1; __flush_qp() 1138 c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count); __flush_qp() 1139 rq_flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count); __flush_qp() 1184 t4_set_wq_in_error(&qhp->wq); flush_qp() 1209 PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid, rdma_fini() 1233 qhp->wq.sq.qid, __func__); rdma_fini() 1271 qhp->wq.sq.qid, qhp->ep->hwtid, qhp->ep->ird, qhp->ep->ord); rdma_init() 1315 wqe->u.init.nrqe = 
cpu_to_be16(t4_rqes_posted(&qhp->wq)); rdma_init() 1317 wqe->u.init.qpid = cpu_to_be32(qhp->wq.sq.qid); rdma_init() 1318 wqe->u.init.sq_eqid = cpu_to_be32(qhp->wq.sq.qid); rdma_init() 1319 wqe->u.init.rq_eqid = cpu_to_be32(qhp->wq.rq.qid); rdma_init() 1326 wqe->u.init.hwrqsize = cpu_to_be32(qhp->wq.rq.rqt_size); rdma_init() 1327 wqe->u.init.hwrqaddr = cpu_to_be32(qhp->wq.rq.rqt_hwaddr - rdma_init() 1337 qhp->ep->hwtid, qhp->wq.sq.qid, __func__); rdma_init() 1361 qhp, qhp->wq.sq.qid, qhp->wq.rq.qid, qhp->ep, qhp->attr.state, c4iw_modify_qp() 1450 t4_set_wq_in_error(&qhp->wq); c4iw_modify_qp() 1463 t4_set_wq_in_error(&qhp->wq); c4iw_modify_qp() 1480 t4_set_wq_in_error(&qhp->wq); c4iw_modify_qp() 1521 if (!t4_sq_empty(&qhp->wq) || !t4_rq_empty(&qhp->wq)) { c4iw_modify_qp() 1544 qhp->wq.sq.qid); c4iw_modify_qp() 1601 remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid); c4iw_destroy_qp() 1613 destroy_qp(&rhp->rdev, &qhp->wq, c4iw_destroy_qp() 1616 PDBG("%s ib_qp %p qpid 0x%0x\n", __func__, ib_qp, qhp->wq.sq.qid); c4iw_destroy_qp() 1667 qhp->wq.sq.size = sqsize; c4iw_create_qp() 1668 qhp->wq.sq.memsize = c4iw_create_qp() 1670 sizeof(*qhp->wq.sq.queue) + 16 * sizeof(__be64); c4iw_create_qp() 1671 qhp->wq.sq.flush_cidx = -1; c4iw_create_qp() 1672 qhp->wq.rq.size = rqsize; c4iw_create_qp() 1673 qhp->wq.rq.memsize = c4iw_create_qp() 1675 sizeof(*qhp->wq.rq.queue); c4iw_create_qp() 1678 qhp->wq.sq.memsize = roundup(qhp->wq.sq.memsize, PAGE_SIZE); c4iw_create_qp() 1679 qhp->wq.rq.memsize = roundup(qhp->wq.rq.memsize, PAGE_SIZE); c4iw_create_qp() 1682 ret = create_qp(&rhp->rdev, &qhp->wq, &schp->cq, &rchp->cq, c4iw_create_qp() 1713 ret = insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid); c4iw_create_qp() 1738 if (t4_sq_onchip(&qhp->wq.sq)) { c4iw_create_qp() 1748 uresp.sqid = qhp->wq.sq.qid; c4iw_create_qp() 1749 uresp.sq_size = qhp->wq.sq.size; c4iw_create_qp() 1750 uresp.sq_memsize = qhp->wq.sq.memsize; c4iw_create_qp() 1751 uresp.rqid = qhp->wq.rq.qid; c4iw_create_qp() 1752 uresp.rq_size = qhp->wq.rq.size; c4iw_create_qp() 1753 uresp.rq_memsize = qhp->wq.rq.memsize; c4iw_create_qp() 1774 mm1->addr = qhp->wq.sq.phys_addr; c4iw_create_qp() 1775 mm1->len = PAGE_ALIGN(qhp->wq.sq.memsize); c4iw_create_qp() 1778 mm2->addr = virt_to_phys(qhp->wq.rq.queue); c4iw_create_qp() 1779 mm2->len = PAGE_ALIGN(qhp->wq.rq.memsize); c4iw_create_qp() 1782 mm3->addr = (__force unsigned long)qhp->wq.sq.bar2_pa; c4iw_create_qp() 1786 mm4->addr = (__force unsigned long)qhp->wq.rq.bar2_pa; c4iw_create_qp() 1797 qhp->ibqp.qp_num = qhp->wq.sq.qid; c4iw_create_qp() 1802 qhp->wq.sq.qid, qhp->wq.sq.size, qhp->wq.sq.memsize, c4iw_create_qp() 1803 attrs->cap.max_send_wr, qhp->wq.rq.qid, qhp->wq.rq.size, c4iw_create_qp() 1804 qhp->wq.rq.memsize, attrs->cap.max_recv_wr); c4iw_create_qp() 1817 remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid); c4iw_create_qp() 1819 destroy_qp(&rhp->rdev, &qhp->wq, c4iw_create_qp()
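create_qp() above is a textbook instance of the kernel's goto-ladder error unwind: each successful allocation gains a matching cleanup label, and any later failure jumps to the label that releases everything acquired so far, in reverse order. A compact, compilable illustration of the idiom with hypothetical alloc/free helpers standing in for get_qpid(), kzalloc(), dma_alloc_coherent() and friends:

#include <stdio.h>

static int alloc_a(void) { puts("alloc a"); return 0; }
static int alloc_b(void) { puts("alloc b"); return 0; }
static int alloc_c(void) { puts("alloc c"); return -1; }	/* simulate failure */
static void free_a(void) { puts("free a"); }
static void free_b(void) { puts("free b"); }

static int demo_create(void)
{
	int ret;

	ret = alloc_a();
	if (ret)
		goto err_out;
	ret = alloc_b();
	if (ret)
		goto err_free_a;
	ret = alloc_c();
	if (ret)
		goto err_free_b;
	return 0;			/* everything succeeded */

err_free_b:
	free_b();
err_free_a:
	free_a();
err_out:
	return ret;
}

int main(void)
{
	printf("demo_create() = %d\n", demo_create());
	return 0;
}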
|
H A D | device.c | 117 void c4iw_log_wr_stats(struct t4_wq *wq, struct t4_cqe *cqe) c4iw_log_wr_stats() argument 122 if (!wq->rdev->wr_log) c4iw_log_wr_stats() 125 idx = (atomic_inc_return(&wq->rdev->wr_log_idx) - 1) & c4iw_log_wr_stats() 126 (wq->rdev->wr_log_size - 1); c4iw_log_wr_stats() 127 le.poll_sge_ts = cxgb4_read_sge_timestamp(wq->rdev->lldi.ports[0]); c4iw_log_wr_stats() 132 le.qid = wq->sq.qid; c4iw_log_wr_stats() 134 le.post_host_ts = wq->sq.sw_sq[wq->sq.cidx].host_ts; c4iw_log_wr_stats() 135 le.post_sge_ts = wq->sq.sw_sq[wq->sq.cidx].sge_ts; c4iw_log_wr_stats() 138 le.qid = wq->rq.qid; c4iw_log_wr_stats() 140 le.post_host_ts = wq->rq.sw_rq[wq->rq.cidx].host_ts; c4iw_log_wr_stats() 141 le.post_sge_ts = wq->rq.sw_rq[wq->rq.cidx].sge_ts; c4iw_log_wr_stats() 144 wq->rdev->wr_log[idx] = le; c4iw_log_wr_stats() 235 if (id != qp->wq.sq.qid) dump_qp() 257 qp->wq.sq.qid, qp->wq.rq.qid, dump_qp() 259 qp->wq.sq.flags & T4_SQ_ONCHIP, dump_qp() 281 qp->wq.sq.qid, qp->wq.rq.qid, dump_qp() 283 qp->wq.sq.flags & T4_SQ_ONCHIP, dump_qp() 295 qp->wq.sq.qid, qp->wq.rq.qid, dump_qp() 297 qp->wq.sq.flags & T4_SQ_ONCHIP); dump_qp() 1241 t4_disable_wq_db(&qp->wq); disable_qp_db() 1263 t4_enable_wq_db(&qp->wq); enable_qp_db() 1270 t4_ring_sq_db(&qp->wq, qp->wq.sq.wq_pidx_inc, NULL); resume_rc_qp() 1271 qp->wq.sq.wq_pidx_inc = 0; resume_rc_qp() 1272 t4_ring_rq_db(&qp->wq, qp->wq.rq.wq_pidx_inc, NULL); resume_rc_qp() 1273 qp->wq.rq.wq_pidx_inc = 0; resume_rc_qp() 1375 qp->wq.sq.qid, recover_lost_dbs() 1376 t4_sq_host_wq_pidx(&qp->wq), recover_lost_dbs() 1377 t4_sq_wq_size(&qp->wq)); recover_lost_dbs() 1382 pci_name(ctx->lldi.pdev), qp->wq.sq.qid); recover_lost_dbs() 1387 qp->wq.sq.wq_pidx_inc = 0; recover_lost_dbs() 1390 qp->wq.rq.qid, recover_lost_dbs() 1391 t4_rq_host_wq_pidx(&qp->wq), recover_lost_dbs() 1392 t4_rq_wq_size(&qp->wq)); recover_lost_dbs() 1398 pci_name(ctx->lldi.pdev), qp->wq.rq.qid); recover_lost_dbs() 1403 qp->wq.rq.wq_pidx_inc = 0; recover_lost_dbs()
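c4iw_log_wr_stats() above claims a slot in the work-request log with an atomic increment and then masks with wr_log_size - 1 instead of taking a modulo, which only works because the log size is a power of two; concurrent writers may overwrite stale entries, but each slot write is independent, so no lock is needed. The same pattern in portable C11 (atomic_fetch_add() returns the old value, i.e. atomic_inc_return() - 1):

#include <stdatomic.h>
#include <stdint.h>

#define LOG_SIZE 1024			/* must be a power of two */

static _Atomic uint32_t log_idx;
static uint64_t wr_log[LOG_SIZE];

/* lock-free ring logger in the style of c4iw_log_wr_stats() */
static void demo_log(uint64_t entry)
{
	uint32_t idx = atomic_fetch_add(&log_idx, 1) & (LOG_SIZE - 1);

	wr_log[idx] = entry;		/* old entries silently overwritten */
}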
|
/linux-4.4.14/drivers/infiniband/hw/cxgb3/ |
H A D | cxio_hal.c | 275 struct t3_wq *wq, struct cxio_ucontext *uctx) cxio_create_qp() 277 int depth = 1UL << wq->size_log2; cxio_create_qp() 278 int rqsize = 1UL << wq->rq_size_log2; cxio_create_qp() 280 wq->qpid = get_qpid(rdev_p, uctx); cxio_create_qp() 281 if (!wq->qpid) cxio_create_qp() 284 wq->rq = kzalloc(depth * sizeof(struct t3_swrq), GFP_KERNEL); cxio_create_qp() 285 if (!wq->rq) cxio_create_qp() 288 wq->rq_addr = cxio_hal_rqtpool_alloc(rdev_p, rqsize); cxio_create_qp() 289 if (!wq->rq_addr) cxio_create_qp() 292 wq->sq = kzalloc(depth * sizeof(struct t3_swsq), GFP_KERNEL); cxio_create_qp() 293 if (!wq->sq) cxio_create_qp() 296 wq->queue = dma_alloc_coherent(&(rdev_p->rnic_info.pdev->dev), cxio_create_qp() 298 &(wq->dma_addr), GFP_KERNEL); cxio_create_qp() 299 if (!wq->queue) cxio_create_qp() 302 memset(wq->queue, 0, depth * sizeof(union t3_wr)); cxio_create_qp() 303 dma_unmap_addr_set(wq, mapping, wq->dma_addr); cxio_create_qp() 304 wq->doorbell = (void __iomem *)rdev_p->rnic_info.kdb_addr; cxio_create_qp() 306 wq->udb = (u64)rdev_p->rnic_info.udbell_physbase + cxio_create_qp() 307 (wq->qpid << rdev_p->qpshift); cxio_create_qp() 308 wq->rdev = rdev_p; cxio_create_qp() 310 wq->qpid, wq->doorbell, (unsigned long long) wq->udb); cxio_create_qp() 313 kfree(wq->sq); cxio_create_qp() 315 cxio_hal_rqtpool_free(rdev_p, wq->rq_addr, rqsize); cxio_create_qp() 317 kfree(wq->rq); cxio_create_qp() 319 put_qpid(rdev_p, wq->qpid, uctx); cxio_create_qp() 336 int cxio_destroy_qp(struct cxio_rdev *rdev_p, struct t3_wq *wq, cxio_destroy_qp() argument 340 (1UL << (wq->size_log2)) cxio_destroy_qp() 341 * sizeof(union t3_wr), wq->queue, cxio_destroy_qp() 342 dma_unmap_addr(wq, mapping)); cxio_destroy_qp() 343 kfree(wq->sq); cxio_destroy_qp() 344 cxio_hal_rqtpool_free(rdev_p, wq->rq_addr, (1UL << wq->rq_size_log2)); cxio_destroy_qp() 345 kfree(wq->rq); cxio_destroy_qp() 346 put_qpid(rdev_p, wq->qpid, uctx); cxio_destroy_qp() 350 static void insert_recv_cqe(struct t3_wq *wq, struct t3_cq *cq) insert_recv_cqe() argument 354 PDBG("%s wq %p cq %p sw_rptr 0x%x sw_wptr 0x%x\n", __func__, insert_recv_cqe() 355 wq, cq, cq->sw_rptr, cq->sw_wptr); insert_recv_cqe() 361 V_CQE_QPID(wq->qpid) | insert_recv_cqe() 368 int cxio_flush_rq(struct t3_wq *wq, struct t3_cq *cq, int count) cxio_flush_rq() argument 373 PDBG("%s wq %p cq %p\n", __func__, wq, cq); cxio_flush_rq() 377 wq->rq_rptr, wq->rq_wptr, count); cxio_flush_rq() 378 ptr = wq->rq_rptr + count; cxio_flush_rq() 379 while (ptr++ != wq->rq_wptr) { cxio_flush_rq() 380 insert_recv_cqe(wq, cq); cxio_flush_rq() 386 static void insert_sq_cqe(struct t3_wq *wq, struct t3_cq *cq, insert_sq_cqe() argument 391 PDBG("%s wq %p cq %p sw_rptr 0x%x sw_wptr 0x%x\n", __func__, insert_sq_cqe() 392 wq, cq, cq->sw_rptr, cq->sw_wptr); insert_sq_cqe() 398 V_CQE_QPID(wq->qpid) | insert_sq_cqe() 407 int cxio_flush_sq(struct t3_wq *wq, struct t3_cq *cq, int count) cxio_flush_sq() argument 411 struct t3_swsq *sqp = wq->sq + Q_PTR2IDX(wq->sq_rptr, wq->sq_size_log2); cxio_flush_sq() 413 ptr = wq->sq_rptr + count; cxio_flush_sq() 414 sqp = wq->sq + Q_PTR2IDX(ptr, wq->sq_size_log2); cxio_flush_sq() 415 while (ptr != wq->sq_wptr) { cxio_flush_sq() 417 insert_sq_cqe(wq, cq, sqp); cxio_flush_sq() 419 sqp = wq->sq + Q_PTR2IDX(ptr, wq->sq_size_log2); cxio_flush_sq() 446 static int cqe_completes_wr(struct t3_cqe *cqe, struct t3_wq *wq) cqe_completes_wr() argument 458 Q_EMPTY(wq->rq_rptr, wq->rq_wptr)) cqe_completes_wr() 464 void cxio_count_scqes(struct t3_cq *cq, struct t3_wq *wq, int *count) 
cxio_count_scqes() argument 474 ((CQE_OPCODE(*cqe) == T3_READ_RESP) && wq->oldest_read)) && cxio_count_scqes() 475 (CQE_QPID(*cqe) == wq->qpid)) cxio_count_scqes() 482 void cxio_count_rcqes(struct t3_cq *cq, struct t3_wq *wq, int *count) cxio_count_rcqes() argument 493 (CQE_QPID(*cqe) == wq->qpid) && cqe_completes_wr(cqe, wq)) cxio_count_rcqes() 1073 static void flush_completed_wrs(struct t3_wq *wq, struct t3_cq *cq) flush_completed_wrs() argument 1076 __u32 ptr = wq->sq_rptr; flush_completed_wrs() 1077 int count = Q_COUNT(wq->sq_rptr, wq->sq_wptr); flush_completed_wrs() 1079 sqp = wq->sq + Q_PTR2IDX(ptr, wq->sq_size_log2); flush_completed_wrs() 1083 sqp = wq->sq + Q_PTR2IDX(ptr, wq->sq_size_log2); flush_completed_wrs() 1090 __func__, Q_PTR2IDX(ptr, wq->sq_size_log2), flush_completed_wrs() 1102 static void create_read_req_cqe(struct t3_wq *wq, struct t3_cqe *hw_cqe, create_read_req_cqe() argument 1105 read_cqe->u.scqe.wrid_hi = wq->oldest_read->sq_wptr; create_read_req_cqe() 1106 read_cqe->len = wq->oldest_read->read_len; create_read_req_cqe() 1116 static void advance_oldest_read(struct t3_wq *wq) advance_oldest_read() argument 1119 u32 rptr = wq->oldest_read - wq->sq + 1; advance_oldest_read() 1120 u32 wptr = Q_PTR2IDX(wq->sq_wptr, wq->sq_size_log2); advance_oldest_read() 1122 while (Q_PTR2IDX(rptr, wq->sq_size_log2) != wptr) { advance_oldest_read() 1123 wq->oldest_read = wq->sq + Q_PTR2IDX(rptr, wq->sq_size_log2); advance_oldest_read() 1125 if (wq->oldest_read->opcode == T3_READ_REQ) advance_oldest_read() 1129 wq->oldest_read = NULL; advance_oldest_read() 1137 * supply the wq associated with the qpid. 1147 int cxio_poll_cq(struct t3_wq *wq, struct t3_cq *cq, struct t3_cqe *cqe, cxio_poll_cq() argument 1167 if (wq == NULL) { cxio_poll_cq() 1186 if (!wq->oldest_read) { cxio_poll_cq() 1188 wq->error = 1; cxio_poll_cq() 1197 create_read_req_cqe(wq, hw_cqe, &read_cqe); cxio_poll_cq() 1199 advance_oldest_read(wq); cxio_poll_cq() 1207 wq->error = 1; cxio_poll_cq() 1211 if (CQE_STATUS(*hw_cqe) || wq->error) { cxio_poll_cq() 1212 *cqe_flushed = wq->error; cxio_poll_cq() 1213 wq->error = 1; cxio_poll_cq() 1233 Q_EMPTY(wq->rq_rptr, wq->rq_wptr)) { cxio_poll_cq() 1249 * then we complete this with TPT_ERR_MSN and mark the wq in cxio_poll_cq() 1253 if (Q_EMPTY(wq->rq_rptr, wq->rq_wptr)) { cxio_poll_cq() 1254 wq->error = 1; cxio_poll_cq() 1259 if (unlikely((CQE_WRID_MSN(*hw_cqe) != (wq->rq_rptr + 1)))) { cxio_poll_cq() 1260 wq->error = 1; cxio_poll_cq() 1278 if (!SW_CQE(*hw_cqe) && (CQE_WRID_SQ_WPTR(*hw_cqe) != wq->sq_rptr)) { cxio_poll_cq() 1283 Q_PTR2IDX(CQE_WRID_SQ_WPTR(*hw_cqe), wq->sq_size_log2)); cxio_poll_cq() 1284 sqp = wq->sq + cxio_poll_cq() 1285 Q_PTR2IDX(CQE_WRID_SQ_WPTR(*hw_cqe), wq->sq_size_log2); cxio_poll_cq() 1300 wq->sq_rptr = CQE_WRID_SQ_WPTR(*hw_cqe); cxio_poll_cq() 1302 Q_PTR2IDX(wq->sq_rptr, wq->sq_size_log2)); cxio_poll_cq() 1303 *cookie = wq->sq[Q_PTR2IDX(wq->sq_rptr, wq->sq_size_log2)].wr_id; cxio_poll_cq() 1304 wq->sq_rptr++; cxio_poll_cq() 1307 Q_PTR2IDX(wq->rq_rptr, wq->rq_size_log2)); cxio_poll_cq() 1308 *cookie = wq->rq[Q_PTR2IDX(wq->rq_rptr, wq->rq_size_log2)].wr_id; cxio_poll_cq() 1309 if (wq->rq[Q_PTR2IDX(wq->rq_rptr, wq->rq_size_log2)].pbl_addr) cxio_poll_cq() 1310 cxio_hal_pblpool_free(wq->rdev, cxio_poll_cq() 1311 wq->rq[Q_PTR2IDX(wq->rq_rptr, cxio_poll_cq() 1312 wq->rq_size_log2)].pbl_addr, T3_STAG0_PBL_SIZE); cxio_poll_cq() 1313 BUG_ON(Q_EMPTY(wq->rq_rptr, wq->rq_wptr)); cxio_poll_cq() 1314 wq->rq_rptr++; cxio_poll_cq() 1321 flush_completed_wrs(wq, cq);
cxio_poll_cq() 274 cxio_create_qp(struct cxio_rdev *rdev_p, u32 kernel_domain, struct t3_wq *wq, struct cxio_ucontext *uctx) cxio_create_qp() argument
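Unlike the T4 code above, the T3 queues keep free-running 32-bit read/write pointers together with a log2 queue size: Q_PTR2IDX masks a pointer down to a ring index, occupancy falls out of plain pointer subtraction, and Q_GENBIT extracts the phase bit that flips on every wrap (polarity here is illustrative; note also that iwch_post_receive() reserves one extra RQ slot by subtracting 1 from the free count). A sketch of the scheme with DEMO_* macros mirroring the Q_* family:

#include <stdint.h>

#define DEMO_PTR2IDX(ptr, log2)	((ptr) & ((1u << (log2)) - 1))
#define DEMO_GENBIT(ptr, log2)	(((ptr) >> (log2)) & 1)
#define DEMO_COUNT(rptr, wptr)	((wptr) - (rptr))
#define DEMO_EMPTY(rptr, wptr)	((rptr) == (wptr))
#define DEMO_FREECNT(rptr, wptr, log2) \
	((1u << (log2)) - DEMO_COUNT(rptr, wptr))

/* typical producer step: convert the free-running pointer to an index,
 * fill the descriptor there, then advance the pointer */
static uint32_t demo_next_idx(uint32_t *wptr, uint32_t size_log2)
{
	return DEMO_PTR2IDX((*wptr)++, size_log2);
}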
|
H A D | iwch_qp.c | 150 u8 *flit_cnt, int *wr_cnt, struct t3_wq *wq) build_memreg() 175 wqe = (union t3_wr *)(wq->queue + build_memreg() 176 Q_PTR2IDX((wq->wptr+1), wq->size_log2)); build_memreg() 178 Q_GENBIT(wq->wptr + 1, wq->size_log2), build_memreg() 281 qhp->wq.rq[Q_PTR2IDX(qhp->wq.rq_wptr, build_rdma_recv() 282 qhp->wq.rq_size_log2)].wr_id = wr->wr_id; build_rdma_recv() 283 qhp->wq.rq[Q_PTR2IDX(qhp->wq.rq_wptr, build_rdma_recv() 284 qhp->wq.rq_size_log2)].pbl_addr = 0; build_rdma_recv() 344 qhp->wq.rq[Q_PTR2IDX(qhp->wq.rq_wptr, build_zero_stag_recv() 345 qhp->wq.rq_size_log2)].wr_id = wr->wr_id; build_zero_stag_recv() 346 qhp->wq.rq[Q_PTR2IDX(qhp->wq.rq_wptr, build_zero_stag_recv() 347 qhp->wq.rq_size_log2)].pbl_addr = pbl_addr; build_zero_stag_recv() 373 num_wrs = Q_FREECNT(qhp->wq.sq_rptr, qhp->wq.sq_wptr, iwch_post_send() 374 qhp->wq.sq_size_log2); iwch_post_send() 385 idx = Q_PTR2IDX(qhp->wq.wptr, qhp->wq.size_log2); iwch_post_send() 386 wqe = (union t3_wr *) (qhp->wq.queue + idx); iwch_post_send() 392 sqp = qhp->wq.sq + iwch_post_send() 393 Q_PTR2IDX(qhp->wq.sq_wptr, qhp->wq.sq_size_log2); iwch_post_send() 415 if (!qhp->wq.oldest_read) iwch_post_send() 416 qhp->wq.oldest_read = sqp; iwch_post_send() 421 &wr_cnt, &qhp->wq); iwch_post_send() 436 wqe->send.wrid.id0.hi = qhp->wq.sq_wptr; iwch_post_send() 439 sqp->sq_wptr = qhp->wq.sq_wptr; iwch_post_send() 444 Q_GENBIT(qhp->wq.wptr, qhp->wq.size_log2), iwch_post_send() 447 PDBG("%s cookie 0x%llx wq idx 0x%x swsq idx %ld opcode %d\n", iwch_post_send() 449 Q_PTR2IDX(qhp->wq.sq_wptr, qhp->wq.sq_size_log2), iwch_post_send() 453 qhp->wq.wptr += wr_cnt; iwch_post_send() 454 ++(qhp->wq.sq_wptr); iwch_post_send() 457 if (cxio_wq_db_enabled(&qhp->wq)) iwch_post_send() 458 ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid); iwch_post_send() 483 num_wrs = Q_FREECNT(qhp->wq.rq_rptr, qhp->wq.rq_wptr, iwch_post_receive() 484 qhp->wq.rq_size_log2) - 1; iwch_post_receive() 495 idx = Q_PTR2IDX(qhp->wq.wptr, qhp->wq.size_log2); iwch_post_receive() 496 wqe = (union t3_wr *) (qhp->wq.queue + idx); iwch_post_receive() 509 Q_GENBIT(qhp->wq.wptr, qhp->wq.size_log2), iwch_post_receive() 513 idx, qhp->wq.rq_wptr, qhp->wq.rq_rptr, wqe); iwch_post_receive() 514 ++(qhp->wq.rq_wptr); iwch_post_receive() 515 ++(qhp->wq.wptr); iwch_post_receive() 520 if (cxio_wq_db_enabled(&qhp->wq)) iwch_post_receive() 521 ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid); iwch_post_receive() 556 num_wrs = Q_FREECNT(qhp->wq.sq_rptr, qhp->wq.sq_wptr, iwch_bind_mw() 557 qhp->wq.sq_size_log2); iwch_bind_mw() 562 idx = Q_PTR2IDX(qhp->wq.wptr, qhp->wq.size_log2); iwch_bind_mw() 565 wqe = (union t3_wr *) (qhp->wq.queue + idx); iwch_bind_mw() 589 wqe->send.wrid.id0.hi = qhp->wq.sq_wptr; iwch_bind_mw() 590 sqp = qhp->wq.sq + Q_PTR2IDX(qhp->wq.sq_wptr, qhp->wq.sq_size_log2); iwch_bind_mw() 593 sqp->sq_wptr = qhp->wq.sq_wptr; iwch_bind_mw() 599 Q_GENBIT(qhp->wq.wptr, qhp->wq.size_log2), 0, iwch_bind_mw() 601 ++(qhp->wq.wptr); iwch_bind_mw() 602 ++(qhp->wq.sq_wptr); iwch_bind_mw() 605 if (cxio_wq_db_enabled(&qhp->wq)) iwch_bind_mw() 606 ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid); iwch_bind_mw() 823 cxio_count_rcqes(&rchp->cq, &qhp->wq, &count); __flush_qp() 824 flushed = cxio_flush_rq(&qhp->wq, &rchp->cq, count); __flush_qp() 837 cxio_count_scqes(&schp->cq, &qhp->wq, &count); __flush_qp() 838 flushed = cxio_flush_sq(&qhp->wq, &schp->cq, count); __flush_qp() 862 cxio_set_wq_in_error(&qhp->wq); flush_qp() 885 union t3_wr *wqe = qhp->wq.queue; iwch_rqes_posted() 904 init_attr.qpid = qhp->wq.qpid; 
rdma_init() 908 init_attr.rq_addr = qhp->wq.rq_addr; rdma_init() 909 init_attr.rq_size = 1 << qhp->wq.rq_size_log2; rdma_init() 925 init_attr.qp_dma_addr = qhp->wq.dma_addr; rdma_init() 926 init_attr.qp_dma_size = (1UL << qhp->wq.size_log2); rdma_init() 963 qhp, qhp->wq.qpid, qhp->ep, qhp->attr.state, iwch_modify_qp() 1058 cxio_set_wq_in_error(&qhp->wq); iwch_modify_qp() 1104 if (!Q_EMPTY(qhp->wq.sq_rptr, qhp->wq.sq_wptr) || iwch_modify_qp() 1105 !Q_EMPTY(qhp->wq.rq_rptr, qhp->wq.rq_wptr)) { iwch_modify_qp() 1128 qhp->wq.qpid); iwch_modify_qp() 149 build_memreg(union t3_wr *wqe, struct ib_reg_wr *wr, u8 *flit_cnt, int *wr_cnt, struct t3_wq *wq) build_memreg() argument
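The posting path above stamps each work request with Q_GENBIT of the current write pointer before ringing the doorbell: because the generation bit flips on every pass over the ring, the consumer can tell a freshly written descriptor from a stale one left over from the previous lap without a separate producer index. A sketch of that idea (the encoding and polarity are illustrative, not the exact T3 wire format):

#include <stdint.h>

struct demo_wr {
	uint32_t flags;		/* low bit carries the generation bit */
	/* ... payload ... */
};

static void demo_post(struct demo_wr *queue, uint32_t *wptr,
		      uint32_t size_log2)
{
	uint32_t idx = *wptr & ((1u << size_log2) - 1);
	uint32_t gen = (*wptr >> size_log2) & 1;

	queue[idx].flags = gen;	/* consumer treats a matching gen as "new" */
	(*wptr)++;
	/* real code now rings the doorbell, if cxio_wq_db_enabled() */
}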
|
H A D | cxio_hal.h | 165 int cxio_create_qp(struct cxio_rdev *rdev, u32 kernel_domain, struct t3_wq *wq, 167 int cxio_destroy_qp(struct cxio_rdev *rdev, struct t3_wq *wq, 190 int cxio_flush_rq(struct t3_wq *wq, struct t3_cq *cq, int count); 191 int cxio_flush_sq(struct t3_wq *wq, struct t3_cq *cq, int count); 192 void cxio_count_rcqes(struct t3_cq *cq, struct t3_wq *wq, int *count); 193 void cxio_count_scqes(struct t3_cq *cq, struct t3_wq *wq, int *count); 195 int cxio_poll_cq(struct t3_wq *wq, struct t3_cq *cq, struct t3_cqe *cqe,
|
H A D | iwch_cq.c | 49 struct t3_wq *wq; iwch_poll_cq_one() local 62 wq = NULL; iwch_poll_cq_one() 65 wq = &(qhp->wq); iwch_poll_cq_one() 67 ret = cxio_poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie, iwch_poll_cq_one() 190 if (wq) iwch_poll_cq_one()
|
H A D | iwch_provider.c | 901 remove_handle(rhp, &rhp->qpidr, qhp->wq.qpid); iwch_destroy_qp() 908 cxio_destroy_qp(&rhp->rdev, &qhp->wq, iwch_destroy_qp() 912 ib_qp, qhp->wq.qpid, qhp); iwch_destroy_qp() 964 * Kernel users need more wq space for fastreg WRs which can take iwch_create_qp() 976 qhp->wq.size_log2 = ilog2(wqsize); iwch_create_qp() 977 qhp->wq.rq_size_log2 = ilog2(rqsize); iwch_create_qp() 978 qhp->wq.sq_size_log2 = ilog2(sqsize); iwch_create_qp() 979 if (cxio_create_qp(&rhp->rdev, !udata, &qhp->wq, iwch_create_qp() 1016 if (insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.qpid)) { iwch_create_qp() 1017 cxio_destroy_qp(&rhp->rdev, &qhp->wq, iwch_create_qp() 1040 uresp.qpid = qhp->wq.qpid; iwch_create_qp() 1041 uresp.size_log2 = qhp->wq.size_log2; iwch_create_qp() 1042 uresp.sq_size_log2 = qhp->wq.sq_size_log2; iwch_create_qp() 1043 uresp.rq_size_log2 = qhp->wq.rq_size_log2; iwch_create_qp() 1057 mm1->addr = virt_to_phys(qhp->wq.queue); iwch_create_qp() 1061 mm2->addr = qhp->wq.udb & PAGE_MASK; iwch_create_qp() 1065 qhp->ibqp.qp_num = qhp->wq.qpid; iwch_create_qp() 1070 qhp->wq.qpid, qhp, (unsigned long long) qhp->wq.dma_addr, iwch_create_qp() 1071 1 << qhp->wq.size_log2, qhp->wq.rq_addr); iwch_create_qp()
|
H A D | iwch_ev.c | 66 qhp->attr.state, qhp->wq.qpid, CQE_STATUS(rsp_msg->cqe)); post_qp_event() 141 __func__, qhp->wq.qpid, qhp->ep); iwch_ev_dispatch() 145 qhp->wq.qpid); iwch_ev_dispatch() 222 CQE_STATUS(rsp_msg->cqe), qhp->wq.qpid); iwch_ev_dispatch()
|
H A D | cxio_wr.h | 698 u32 size_log2; /* total wq size */ 747 static inline void cxio_set_wq_in_error(struct t3_wq *wq) cxio_set_wq_in_error() argument 749 wq->queue->wq_in_err.err |= 1; cxio_set_wq_in_error() 752 static inline void cxio_disable_wq_db(struct t3_wq *wq) cxio_disable_wq_db() argument 754 wq->queue->wq_in_err.err |= 2; cxio_disable_wq_db() 757 static inline void cxio_enable_wq_db(struct t3_wq *wq) cxio_enable_wq_db() argument 759 wq->queue->wq_in_err.err &= ~2; cxio_enable_wq_db() 762 static inline int cxio_wq_db_enabled(struct t3_wq *wq) cxio_wq_db_enabled() argument 764 return !(wq->queue->wq_in_err.err & 2); cxio_wq_db_enabled()
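The helpers above multiplex two independent conditions into the single wq_in_err.err word that both kernel and user producers can see: bit 0 latches the error state and is not cleared by these helpers, while bit 1 merely gates doorbell rings and is toggled around doorbell recovery. The encoding, spelled out:

#define DEMO_WQ_ERR	0x1	/* queue is in error: stop posting */
#define DEMO_WQ_DB_OFF	0x2	/* doorbells temporarily disabled  */

/* mirrors cxio_wq_db_enabled(): ring only while bit 1 is clear */
static int demo_db_enabled(unsigned int err)
{
	return !(err & DEMO_WQ_DB_OFF);
}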
|
H A D | iwch.c | 70 cxio_disable_wq_db(&qhp->wq); disable_qp_db() 79 ring_doorbell(qhp->rhp->rdev.ctrl_qp.doorbell, qhp->wq.qpid); enable_qp_db() 80 cxio_enable_wq_db(&qhp->wq); enable_qp_db()
|
/linux-4.4.14/include/linux/ |
H A D | wait.h | 212 #define ___wait_event(wq, condition, state, exclusive, ret, cmd) \ 225 long __int = prepare_to_wait_event(&wq, &__wait, state);\ 233 abort_exclusive_wait(&wq, &__wait, \ 242 finish_wait(&wq, &__wait); \ 246 #define __wait_event(wq, condition) \ 247 (void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0, \ 252 * @wq: the waitqueue to wait on 257 * the waitqueue @wq is woken up. 262 #define wait_event(wq, condition) \ 267 __wait_event(wq, condition); \ 270 #define __io_wait_event(wq, condition) \ 271 (void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0, \ 277 #define io_wait_event(wq, condition) \ 282 __io_wait_event(wq, condition); \ 285 #define __wait_event_freezable(wq, condition) \ 286 ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0, \ 291 * @wq: the waitqueue to wait on 296 * @condition is checked each time the waitqueue @wq is woken up. 301 #define wait_event_freezable(wq, condition) \ 306 __ret = __wait_event_freezable(wq, condition); \ 310 #define __wait_event_timeout(wq, condition, timeout) \ 311 ___wait_event(wq, ___wait_cond_timeout(condition), \ 317 * @wq: the waitqueue to wait on 323 * the waitqueue @wq is woken up. 334 #define wait_event_timeout(wq, condition, timeout) \ 339 __ret = __wait_event_timeout(wq, condition, timeout); \ 343 #define __wait_event_freezable_timeout(wq, condition, timeout) \ 344 ___wait_event(wq, ___wait_cond_timeout(condition), \ 352 #define wait_event_freezable_timeout(wq, condition, timeout) \ 357 __ret = __wait_event_freezable_timeout(wq, condition, timeout); \ 361 #define __wait_event_exclusive_cmd(wq, condition, cmd1, cmd2) \ 362 (void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 1, 0, \ 367 #define wait_event_exclusive_cmd(wq, condition, cmd1, cmd2) \ 371 __wait_event_exclusive_cmd(wq, condition, cmd1, cmd2); \ 374 #define __wait_event_cmd(wq, condition, cmd1, cmd2) \ 375 (void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0, \ 380 * @wq: the waitqueue to wait on 387 * the waitqueue @wq is woken up. 392 #define wait_event_cmd(wq, condition, cmd1, cmd2) \ 396 __wait_event_cmd(wq, condition, cmd1, cmd2); \ 399 #define __wait_event_interruptible(wq, condition) \ 400 ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0, \ 405 * @wq: the waitqueue to wait on 410 * The @condition is checked each time the waitqueue @wq is woken up. 418 #define wait_event_interruptible(wq, condition) \ 423 __ret = __wait_event_interruptible(wq, condition); \ 427 #define __wait_event_interruptible_timeout(wq, condition, timeout) \ 428 ___wait_event(wq, ___wait_cond_timeout(condition), \ 434 * @wq: the waitqueue to wait on 440 * The @condition is checked each time the waitqueue @wq is woken up. 452 #define wait_event_interruptible_timeout(wq, condition, timeout) \ 457 __ret = __wait_event_interruptible_timeout(wq, \ 462 #define __wait_event_hrtimeout(wq, condition, timeout, state) \ 475 __ret = ___wait_event(wq, condition, state, 0, 0, \ 489 * @wq: the waitqueue to wait on 495 * The @condition is checked each time the waitqueue @wq is woken up. 503 #define wait_event_hrtimeout(wq, condition, timeout) \ 508 __ret = __wait_event_hrtimeout(wq, condition, timeout, \ 515 * @wq: the waitqueue to wait on 521 * The @condition is checked each time the waitqueue @wq is woken up. 
529 #define wait_event_interruptible_hrtimeout(wq, condition, timeout) \ 534 __ret = __wait_event_hrtimeout(wq, condition, timeout, \ 539 #define __wait_event_interruptible_exclusive(wq, condition) \ 540 ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0, \ 543 #define wait_event_interruptible_exclusive(wq, condition) \ 548 __ret = __wait_event_interruptible_exclusive(wq, condition);\ 553 #define __wait_event_freezable_exclusive(wq, condition) \ 554 ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0, \ 557 #define wait_event_freezable_exclusive(wq, condition) \ 562 __ret = __wait_event_freezable_exclusive(wq, condition);\ 567 #define __wait_event_interruptible_locked(wq, condition, exclusive, irq) \ 575 __add_wait_queue_tail(&(wq), &__wait); \ 582 spin_unlock_irq(&(wq).lock); \ 584 spin_unlock(&(wq).lock); \ 587 spin_lock_irq(&(wq).lock); \ 589 spin_lock(&(wq).lock); \ 591 __remove_wait_queue(&(wq), &__wait); \ 599 * @wq: the waitqueue to wait on 604 * The @condition is checked each time the waitqueue @wq is woken up. 606 * It must be called with wq.lock being held. This spinlock is 620 #define wait_event_interruptible_locked(wq, condition) \ 622 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 0)) 626 * @wq: the waitqueue to wait on 631 * The @condition is checked each time the waitqueue @wq is woken up. 633 * It must be called with wq.lock being held. This spinlock is 647 #define wait_event_interruptible_locked_irq(wq, condition) \ 649 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 1)) 653 * @wq: the waitqueue to wait on 658 * The @condition is checked each time the waitqueue @wq is woken up. 660 * It must be called with wq.lock being held. This spinlock is 678 #define wait_event_interruptible_exclusive_locked(wq, condition) \ 680 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 0)) 684 * @wq: the waitqueue to wait on 689 * The @condition is checked each time the waitqueue @wq is woken up. 691 * It must be called with wq.lock being held. This spinlock is 709 #define wait_event_interruptible_exclusive_locked_irq(wq, condition) \ 711 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 1)) 714 #define __wait_event_killable(wq, condition) \ 715 ___wait_event(wq, condition, TASK_KILLABLE, 0, 0, schedule()) 719 * @wq: the waitqueue to wait on 724 * The @condition is checked each time the waitqueue @wq is woken up. 732 #define wait_event_killable(wq, condition) \ 737 __ret = __wait_event_killable(wq, condition); \ 742 #define __wait_event_lock_irq(wq, condition, lock, cmd) \ 743 (void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0, \ 754 * @wq: the waitqueue to wait on 763 * the waitqueue @wq is woken up. 772 #define wait_event_lock_irq_cmd(wq, condition, lock, cmd) \ 776 __wait_event_lock_irq(wq, condition, lock, cmd); \ 784 * @wq: the waitqueue to wait on 791 * the waitqueue @wq is woken up. 799 #define wait_event_lock_irq(wq, condition, lock) \ 803 __wait_event_lock_irq(wq, condition, lock, ); \ 807 #define __wait_event_interruptible_lock_irq(wq, condition, lock, cmd) \ 808 ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0, \ 818 * @wq: the waitqueue to wait on 827 * checked each time the waitqueue @wq is woken up. 839 #define wait_event_interruptible_lock_irq_cmd(wq, condition, lock, cmd) \ 843 __ret = __wait_event_interruptible_lock_irq(wq, \ 852 * @wq: the waitqueue to wait on 859 * checked each time the waitqueue @wq is woken up. 
870 #define wait_event_interruptible_lock_irq(wq, condition, lock) \ 874 __ret = __wait_event_interruptible_lock_irq(wq, \ 879 #define __wait_event_interruptible_lock_irq_timeout(wq, condition, \ 881 ___wait_event(wq, ___wait_cond_timeout(condition), \ 891 * @wq: the waitqueue to wait on 899 * checked each time the waitqueue @wq is woken up. 911 #define wait_event_interruptible_lock_irq_timeout(wq, condition, lock, \ 917 wq, condition, lock, timeout); \
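All of the macros above funnel into ___wait_event(): prepare_to_wait_event() puts the task on the queue and sets its state, the condition is re-tested after every wakeup (so spurious wakeups are harmless), and finish_wait() dequeues it on exit. The canonical usage pattern, as a minimal kernel-style sketch (driver context assumed):

#include <linux/wait.h>
#include <linux/errno.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_wq);
static int demo_ready;

/* consumer: sleep until demo_ready becomes true, or a signal arrives */
static int demo_consume(void)
{
	if (wait_event_interruptible(demo_wq, demo_ready))
		return -ERESTARTSYS;	/* interrupted before the condition */
	demo_ready = 0;
	return 0;
}

/* producer: make the condition true before waking the queue */
static void demo_produce(void)
{
	demo_ready = 1;
	wake_up_interruptible(&demo_wq);
}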
|
H A D | workqueue.h | 118 struct workqueue_struct *wq; member in struct:delayed_work 316 WQ_MAX_UNBOUND_PER_CPU = 4, /* 4 * #cpus for unbound wq */ 320 /* unbound wq's aren't per-cpu, scale max_active according to #cpus */ 421 extern void destroy_workqueue(struct workqueue_struct *wq); 425 int apply_workqueue_attrs(struct workqueue_struct *wq, 429 extern bool queue_work_on(int cpu, struct workqueue_struct *wq, 431 extern bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq, 433 extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq, 436 extern void flush_workqueue(struct workqueue_struct *wq); 437 extern void drain_workqueue(struct workqueue_struct *wq); 450 extern void workqueue_set_max_active(struct workqueue_struct *wq, 453 extern bool workqueue_congested(int cpu, struct workqueue_struct *wq); 461 * @wq: workqueue to use 469 static inline bool queue_work(struct workqueue_struct *wq, queue_work() argument 472 return queue_work_on(WORK_CPU_UNBOUND, wq, work); queue_work() 477 * @wq: workqueue to use 483 static inline bool queue_delayed_work(struct workqueue_struct *wq, queue_delayed_work() argument 487 return queue_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay); queue_delayed_work() 492 * @wq: workqueue to use 498 static inline bool mod_delayed_work(struct workqueue_struct *wq, mod_delayed_work() argument 502 return mod_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay); mod_delayed_work() 615 int workqueue_sysfs_register(struct workqueue_struct *wq); 617 static inline int workqueue_sysfs_register(struct workqueue_struct *wq) workqueue_sysfs_register() argument
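A minimal end-to-end use of the API declared above, as a module-style sketch: allocate a queue, queue a work item on it, and tear everything down in reverse; queue_work() is just queue_work_on(WORK_CPU_UNBOUND, ...), as the header shows:

#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *demo_wq;

static void demo_fn(struct work_struct *work)
{
	pr_info("demo work ran in process context\n");
}
static DECLARE_WORK(demo_work, demo_fn);

static int __init demo_init(void)
{
	/* WQ_UNBOUND: not tied to the submitting CPU; max_active = 1 */
	demo_wq = alloc_workqueue("demo", WQ_UNBOUND, 1);
	if (!demo_wq)
		return -ENOMEM;
	queue_work(demo_wq, &demo_work);
	return 0;
}

static void __exit demo_exit(void)
{
	flush_workqueue(demo_wq);	/* wait for demo_work to finish */
	destroy_workqueue(demo_wq);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");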
|
H A D | freezer.h | 250 #define wait_event_freezekillable_unsafe(wq, condition) \ 254 __retval = wait_event_killable(wq, (condition)); \ 296 #define wait_event_freezekillable_unsafe(wq, condition) \ 297 wait_event_killable(wq, condition)
|
H A D | padata.h | 143 * @wq: The workqueue in use. 155 struct workqueue_struct *wq; member in struct:padata_instance 168 struct workqueue_struct *wq); 169 extern struct padata_instance *padata_alloc(struct workqueue_struct *wq,
|
/linux-4.4.14/kernel/ |
H A D | workqueue.c | 130 * PW: wq_pool_mutex and wq->mutex protected for writes. Either for reads. 132 * PWR: wq_pool_mutex and wq->mutex protected for writes. Either or 135 * WQ: wq->mutex protected. 137 * WR: wq->mutex protected for writes. Sched-RCU protected for reads. 200 struct workqueue_struct *wq; /* I: the owning workqueue */ member in struct:pool_workqueue 209 struct list_head pwqs_node; /* WR: node on wq->pwqs */ 210 struct list_head mayday_node; /* MD: node on wq->maydays */ 216 * determined without grabbing wq->mutex. 238 struct list_head pwqs; /* WR: all pwqs of this wq */ 241 struct mutex mutex; /* protects this wq */ 297 static DEFINE_SPINLOCK(wq_mayday_lock); /* protects wq->maydays list */ 335 static void workqueue_sysfs_unregister(struct workqueue_struct *wq); 345 #define assert_rcu_or_wq_mutex(wq) \ 347 !lockdep_is_held(&wq->mutex), \ 348 "sched RCU or wq->mutex should be held") 350 #define assert_rcu_or_wq_mutex_or_pool_mutex(wq) \ 352 !lockdep_is_held(&wq->mutex) && \ 354 "sched RCU, wq->mutex or wq_pool_mutex should be held") 396 * @wq: the target workqueue 398 * This must be called either with wq->mutex held or sched RCU read locked. 405 #define for_each_pwq(pwq, wq) \ 406 list_for_each_entry_rcu((pwq), &(wq)->pwqs, pwqs_node) \ 407 if (({ assert_rcu_or_wq_mutex(wq); false; })) { } \ 557 * @wq: the target workqueue 560 * This must be called with any of wq_pool_mutex, wq->mutex or sched RCU 567 static struct pool_workqueue *unbound_pwq_by_node(struct workqueue_struct *wq, unbound_pwq_by_node() argument 570 assert_rcu_or_wq_mutex_or_pool_mutex(wq); unbound_pwq_by_node() 579 return wq->dfl_pwq; unbound_pwq_by_node() 581 return rcu_dereference_raw(wq->numa_pwq_tbl[node]); unbound_pwq_by_node() 1088 if (WARN_ON_ONCE(!(pwq->wq->flags & WQ_UNBOUND))) put_pwq() 1179 if (atomic_dec_and_test(&pwq->wq->nr_pwqs_to_flush)) pwq_dec_nr_in_flight() 1180 complete(&pwq->wq->first_flusher->done); pwq_dec_nr_in_flight() 1248 * item is queued on pwq->wq, and both updating work->data to point try_to_grab_pending() 1324 static bool is_chained_work(struct workqueue_struct *wq) is_chained_work() argument 1330 * Return %true iff I'm a worker executing a work item on @wq.
If is_chained_work() 1333 return worker && worker->current_pwq->wq == wq; is_chained_work() 1336 static void __queue_work(int cpu, struct workqueue_struct *wq, __queue_work() argument 1356 if (unlikely(wq->flags & __WQ_DRAINING) && __queue_work() 1357 WARN_ON_ONCE(!is_chained_work(wq))) __queue_work() 1364 if (!(wq->flags & WQ_UNBOUND)) __queue_work() 1365 pwq = per_cpu_ptr(wq->cpu_pwqs, cpu); __queue_work() 1367 pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu)); __queue_work() 1382 if (worker && worker->current_pwq->wq == wq) { __queue_work() 1402 if (wq->flags & WQ_UNBOUND) { __queue_work() 1409 wq->name, cpu); __queue_work() 1440 * @wq: workqueue to use 1448 bool queue_work_on(int cpu, struct workqueue_struct *wq, queue_work_on() argument 1457 __queue_work(cpu, wq, work); queue_work_on() 1471 __queue_work(dwork->cpu, dwork->wq, &dwork->work); delayed_work_timer_fn() 1475 static void __queue_delayed_work(int cpu, struct workqueue_struct *wq, __queue_delayed_work() argument 1493 __queue_work(cpu, wq, &dwork->work); __queue_delayed_work() 1499 dwork->wq = wq; __queue_delayed_work() 1512 * @wq: workqueue to use 1520 bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq, queue_delayed_work_on() argument 1531 __queue_delayed_work(cpu, wq, dwork, delay); queue_delayed_work_on() 1543 * @wq: workqueue to use 1558 bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq, mod_delayed_work_on() argument 1569 __queue_delayed_work(cpu, wq, dwork, delay); mod_delayed_work_on() 1835 struct workqueue_struct *wq = pwq->wq; send_mayday() local 1839 if (!wq->rescuer) send_mayday() 1845 * If @pwq is for an unbound wq, its base ref may be put at send_mayday() 1850 list_add_tail(&pwq->mayday_node, &wq->maydays); send_mayday() 1851 wake_up_process(wq->rescuer->task); send_mayday() 1861 spin_lock(&wq_mayday_lock); /* for wq->maydays */ pool_mayday_timeout() 1996 bool cpu_intensive = pwq->wq->flags & WQ_CPU_INTENSIVE; 2066 lock_map_acquire_read(&pwq->wq->lockdep_map); 2076 lock_map_release(&pwq->wq->lockdep_map); 2250 struct workqueue_struct *wq = rescuer->rescue_wq; rescuer_thread() local 2266 * shouldn't have any work pending, but @wq->maydays may still have rescuer_thread() 2269 * @wq->maydays processing before acting on should_stop so that the rescuer_thread() 2277 while (!list_empty(&wq->maydays)) { rescuer_thread() 2278 struct pool_workqueue *pwq = list_first_entry(&wq->maydays, rescuer_thread() 2317 list_move_tail(&pwq->mayday_node, &wq->maydays); rescuer_thread() 2434 * @wq: workqueue being flushed 2444 * @flush_color, @wq->nr_pwqs_to_flush is updated accordingly, pwq 2447 * The caller should have initialized @wq->first_flusher prior to 2457 * mutex_lock(wq->mutex). 
2463 static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq, flush_workqueue_prep_pwqs() argument 2470 WARN_ON_ONCE(atomic_read(&wq->nr_pwqs_to_flush)); flush_workqueue_prep_pwqs() 2471 atomic_set(&wq->nr_pwqs_to_flush, 1); flush_workqueue_prep_pwqs() 2474 for_each_pwq(pwq, wq) { for_each_pwq() 2484 atomic_inc(&wq->nr_pwqs_to_flush); for_each_pwq() 2497 if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_pwqs_to_flush)) 2498 complete(&wq->first_flusher->done); 2505 * @wq: workqueue to flush 2510 void flush_workqueue(struct workqueue_struct *wq) flush_workqueue() argument 2519 lock_map_acquire(&wq->lockdep_map); flush_workqueue() 2520 lock_map_release(&wq->lockdep_map); flush_workqueue() 2522 mutex_lock(&wq->mutex); flush_workqueue() 2527 next_color = work_next_color(wq->work_color); flush_workqueue() 2529 if (next_color != wq->flush_color) { flush_workqueue() 2535 WARN_ON_ONCE(!list_empty(&wq->flusher_overflow)); flush_workqueue() 2536 this_flusher.flush_color = wq->work_color; flush_workqueue() 2537 wq->work_color = next_color; flush_workqueue() 2539 if (!wq->first_flusher) { flush_workqueue() 2541 WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color); flush_workqueue() 2543 wq->first_flusher = &this_flusher; flush_workqueue() 2545 if (!flush_workqueue_prep_pwqs(wq, wq->flush_color, flush_workqueue() 2546 wq->work_color)) { flush_workqueue() 2548 wq->flush_color = next_color; flush_workqueue() 2549 wq->first_flusher = NULL; flush_workqueue() 2554 WARN_ON_ONCE(wq->flush_color == this_flusher.flush_color); flush_workqueue() 2555 list_add_tail(&this_flusher.list, &wq->flusher_queue); flush_workqueue() 2556 flush_workqueue_prep_pwqs(wq, -1, wq->work_color); flush_workqueue() 2564 list_add_tail(&this_flusher.list, &wq->flusher_overflow); flush_workqueue() 2567 mutex_unlock(&wq->mutex); flush_workqueue() 2577 if (wq->first_flusher != &this_flusher) flush_workqueue() 2580 mutex_lock(&wq->mutex); flush_workqueue() 2583 if (wq->first_flusher != &this_flusher) flush_workqueue() 2586 wq->first_flusher = NULL; flush_workqueue() 2589 WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color); flush_workqueue() 2595 list_for_each_entry_safe(next, tmp, &wq->flusher_queue, list) { flush_workqueue() 2596 if (next->flush_color != wq->flush_color) flush_workqueue() 2602 WARN_ON_ONCE(!list_empty(&wq->flusher_overflow) && flush_workqueue() 2603 wq->flush_color != work_next_color(wq->work_color)); flush_workqueue() 2606 wq->flush_color = work_next_color(wq->flush_color); flush_workqueue() 2609 if (!list_empty(&wq->flusher_overflow)) { flush_workqueue() 2616 list_for_each_entry(tmp, &wq->flusher_overflow, list) flush_workqueue() 2617 tmp->flush_color = wq->work_color; flush_workqueue() 2619 wq->work_color = work_next_color(wq->work_color); flush_workqueue() 2621 list_splice_tail_init(&wq->flusher_overflow, flush_workqueue() 2622 &wq->flusher_queue); flush_workqueue() 2623 flush_workqueue_prep_pwqs(wq, -1, wq->work_color); flush_workqueue() 2626 if (list_empty(&wq->flusher_queue)) { flush_workqueue() 2627 WARN_ON_ONCE(wq->flush_color != wq->work_color); flush_workqueue() 2635 WARN_ON_ONCE(wq->flush_color == wq->work_color); flush_workqueue() 2636 WARN_ON_ONCE(wq->flush_color != next->flush_color); flush_workqueue() 2639 wq->first_flusher = next; flush_workqueue() 2641 if (flush_workqueue_prep_pwqs(wq, wq->flush_color, -1)) flush_workqueue() 2648 wq->first_flusher = NULL; flush_workqueue() 2652 mutex_unlock(&wq->mutex); flush_workqueue() 2658 * @wq: workqueue to drain 2662 * work items on @wq can 
queue further work items on it. @wq is flushed 2667 void drain_workqueue(struct workqueue_struct *wq) drain_workqueue() argument 2674 * hotter than drain_workqueue() and already looks at @wq->flags. drain_workqueue() 2677 mutex_lock(&wq->mutex); drain_workqueue() 2678 if (!wq->nr_drainers++) drain_workqueue() 2679 wq->flags |= __WQ_DRAINING; drain_workqueue() 2680 mutex_unlock(&wq->mutex); drain_workqueue() 2682 flush_workqueue(wq); drain_workqueue() 2684 mutex_lock(&wq->mutex); drain_workqueue() 2686 for_each_pwq(pwq, wq) { for_each_pwq() 2699 wq->name, flush_cnt); for_each_pwq() 2701 mutex_unlock(&wq->mutex); for_each_pwq() 2705 if (!--wq->nr_drainers) 2706 wq->flags &= ~__WQ_DRAINING; 2707 mutex_unlock(&wq->mutex); 2748 if (pwq->wq->saved_max_active == 1 || pwq->wq->rescuer) start_flush_work() 2749 lock_map_acquire(&pwq->wq->lockdep_map); start_flush_work() 2751 lock_map_acquire_read(&pwq->wq->lockdep_map); start_flush_work() 2752 lock_map_release(&pwq->wq->lockdep_map); start_flush_work() 2900 __queue_work(dwork->cpu, dwork->wq, &dwork->work); flush_delayed_work() 3066 * ->no_numa as it is used for both pool and wq attrs. Instead, copy_workqueue_attrs() 3139 struct workqueue_struct *wq = rcu_free_wq() local 3142 if (!(wq->flags & WQ_UNBOUND)) rcu_free_wq() 3143 free_percpu(wq->cpu_pwqs); rcu_free_wq() 3145 free_workqueue_attrs(wq->unbound_attrs); rcu_free_wq() 3147 kfree(wq->rescuer); rcu_free_wq() 3148 kfree(wq); rcu_free_wq() 3310 struct workqueue_struct *wq = pwq->wq; pwq_unbound_release_workfn() local 3314 if (WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND))) pwq_unbound_release_workfn() 3317 mutex_lock(&wq->mutex); pwq_unbound_release_workfn() 3319 is_last = list_empty(&wq->pwqs); pwq_unbound_release_workfn() 3320 mutex_unlock(&wq->mutex); pwq_unbound_release_workfn() 3329 * If we're the last pwq going away, @wq is already dead and no one pwq_unbound_release_workfn() 3333 call_rcu_sched(&wq->rcu, rcu_free_wq); pwq_unbound_release_workfn() 3346 struct workqueue_struct *wq = pwq->wq; pwq_adjust_max_active() local 3347 bool freezable = wq->flags & WQ_FREEZABLE; pwq_adjust_max_active() 3349 /* for @wq->saved_max_active */ pwq_adjust_max_active() 3350 lockdep_assert_held(&wq->mutex); pwq_adjust_max_active() 3353 if (!freezable && pwq->max_active == wq->saved_max_active) pwq_adjust_max_active() 3364 pwq->max_active = wq->saved_max_active; pwq_adjust_max_active() 3371 * Need to kick a worker after thawed or an unbound wq's pwq_adjust_max_active() 3382 /* initialize newly alloced @pwq which is associated with @wq and @pool */ init_pwq() 3383 static void init_pwq(struct pool_workqueue *pwq, struct workqueue_struct *wq, init_pwq() argument 3391 pwq->wq = wq; init_pwq() 3400 /* sync @pwq with the current state of its associated wq and link it */ link_pwq() 3403 struct workqueue_struct *wq = pwq->wq; link_pwq() local 3405 lockdep_assert_held(&wq->mutex); link_pwq() 3412 pwq->work_color = wq->work_color; link_pwq() 3418 list_add_rcu(&pwq->pwqs_node, &wq->pwqs); link_pwq() 3421 /* obtain a pool matching @attr and create a pwq associating the pool and @wq */ alloc_unbound_pwq() 3422 static struct pool_workqueue *alloc_unbound_pwq(struct workqueue_struct *wq, alloc_unbound_pwq() argument 3440 init_pwq(pwq, wq, pool); alloc_unbound_pwq() 3489 /* install @pwq into @wq's numa_pwq_tbl[] for @node and return the old pwq */ numa_pwq_tbl_install() 3490 static struct pool_workqueue *numa_pwq_tbl_install(struct workqueue_struct *wq, numa_pwq_tbl_install() argument 3497 lockdep_assert_held(&wq->mutex); 
numa_pwq_tbl_install() 3502 old_pwq = rcu_access_pointer(wq->numa_pwq_tbl[node]); numa_pwq_tbl_install() 3503 rcu_assign_pointer(wq->numa_pwq_tbl[node], pwq); numa_pwq_tbl_install() 3509 struct workqueue_struct *wq; /* target workqueue */ member in struct:apply_wqattrs_ctx 3534 apply_wqattrs_prepare(struct workqueue_struct *wq, apply_wqattrs_prepare() argument 3573 ctx->dfl_pwq = alloc_unbound_pwq(wq, new_attrs); apply_wqattrs_prepare() 3579 ctx->pwq_tbl[node] = alloc_unbound_pwq(wq, tmp_attrs); for_each_node() 3593 ctx->wq = wq; 3610 mutex_lock(&ctx->wq->mutex); apply_wqattrs_commit() 3612 copy_workqueue_attrs(ctx->wq->unbound_attrs, ctx->attrs); apply_wqattrs_commit() 3616 ctx->pwq_tbl[node] = numa_pwq_tbl_install(ctx->wq, node, apply_wqattrs_commit() 3621 swap(ctx->wq->dfl_pwq, ctx->dfl_pwq); apply_wqattrs_commit() 3623 mutex_unlock(&ctx->wq->mutex); apply_wqattrs_commit() 3639 static int apply_workqueue_attrs_locked(struct workqueue_struct *wq, apply_workqueue_attrs_locked() argument 3646 if (WARN_ON(!(wq->flags & WQ_UNBOUND))) apply_workqueue_attrs_locked() 3650 if (WARN_ON((wq->flags & __WQ_ORDERED) && !list_empty(&wq->pwqs))) apply_workqueue_attrs_locked() 3653 ctx = apply_wqattrs_prepare(wq, attrs); apply_workqueue_attrs_locked() 3668 * @wq: the target workqueue 3671 * Apply @attrs to an unbound workqueue @wq. Unless disabled, on NUMA 3682 int apply_workqueue_attrs(struct workqueue_struct *wq, apply_workqueue_attrs() argument 3688 ret = apply_workqueue_attrs_locked(wq, attrs); apply_workqueue_attrs() 3695 * wq_update_unbound_numa - update NUMA affinity of a wq for CPU hot[un]plug 3696 * @wq: the target workqueue 3702 * @wq accordingly. 3705 * falls back to @wq->dfl_pwq which may not be optimal but is always 3716 static void wq_update_unbound_numa(struct workqueue_struct *wq, int cpu, wq_update_unbound_numa() argument 3727 if (!wq_numa_enabled || !(wq->flags & WQ_UNBOUND) || wq_update_unbound_numa() 3728 wq->unbound_attrs->no_numa) wq_update_unbound_numa() 3732 * We don't wanna alloc/free wq_attrs for each wq for each CPU. 
wq_update_unbound_numa() 3739 copy_workqueue_attrs(target_attrs, wq->unbound_attrs); wq_update_unbound_numa() 3740 pwq = unbound_pwq_by_node(wq, node); wq_update_unbound_numa() 3748 if (wq_calc_node_cpumask(wq->dfl_pwq->pool->attrs, node, cpu_off, cpumask)) { wq_update_unbound_numa() 3756 pwq = alloc_unbound_pwq(wq, target_attrs); wq_update_unbound_numa() 3759 wq->name); wq_update_unbound_numa() 3764 mutex_lock(&wq->mutex); wq_update_unbound_numa() 3765 old_pwq = numa_pwq_tbl_install(wq, node, pwq); wq_update_unbound_numa() 3769 mutex_lock(&wq->mutex); wq_update_unbound_numa() 3770 spin_lock_irq(&wq->dfl_pwq->pool->lock); wq_update_unbound_numa() 3771 get_pwq(wq->dfl_pwq); wq_update_unbound_numa() 3772 spin_unlock_irq(&wq->dfl_pwq->pool->lock); wq_update_unbound_numa() 3773 old_pwq = numa_pwq_tbl_install(wq, node, wq->dfl_pwq); wq_update_unbound_numa() 3775 mutex_unlock(&wq->mutex); wq_update_unbound_numa() 3779 static int alloc_and_link_pwqs(struct workqueue_struct *wq) alloc_and_link_pwqs() argument 3781 bool highpri = wq->flags & WQ_HIGHPRI; alloc_and_link_pwqs() 3784 if (!(wq->flags & WQ_UNBOUND)) { alloc_and_link_pwqs() 3785 wq->cpu_pwqs = alloc_percpu(struct pool_workqueue); alloc_and_link_pwqs() 3786 if (!wq->cpu_pwqs) alloc_and_link_pwqs() 3791 per_cpu_ptr(wq->cpu_pwqs, cpu); for_each_possible_cpu() 3795 init_pwq(pwq, wq, &cpu_pools[highpri]); for_each_possible_cpu() 3797 mutex_lock(&wq->mutex); for_each_possible_cpu() 3799 mutex_unlock(&wq->mutex); for_each_possible_cpu() 3802 } else if (wq->flags & __WQ_ORDERED) { 3803 ret = apply_workqueue_attrs(wq, ordered_wq_attrs[highpri]); 3805 WARN(!ret && (wq->pwqs.next != &wq->dfl_pwq->pwqs_node || 3806 wq->pwqs.prev != &wq->dfl_pwq->pwqs_node), 3807 "ordering guarantee broken for workqueue %s\n", wq->name); 3810 return apply_workqueue_attrs(wq, unbound_std_wq_attrs[highpri]); 3834 struct workqueue_struct *wq; __alloc_workqueue_key() local 3841 /* allocate wq and format name */ __alloc_workqueue_key() 3843 tbl_size = nr_node_ids * sizeof(wq->numa_pwq_tbl[0]); __alloc_workqueue_key() 3845 wq = kzalloc(sizeof(*wq) + tbl_size, GFP_KERNEL); __alloc_workqueue_key() 3846 if (!wq) __alloc_workqueue_key() 3850 wq->unbound_attrs = alloc_workqueue_attrs(GFP_KERNEL); __alloc_workqueue_key() 3851 if (!wq->unbound_attrs) __alloc_workqueue_key() 3856 vsnprintf(wq->name, sizeof(wq->name), fmt, args); __alloc_workqueue_key() 3860 max_active = wq_clamp_max_active(max_active, flags, wq->name); __alloc_workqueue_key() 3862 /* init wq */ __alloc_workqueue_key() 3863 wq->flags = flags; __alloc_workqueue_key() 3864 wq->saved_max_active = max_active; __alloc_workqueue_key() 3865 mutex_init(&wq->mutex); __alloc_workqueue_key() 3866 atomic_set(&wq->nr_pwqs_to_flush, 0); __alloc_workqueue_key() 3867 INIT_LIST_HEAD(&wq->pwqs); __alloc_workqueue_key() 3868 INIT_LIST_HEAD(&wq->flusher_queue); __alloc_workqueue_key() 3869 INIT_LIST_HEAD(&wq->flusher_overflow); __alloc_workqueue_key() 3870 INIT_LIST_HEAD(&wq->maydays); __alloc_workqueue_key() 3872 lockdep_init_map(&wq->lockdep_map, lock_name, key, 0); __alloc_workqueue_key() 3873 INIT_LIST_HEAD(&wq->list); __alloc_workqueue_key() 3875 if (alloc_and_link_pwqs(wq) < 0) __alloc_workqueue_key() 3889 rescuer->rescue_wq = wq; __alloc_workqueue_key() 3891 wq->name); __alloc_workqueue_key() 3897 wq->rescuer = rescuer; __alloc_workqueue_key() 3902 if ((wq->flags & WQ_SYSFS) && workqueue_sysfs_register(wq)) __alloc_workqueue_key() 3907 * Grab it, adjust max_active and add the new @wq to workqueues __alloc_workqueue_key() 3912 
mutex_lock(&wq->mutex); __alloc_workqueue_key() 3913 for_each_pwq(pwq, wq) __alloc_workqueue_key() 3915 mutex_unlock(&wq->mutex); __alloc_workqueue_key() 3917 list_add_tail_rcu(&wq->list, &workqueues); __alloc_workqueue_key() 3921 return wq; __alloc_workqueue_key() 3924 free_workqueue_attrs(wq->unbound_attrs); __alloc_workqueue_key() 3925 kfree(wq); __alloc_workqueue_key() 3928 destroy_workqueue(wq); __alloc_workqueue_key() 3935 * @wq: target workqueue 3939 void destroy_workqueue(struct workqueue_struct *wq) destroy_workqueue() argument 3945 drain_workqueue(wq); destroy_workqueue() 3948 mutex_lock(&wq->mutex); for_each_pwq() 3949 for_each_pwq(pwq, wq) { for_each_pwq() 3954 mutex_unlock(&wq->mutex); for_each_pwq() 3959 if (WARN_ON((pwq != wq->dfl_pwq) && (pwq->refcnt > 1)) || for_each_pwq() 3962 mutex_unlock(&wq->mutex); for_each_pwq() 3966 mutex_unlock(&wq->mutex); 3969 * wq list is used to freeze wq, remove from list after 3973 list_del_rcu(&wq->list); 3976 workqueue_sysfs_unregister(wq); 3978 if (wq->rescuer) 3979 kthread_stop(wq->rescuer->task); 3981 if (!(wq->flags & WQ_UNBOUND)) { 3986 call_rcu_sched(&wq->rcu, rcu_free_wq); 3989 * We're the sole accessor of @wq at this point. Directly 3991 * @wq will be freed when the last pwq is released. 3994 pwq = rcu_access_pointer(wq->numa_pwq_tbl[node]); for_each_node() 3995 RCU_INIT_POINTER(wq->numa_pwq_tbl[node], NULL); for_each_node() 4000 * Put dfl_pwq. @wq may be freed any time after dfl_pwq is 4003 pwq = wq->dfl_pwq; 4004 wq->dfl_pwq = NULL; 4012 * @wq: target workqueue 4015 * Set max_active of @wq to @max_active. 4020 void workqueue_set_max_active(struct workqueue_struct *wq, int max_active) workqueue_set_max_active() argument 4025 if (WARN_ON(wq->flags & __WQ_ORDERED)) workqueue_set_max_active() 4028 max_active = wq_clamp_max_active(max_active, wq->flags, wq->name); workqueue_set_max_active() 4030 mutex_lock(&wq->mutex); workqueue_set_max_active() 4032 wq->saved_max_active = max_active; workqueue_set_max_active() 4034 for_each_pwq(pwq, wq) workqueue_set_max_active() 4037 mutex_unlock(&wq->mutex); workqueue_set_max_active() 4059 * @wq: target workqueue 4061 * Test whether @wq's cpu workqueue for @cpu is congested. There is 4074 bool workqueue_congested(int cpu, struct workqueue_struct *wq) workqueue_congested() argument 4084 if (!(wq->flags & WQ_UNBOUND)) workqueue_congested() 4085 pwq = per_cpu_ptr(wq->cpu_pwqs, cpu); workqueue_congested() 4087 pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu)); workqueue_congested() 4172 struct workqueue_struct *wq = NULL; print_worker_info() local 4191 probe_kernel_read(&wq, &pwq->wq, sizeof(wq)); print_worker_info() 4192 probe_kernel_read(name, wq->name, sizeof(name) - 1); print_worker_info() 4259 worker == pwq->wq->rescuer ? "(RESCUER)" : "", show_pwq() 4308 struct workqueue_struct *wq; show_workqueue_state() local 4317 list_for_each_entry_rcu(wq, &workqueues, list) { show_workqueue_state() 4321 for_each_pwq(pwq, wq) { for_each_pwq() 4330 pr_info("workqueue %s: flags=0x%x\n", wq->name, wq->flags); 4332 for_each_pwq(pwq, wq) { for_each_pwq() 4553 struct workqueue_struct *wq; workqueue_cpu_up_callback() local 4582 list_for_each_entry(wq, &workqueues, list) 4583 wq_update_unbound_numa(wq, cpu, true); 4601 struct workqueue_struct *wq; workqueue_cpu_down_callback() local 4611 list_for_each_entry(wq, &workqueues, list) workqueue_cpu_down_callback() 4612 wq_update_unbound_numa(wq, cpu, false); workqueue_cpu_down_callback() 4673 * Grabs and releases wq_pool_mutex, wq->mutex and pool->lock's. 
4677 struct workqueue_struct *wq; freeze_workqueues_begin() local 4685 list_for_each_entry(wq, &workqueues, list) { freeze_workqueues_begin() 4686 mutex_lock(&wq->mutex); freeze_workqueues_begin() 4687 for_each_pwq(pwq, wq) freeze_workqueues_begin() 4689 mutex_unlock(&wq->mutex); freeze_workqueues_begin() 4711 struct workqueue_struct *wq; freeze_workqueues_busy() local 4718 list_for_each_entry(wq, &workqueues, list) { freeze_workqueues_busy() 4719 if (!(wq->flags & WQ_FREEZABLE)) freeze_workqueues_busy() 4726 for_each_pwq(pwq, wq) { for_each_pwq() 4748 * Grabs and releases wq_pool_mutex, wq->mutex and pool->lock's. 4752 struct workqueue_struct *wq; thaw_workqueues() local 4763 list_for_each_entry(wq, &workqueues, list) { thaw_workqueues() 4764 mutex_lock(&wq->mutex); thaw_workqueues() 4765 for_each_pwq(pwq, wq) thaw_workqueues() 4767 mutex_unlock(&wq->mutex); thaw_workqueues() 4779 struct workqueue_struct *wq; workqueue_apply_unbound_cpumask() local 4784 list_for_each_entry(wq, &workqueues, list) { workqueue_apply_unbound_cpumask() 4785 if (!(wq->flags & WQ_UNBOUND)) workqueue_apply_unbound_cpumask() 4788 if (wq->flags & __WQ_ORDERED) workqueue_apply_unbound_cpumask() 4791 ctx = apply_wqattrs_prepare(wq, wq->unbound_attrs); workqueue_apply_unbound_cpumask() 4867 struct workqueue_struct *wq; member in struct:wq_device 4875 return wq_dev->wq; dev_to_wq() 4881 struct workqueue_struct *wq = dev_to_wq(dev); per_cpu_show() local 4883 return scnprintf(buf, PAGE_SIZE, "%d\n", (bool)!(wq->flags & WQ_UNBOUND)); per_cpu_show() 4890 struct workqueue_struct *wq = dev_to_wq(dev); max_active_show() local 4892 return scnprintf(buf, PAGE_SIZE, "%d\n", wq->saved_max_active); max_active_show() 4899 struct workqueue_struct *wq = dev_to_wq(dev); max_active_store() local 4905 workqueue_set_max_active(wq, val); max_active_store() 4920 struct workqueue_struct *wq = dev_to_wq(dev); wq_pool_ids_show() local 4928 unbound_pwq_by_node(wq, node)->pool->id); for_each_node() 4940 struct workqueue_struct *wq = dev_to_wq(dev); wq_nice_show() local 4943 mutex_lock(&wq->mutex); wq_nice_show() 4944 written = scnprintf(buf, PAGE_SIZE, "%d\n", wq->unbound_attrs->nice); wq_nice_show() 4945 mutex_unlock(&wq->mutex); wq_nice_show() 4951 static struct workqueue_attrs *wq_sysfs_prep_attrs(struct workqueue_struct *wq) wq_sysfs_prep_attrs() argument 4961 copy_workqueue_attrs(attrs, wq->unbound_attrs); wq_sysfs_prep_attrs() 4968 struct workqueue_struct *wq = dev_to_wq(dev); wq_nice_store() local 4974 attrs = wq_sysfs_prep_attrs(wq); wq_nice_store() 4980 ret = apply_workqueue_attrs_locked(wq, attrs); wq_nice_store() 4993 struct workqueue_struct *wq = dev_to_wq(dev); wq_cpumask_show() local 4996 mutex_lock(&wq->mutex); wq_cpumask_show() 4998 cpumask_pr_args(wq->unbound_attrs->cpumask)); wq_cpumask_show() 4999 mutex_unlock(&wq->mutex); wq_cpumask_show() 5007 struct workqueue_struct *wq = dev_to_wq(dev); wq_cpumask_store() local 5013 attrs = wq_sysfs_prep_attrs(wq); wq_cpumask_store() 5019 ret = apply_workqueue_attrs_locked(wq, attrs); wq_cpumask_store() 5030 struct workqueue_struct *wq = dev_to_wq(dev); wq_numa_show() local 5033 mutex_lock(&wq->mutex); wq_numa_show() 5035 !wq->unbound_attrs->no_numa); wq_numa_show() 5036 mutex_unlock(&wq->mutex); wq_numa_show() 5044 struct workqueue_struct *wq = dev_to_wq(dev); wq_numa_store() local 5050 attrs = wq_sysfs_prep_attrs(wq); wq_numa_store() 5057 ret = apply_workqueue_attrs_locked(wq, attrs); wq_numa_store() 5134 * @wq: the workqueue to register 5136 * Expose @wq in sysfs under 
/sys/bus/workqueue/devices. 5147 int workqueue_sysfs_register(struct workqueue_struct *wq) workqueue_sysfs_register() argument 5157 if (WARN_ON(wq->flags & __WQ_ORDERED)) workqueue_sysfs_register() 5160 wq->wq_dev = wq_dev = kzalloc(sizeof(*wq_dev), GFP_KERNEL); workqueue_sysfs_register() 5164 wq_dev->wq = wq; workqueue_sysfs_register() 5166 wq_dev->dev.init_name = wq->name; workqueue_sysfs_register() 5178 wq->wq_dev = NULL; workqueue_sysfs_register() 5182 if (wq->flags & WQ_UNBOUND) { workqueue_sysfs_register() 5189 wq->wq_dev = NULL; workqueue_sysfs_register() 5202 * @wq: the workqueue to unregister 5204 * If @wq is registered to sysfs by workqueue_sysfs_register(), unregister. 5206 static void workqueue_sysfs_unregister(struct workqueue_struct *wq) workqueue_sysfs_unregister() argument 5208 struct wq_device *wq_dev = wq->wq_dev; workqueue_sysfs_unregister() 5210 if (!wq->wq_dev) workqueue_sysfs_unregister() 5213 wq->wq_dev = NULL; workqueue_sysfs_unregister() 5217 static void workqueue_sysfs_unregister(struct workqueue_struct *wq) { } workqueue_sysfs_unregister() argument 5308 /* create default unbound and ordered wq attrs */ 5317 * An ordered wq should have only one pwq as ordering is
|
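The kernel/workqueue.c hits above expose the internals (__queue_work(), the flush_color/flusher chain, drain_workqueue(), destroy_workqueue()); what drivers actually call is the small API on top. A minimal, hedged sketch of that caller side — every my_drv_/demo_ name here is invented for illustration and not taken from the listing:

    #include <linux/module.h>
    #include <linux/workqueue.h>

    static struct workqueue_struct *my_drv_wq;

    static void demo_fn(struct work_struct *work)
    {
        pr_info("demo work ran\n");
    }
    static DECLARE_WORK(demo_work, demo_fn);

    static int __init my_drv_init(void)
    {
        /* WQ_UNBOUND here ends up in the wq->flags tests seen above */
        my_drv_wq = alloc_workqueue("my_drv", WQ_UNBOUND, 0);
        if (!my_drv_wq)
            return -ENOMEM;
        queue_work(my_drv_wq, &demo_work);  /* lands in __queue_work() */
        return 0;
    }

    static void __exit my_drv_exit(void)
    {
        flush_workqueue(my_drv_wq);   /* drives the flusher machinery above */
        destroy_workqueue(my_drv_wq); /* drains, then frees pwqs via RCU */
    }
    module_init(my_drv_init);
    module_exit(my_drv_exit);
    MODULE_LICENSE("GPL");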
H A D | cpu.c | 64 wait_queue_head_t wq; member in struct:__anon14782 78 .wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq), 116 if (refcount <= 0 && waitqueue_active(&cpu_hotplug.wq)) put_online_cpus() 117 wake_up(&cpu_hotplug.wq); put_online_cpus() 155 prepare_to_wait(&cpu_hotplug.wq, &wait, TASK_UNINTERRUPTIBLE); cpu_hotplug_begin() 161 finish_wait(&cpu_hotplug.wq, &wait); cpu_hotplug_begin()
|
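cpu.c above pairs put_online_cpus() waking cpu_hotplug.wq with cpu_hotplug_begin() sleeping on it until the reader count drains. A hedged sketch of the same wait/wake shape, with the locking elided for brevity (demo_* names invented):

    #include <linux/sched.h>
    #include <linux/wait.h>

    static DECLARE_WAIT_QUEUE_HEAD(demo_wq);
    static int demo_refcount;   /* a real implementation guards this with a lock */

    static void demo_put(void)
    {
        if (--demo_refcount <= 0 && waitqueue_active(&demo_wq))
            wake_up(&demo_wq);
    }

    static void demo_begin(void)
    {
        DEFINE_WAIT(wait);

        while (demo_refcount) {
            prepare_to_wait(&demo_wq, &wait, TASK_UNINTERRUPTIBLE);
            if (demo_refcount)  /* recheck after queueing, as cpu.c does */
                schedule();
        }
        finish_wait(&demo_wq, &wait);
    }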
H A D | padata.c | 143 queue_work_on(target_cpu, pinst->wq, &queue->work); padata_do_parallel() 266 queue_work_on(cb_cpu, pinst->wq, &squeue->work); padata_reorder() 1020 * @wq: workqueue to use for the allocated padata instance 1022 struct padata_instance *padata_alloc_possible(struct workqueue_struct *wq) padata_alloc_possible() argument 1024 return padata_alloc(wq, cpu_possible_mask, cpu_possible_mask); padata_alloc_possible() 1032 * @wq: workqueue to use for the allocated padata instance 1036 struct padata_instance *padata_alloc(struct workqueue_struct *wq, padata_alloc() argument 1064 pinst->wq = wq; padata_alloc()
|
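The padata prototypes above take a caller-supplied workqueue and keep it in pinst->wq for the queue_work_on() calls in padata_do_parallel() and padata_reorder(). A sketch of that handoff, assuming only what the prototypes show (demo_* names invented, error handling trimmed):

    #include <linux/padata.h>
    #include <linux/workqueue.h>

    static struct workqueue_struct *demo_wq;
    static struct padata_instance *demo_pinst;

    static int demo_setup(void)
    {
        demo_wq = alloc_workqueue("demo_padata", WQ_MEM_RECLAIM, 1);
        if (!demo_wq)
            return -ENOMEM;
        /* padata queues its parallel/serial work items on demo_wq */
        demo_pinst = padata_alloc_possible(demo_wq);
        if (!demo_pinst) {
            destroy_workqueue(demo_wq);
            return -ENOMEM;
        }
        return 0;
    }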
/linux-4.4.14/drivers/staging/rdma/ipath/ |
H A D | ipath_srq.c | 52 struct ipath_rwq *wq; ipath_post_srq_receive() local 68 wq = srq->rq.wq; ipath_post_srq_receive() 69 next = wq->head + 1; ipath_post_srq_receive() 72 if (next == wq->tail) { ipath_post_srq_receive() 79 wqe = get_rwqe_ptr(&srq->rq, wq->head); ipath_post_srq_receive() 86 wq->head = next; ipath_post_srq_receive() 139 srq->rq.wq = vmalloc_user(sizeof(struct ipath_rwq) + srq->rq.size * sz); ipath_create_srq() 140 if (!srq->rq.wq) { ipath_create_srq() 156 srq->rq.wq); ipath_create_srq() 175 srq->rq.wq->head = 0; ipath_create_srq() 176 srq->rq.wq->tail = 0; ipath_create_srq() 201 vfree(srq->rq.wq); ipath_create_srq() 220 struct ipath_rwq *wq; ipath_modify_srq() local 239 wq = vmalloc_user(sizeof(struct ipath_rwq) + size * sz); ipath_modify_srq() 240 if (!wq) { ipath_modify_srq() 267 owq = srq->rq.wq; ipath_modify_srq() 284 p = wq->wq; ipath_modify_srq() 299 srq->rq.wq = wq; ipath_modify_srq() 301 wq->head = n; ipath_modify_srq() 302 wq->tail = 0; ipath_modify_srq() 314 ipath_update_mmap_info(dev, ip, s, wq); ipath_modify_srq() 346 vfree(wq); ipath_modify_srq() 376 vfree(srq->rq.wq); ipath_destroy_srq()
|
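ipath_post_srq_receive() above advances a head index and treats head+1 == tail as full; the identical ring appears in the hfi1 and qib copies further down. A stand-alone sketch of that full test (demo_* names invented):

    #include <linux/errno.h>

    struct demo_rwq {
        unsigned int head;  /* producer posts here */
        unsigned int tail;  /* consumer reaps here */
        unsigned int size;
    };

    static int demo_rwq_post(struct demo_rwq *q)
    {
        unsigned int next = q->head + 1;

        if (next >= q->size)
            next = 0;
        if (next == q->tail)
            return -ENOMEM; /* one slot stays empty so full != empty */
        /* ... write the work request into slot q->head ... */
        q->head = next;
        return 0;
    }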
H A D | ipath_qp.c | 359 if (qp->r_rq.wq) { ipath_reset_qp() 360 qp->r_rq.wq->head = 0; ipath_reset_qp() 361 qp->r_rq.wq->tail = 0; ipath_reset_qp() 409 if (qp->r_rq.wq) { ipath_error_qp() 410 struct ipath_rwq *wq; ipath_error_qp() local 417 wq = qp->r_rq.wq; ipath_error_qp() 418 head = wq->head; ipath_error_qp() 421 tail = wq->tail; ipath_error_qp() 430 wq->tail = tail; ipath_error_qp() 689 struct ipath_rwq *wq = qp->r_rq.wq; ipath_compute_aeth() local 694 head = wq->head; ipath_compute_aeth() 697 tail = wq->tail; ipath_compute_aeth() 823 qp->r_rq.wq = NULL; ipath_create_qp() 831 qp->r_rq.wq = vmalloc_user(sizeof(struct ipath_rwq) + ipath_create_qp() 833 if (!qp->r_rq.wq) { ipath_create_qp() 864 vfree(qp->r_rq.wq); ipath_create_qp() 885 if (!qp->r_rq.wq) { ipath_create_qp() 901 qp->r_rq.wq); ipath_create_qp() 939 vfree(qp->r_rq.wq); ipath_create_qp() 1007 vfree(qp->r_rq.wq); ipath_destroy_qp()
|
H A D | ipath_ruc.c | 169 struct ipath_rwq *wq; ipath_get_rwqe() local 192 wq = rq->wq; ipath_get_rwqe() 193 tail = wq->tail; ipath_get_rwqe() 198 if (unlikely(tail == wq->head)) { ipath_get_rwqe() 212 wq->tail = tail; ipath_get_rwqe() 223 n = wq->head; ipath_get_rwqe()
|
H A D | ipath_ud.c | 59 struct ipath_rwq *wq; ipath_ud_loopback() local 122 wq = rq->wq; ipath_ud_loopback() 123 tail = wq->tail; ipath_ud_loopback() 127 if (unlikely(tail == wq->head)) { ipath_ud_loopback() 147 wq->tail = tail; ipath_ud_loopback() 156 n = wq->head; ipath_ud_loopback()
|
/linux-4.4.14/drivers/staging/rdma/hfi1/ |
H A D | srq.c | 69 struct hfi1_rwq *wq; hfi1_post_srq_receive() local 85 wq = srq->rq.wq; hfi1_post_srq_receive() 86 next = wq->head + 1; hfi1_post_srq_receive() 89 if (next == wq->tail) { hfi1_post_srq_receive() 96 wqe = get_rwqe_ptr(&srq->rq, wq->head); hfi1_post_srq_receive() 103 wq->head = next; hfi1_post_srq_receive() 153 srq->rq.wq = vmalloc_user(sizeof(struct hfi1_rwq) + srq->rq.size * sz); hfi1_create_srq() 154 if (!srq->rq.wq) { hfi1_create_srq() 169 srq->rq.wq); hfi1_create_srq() 188 srq->rq.wq->head = 0; hfi1_create_srq() 189 srq->rq.wq->tail = 0; hfi1_create_srq() 214 vfree(srq->rq.wq); hfi1_create_srq() 233 struct hfi1_rwq *wq; hfi1_modify_srq() local 252 wq = vmalloc_user(sizeof(struct hfi1_rwq) + size * sz); hfi1_modify_srq() 253 if (!wq) { hfi1_modify_srq() 280 owq = srq->rq.wq; hfi1_modify_srq() 297 p = wq->wq; hfi1_modify_srq() 312 srq->rq.wq = wq; hfi1_modify_srq() 314 wq->head = n; hfi1_modify_srq() 315 wq->tail = 0; hfi1_modify_srq() 327 hfi1_update_mmap_info(dev, ip, s, wq); hfi1_modify_srq() 363 vfree(wq); hfi1_modify_srq() 393 vfree(srq->rq.wq); hfi1_destroy_srq()
|
H A D | qp.c | 396 if (qp->r_rq.wq) { reset_qp() 397 qp->r_rq.wq->head = 0; reset_qp() 398 qp->r_rq.wq->tail = 0; reset_qp() 513 if (qp->r_rq.wq) { hfi1_error_qp() 514 struct hfi1_rwq *wq; hfi1_error_qp() local 521 wq = qp->r_rq.wq; hfi1_error_qp() 522 head = wq->head; hfi1_error_qp() 525 tail = wq->tail; hfi1_error_qp() 534 wq->tail = tail; hfi1_error_qp() 960 struct hfi1_rwq *wq = qp->r_rq.wq; hfi1_compute_aeth() local 965 head = wq->head; hfi1_compute_aeth() 968 tail = wq->tail; hfi1_compute_aeth() 1099 qp->r_rq.wq = vmalloc_user(sizeof(struct hfi1_rwq) + hfi1_create_qp() 1101 if (!qp->r_rq.wq) { hfi1_create_qp() 1131 vfree(qp->r_rq.wq); hfi1_create_qp() 1153 if (!qp->r_rq.wq) { hfi1_create_qp() 1167 qp->r_rq.wq); hfi1_create_qp() 1228 vfree(qp->r_rq.wq); hfi1_create_qp() 1285 vfree(qp->r_rq.wq); hfi1_destroy_qp()
|
H A D | iowait.h | 152 * @wq: workqueue for schedule 156 struct workqueue_struct *wq) iowait_schedule() 158 queue_work(wq, &wait->iowork); iowait_schedule() 154 iowait_schedule( struct iowait *wait, struct workqueue_struct *wq) iowait_schedule() argument
|
H A D | ruc.c | 161 struct hfi1_rwq *wq; hfi1_get_rwqe() local 184 wq = rq->wq; hfi1_get_rwqe() 185 tail = wq->tail; hfi1_get_rwqe() 189 if (unlikely(tail == wq->head)) { hfi1_get_rwqe() 203 wq->tail = tail; hfi1_get_rwqe() 219 n = wq->head; hfi1_get_rwqe()
|
/linux-4.4.14/drivers/infiniband/hw/qib/ |
H A D | qib_srq.c | 52 struct qib_rwq *wq; qib_post_srq_receive() local 68 wq = srq->rq.wq; qib_post_srq_receive() 69 next = wq->head + 1; qib_post_srq_receive() 72 if (next == wq->tail) { qib_post_srq_receive() 79 wqe = get_rwqe_ptr(&srq->rq, wq->head); qib_post_srq_receive() 86 wq->head = next; qib_post_srq_receive() 136 srq->rq.wq = vmalloc_user(sizeof(struct qib_rwq) + srq->rq.size * sz); qib_create_srq() 137 if (!srq->rq.wq) { qib_create_srq() 152 srq->rq.wq); qib_create_srq() 171 srq->rq.wq->head = 0; qib_create_srq() 172 srq->rq.wq->tail = 0; qib_create_srq() 197 vfree(srq->rq.wq); qib_create_srq() 216 struct qib_rwq *wq; qib_modify_srq() local 235 wq = vmalloc_user(sizeof(struct qib_rwq) + size * sz); qib_modify_srq() 236 if (!wq) { qib_modify_srq() 263 owq = srq->rq.wq; qib_modify_srq() 280 p = wq->wq; qib_modify_srq() 295 srq->rq.wq = wq; qib_modify_srq() 297 wq->head = n; qib_modify_srq() 298 wq->tail = 0; qib_modify_srq() 310 qib_update_mmap_info(dev, ip, s, wq); qib_modify_srq() 346 vfree(wq); qib_modify_srq() 376 vfree(srq->rq.wq); qib_destroy_srq()
|
H A D | qib_qp.c | 411 if (qp->r_rq.wq) { qib_reset_qp() 412 qp->r_rq.wq->head = 0; qib_reset_qp() 413 qp->r_rq.wq->tail = 0; qib_reset_qp() 529 if (qp->r_rq.wq) { qib_error_qp() 530 struct qib_rwq *wq; qib_error_qp() local 537 wq = qp->r_rq.wq; qib_error_qp() 538 head = wq->head; qib_error_qp() 541 tail = wq->tail; qib_error_qp() 550 wq->tail = tail; qib_error_qp() 924 struct qib_rwq *wq = qp->r_rq.wq; qib_compute_aeth() local 929 head = wq->head; qib_compute_aeth() 932 tail = wq->tail; qib_compute_aeth() 1072 qp->r_rq.wq = vmalloc_user( qib_create_qp() 1076 qp->r_rq.wq = __vmalloc( qib_create_qp() 1081 if (!qp->r_rq.wq) { qib_create_qp() 1114 vfree(qp->r_rq.wq); qib_create_qp() 1135 if (!qp->r_rq.wq) { qib_create_qp() 1149 qp->r_rq.wq); qib_create_qp() 1187 vfree(qp->r_rq.wq); qib_create_qp() 1244 vfree(qp->r_rq.wq); qib_destroy_qp()
|
H A D | qib_ruc.c | 143 struct qib_rwq *wq; qib_get_rwqe() local 166 wq = rq->wq; qib_get_rwqe() 167 tail = wq->tail; qib_get_rwqe() 171 if (unlikely(tail == wq->head)) { qib_get_rwqe() 185 wq->tail = tail; qib_get_rwqe() 201 n = wq->head; qib_get_rwqe()
|
/linux-4.4.14/drivers/md/bcache/ |
H A D | request.h | 8 struct workqueue_struct *wq; member in struct:data_insert_op
|
H A D | closure.h | 151 struct workqueue_struct *wq; member in struct:closure::__anon5710::__anon5711 239 struct workqueue_struct *wq) set_closure_fn() 244 cl->wq = wq; set_closure_fn() 251 struct workqueue_struct *wq = cl->wq; closure_queue() local 252 if (wq) { closure_queue() 254 BUG_ON(!queue_work(wq, &cl->work)); closure_queue() 311 * of @wq (or, if @wq is NULL, @fn will be called by closure_put() directly). 338 * Causes @fn to be executed out of @cl, in @wq context (or called directly if 339 * @wq is NULL). 376 struct workqueue_struct *wq, closure_call() 380 continue_at_nobarrier(cl, fn, wq); closure_call() 238 set_closure_fn(struct closure *cl, closure_fn *fn, struct workqueue_struct *wq) set_closure_fn() argument 375 closure_call(struct closure *cl, closure_fn fn, struct workqueue_struct *wq, struct closure *parent) closure_call() argument
|
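closure.h above stores a callback plus a workqueue in the closure and either queues the work or, when wq is NULL, calls the function directly from closure_put(). A reduced sketch of that continuation idea (demo_* names invented; the real closures also track parents and refcounts):

    #include <linux/kernel.h>
    #include <linux/workqueue.h>

    struct demo_closure {
        struct work_struct work;
        void (*fn)(struct demo_closure *cl);
        struct workqueue_struct *wq;
    };

    static void demo_closure_work(struct work_struct *work)
    {
        struct demo_closure *cl = container_of(work, struct demo_closure, work);

        cl->fn(cl);
    }

    static void demo_closure_queue(struct demo_closure *cl)
    {
        if (cl->wq) {
            INIT_WORK(&cl->work, demo_closure_work);
            queue_work(cl->wq, &cl->work);
        } else {
            cl->fn(cl); /* NULL wq: run synchronously, as in closure.h */
        }
    }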
H A D | movinggc.c | 116 continue_at(cl, write_moving_finish, op->wq); write_moving() 126 continue_at(cl, write_moving, io->op.wq); read_moving_submit() 161 io->op.wq = c->moving_gc_wq; read_moving()
|
H A D | request.c | 92 continue_at(cl, bch_data_insert_start, op->wq); bch_data_insert_keys() 143 continue_at(cl, bch_data_insert_keys, op->wq); bch_data_invalidate() 186 set_closure_fn(cl, bch_data_insert_error, op->wq); bch_data_insert_endio() 222 continue_at(cl, bch_data_insert_keys, op->wq); bch_data_insert_start() 261 continue_at(cl, bch_data_insert_keys, op->wq); bch_data_insert_start() 291 continue_at(cl, bch_data_insert_keys, op->wq); bch_data_insert_start() 672 s->iop.wq = bcache_wq; search_alloc()
|
/linux-4.4.14/fs/jfs/ |
H A D | jfs_lock.h | 35 #define __SLEEP_COND(wq, cond, lock_cmd, unlock_cmd) \ 39 add_wait_queue(&wq, &__wait); \ 49 remove_wait_queue(&wq, &__wait); \
|
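__SLEEP_COND above open-codes wait_event() so the caller can pass its own lock/unlock commands around schedule(). The macro body, rewritten as a plain function over a spinlock to show its shape (names invented):

    #include <linux/sched.h>
    #include <linux/spinlock.h>
    #include <linux/wait.h>

    static void demo_sleep_cond(wait_queue_head_t *wq, spinlock_t *lock, int *cond)
    {
        DECLARE_WAITQUEUE(wait, current);

        add_wait_queue(wq, &wait);
        for (;;) {
            set_current_state(TASK_UNINTERRUPTIBLE);
            if (*cond)
                break;
            spin_unlock(lock);  /* the macro's unlock_cmd */
            schedule();
            spin_lock(lock);    /* the macro's lock_cmd */
        }
        __set_current_state(TASK_RUNNING);
        remove_wait_queue(wq, &wait);
    }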
/linux-4.4.14/drivers/usb/chipidea/ |
H A D | otg.c | 161 ci->wq = create_freezable_workqueue("ci_otg"); ci_hdrc_otg_init() 162 if (!ci->wq) { ci_hdrc_otg_init() 179 if (ci->wq) { ci_hdrc_otg_destroy() 180 flush_workqueue(ci->wq); ci_hdrc_otg_destroy() 181 destroy_workqueue(ci->wq); ci_hdrc_otg_destroy()
|
H A D | otg.h | 23 queue_work(ci->wq, &ci->work); ci_otg_queue_work()
|
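ci_hdrc_otg_init()/ci_hdrc_otg_destroy() above show the whole lifecycle of a private freezable workqueue: create at probe, flush then destroy at teardown so no OTG work runs against freed state. The same three calls, reduced to a sketch (demo_* names invented):

    #include <linux/workqueue.h>

    static struct workqueue_struct *demo_wq;

    static int demo_init(void)
    {
        demo_wq = create_freezable_workqueue("demo_otg");
        return demo_wq ? 0 : -ENOMEM;
    }

    static void demo_destroy(void)
    {
        if (demo_wq) {
            flush_workqueue(demo_wq);   /* let queued items finish */
            destroy_workqueue(demo_wq);
        }
    }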
/linux-4.4.14/drivers/net/ethernet/cavium/liquidio/ |
H A D | response_manager.c | 58 oct->dma_comp_wq.wq = create_workqueue("dma-comp"); octeon_setup_response_list() 59 if (!oct->dma_comp_wq.wq) { octeon_setup_response_list() 60 dev_err(&oct->pci_dev->dev, "failed to create wq thread\n"); octeon_setup_response_list() 67 queue_delayed_work(cwq->wq, &cwq->wk.work, msecs_to_jiffies(100)); octeon_setup_response_list() 75 flush_workqueue(oct->dma_comp_wq.wq); octeon_delete_response_list() 76 destroy_workqueue(oct->dma_comp_wq.wq); octeon_delete_response_list() 177 queue_delayed_work(cwq->wq, &cwq->wk.work, msecs_to_jiffies(100)); oct_poll_req_completion()
|
H A D | request_manager.c | 147 oct->check_db_wq[iq_no].wq = create_workqueue("check_iq_db"); octeon_init_instr_queue() 148 if (!oct->check_db_wq[iq_no].wq) { octeon_init_instr_queue() 150 dev_err(&oct->pci_dev->dev, "check db wq create failed for iq %d\n", octeon_init_instr_queue() 160 queue_delayed_work(db_wq->wq, &db_wq->wk.work, msecs_to_jiffies(1)); octeon_init_instr_queue() 171 flush_workqueue(oct->check_db_wq[iq_no].wq); octeon_delete_instr_queue() 172 destroy_workqueue(oct->check_db_wq[iq_no].wq); octeon_delete_instr_queue() 515 queue_delayed_work(db_wq->wq, &db_wq->wk.work, msecs_to_jiffies(1)); check_db_timeout()
|
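Both liquidio files above run a poll loop as a delayed work item that re-queues itself — the 100 ms completion poll and the 1 ms doorbell check. A minimal self-rearming sketch (demo_* names invented):

    #include <linux/jiffies.h>
    #include <linux/workqueue.h>

    static struct workqueue_struct *demo_wq;
    static struct delayed_work demo_poll;

    static void demo_poll_fn(struct work_struct *work)
    {
        /* ... inspect hardware state here ... */
        queue_delayed_work(demo_wq, &demo_poll, msecs_to_jiffies(100));
    }

    static int demo_start(void)
    {
        demo_wq = create_workqueue("demo-poll");
        if (!demo_wq)
            return -ENOMEM;
        INIT_DELAYED_WORK(&demo_poll, demo_poll_fn);
        queue_delayed_work(demo_wq, &demo_poll, msecs_to_jiffies(100));
        return 0;
    }

    static void demo_stop(void)
    {
        cancel_delayed_work_sync(&demo_poll);   /* also blocks self-requeueing */
        destroy_workqueue(demo_wq);
    }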
/linux-4.4.14/net/core/ |
H A D | stream.c | 31 struct socket_wq *wq; sk_stream_write_space() local 37 wq = rcu_dereference(sk->sk_wq); sk_stream_write_space() 38 if (wq_has_sleeper(wq)) sk_stream_write_space() 39 wake_up_interruptible_poll(&wq->wait, POLLOUT | sk_stream_write_space() 41 if (wq && wq->fasync_list && !(sk->sk_shutdown & SEND_SHUTDOWN)) sk_stream_write_space() 42 sock_wake_async(wq, SOCK_WAKE_SPACE, POLL_OUT); sk_stream_write_space()
|
H A D | sock.c | 1982 prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait, 1990 finish_wait(&sk->sk_lock.wq, &wait); 2280 struct socket_wq *wq; sock_def_wakeup() local 2283 wq = rcu_dereference(sk->sk_wq); sock_def_wakeup() 2284 if (wq_has_sleeper(wq)) sock_def_wakeup() 2285 wake_up_interruptible_all(&wq->wait); sock_def_wakeup() 2291 struct socket_wq *wq; sock_def_error_report() local 2294 wq = rcu_dereference(sk->sk_wq); sock_def_error_report() 2295 if (wq_has_sleeper(wq)) sock_def_error_report() 2296 wake_up_interruptible_poll(&wq->wait, POLLERR); sock_def_error_report() 2303 struct socket_wq *wq; sock_def_readable() local 2306 wq = rcu_dereference(sk->sk_wq); sock_def_readable() 2307 if (wq_has_sleeper(wq)) sock_def_readable() 2308 wake_up_interruptible_sync_poll(&wq->wait, POLLIN | POLLPRI | sock_def_readable() 2316 struct socket_wq *wq; sock_def_write_space() local 2324 wq = rcu_dereference(sk->sk_wq); sock_def_write_space() 2325 if (wq_has_sleeper(wq)) sock_def_write_space() 2326 wake_up_interruptible_sync_poll(&wq->wait, POLLOUT | sock_def_write_space() 2384 sk->sk_wq = sock->wq; sock_init_data() 2465 if (waitqueue_active(&sk->sk_lock.wq)) release_sock() 2466 wake_up(&sk->sk_lock.wq); release_sock()
|
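The sk_stream_write_space() and sock_def_*() callbacks above all follow one pattern: dereference sk->sk_wq under rcu_read_lock() and wake only if wq_has_sleeper() says someone is actually waiting. A hedged sketch of that shape (demo name invented):

    #include <linux/poll.h>
    #include <net/sock.h>

    static void demo_write_space(struct sock *sk)
    {
        struct socket_wq *wq;

        rcu_read_lock();
        wq = rcu_dereference(sk->sk_wq);
        if (wq_has_sleeper(wq)) /* barrier-paired check skips idle wakeups */
            wake_up_interruptible_poll(&wq->wait, POLLOUT | POLLWRNORM);
        rcu_read_unlock();
    }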
/linux-4.4.14/drivers/gpu/drm/radeon/ |
H A D | radeon_sa.c | 56 init_waitqueue_head(&sa_manager->wq); radeon_sa_bo_manager_init() 333 spin_lock(&sa_manager->wq.lock); radeon_sa_bo_new() 345 spin_unlock(&sa_manager->wq.lock); radeon_sa_bo_new() 355 spin_unlock(&sa_manager->wq.lock); radeon_sa_bo_new() 359 spin_lock(&sa_manager->wq.lock); radeon_sa_bo_new() 363 sa_manager->wq, radeon_sa_bo_new() 370 spin_unlock(&sa_manager->wq.lock); radeon_sa_bo_new() 386 spin_lock(&sa_manager->wq.lock); radeon_sa_bo_free() 394 wake_up_all_locked(&sa_manager->wq); radeon_sa_bo_free() 395 spin_unlock(&sa_manager->wq.lock); radeon_sa_bo_free() 405 spin_lock(&sa_manager->wq.lock); radeon_sa_bo_dump_debug_info() 422 spin_unlock(&sa_manager->wq.lock); radeon_sa_bo_dump_debug_info()
|
/linux-4.4.14/drivers/gpu/drm/amd/amdgpu/ |
H A D | amdgpu_sa.c | 56 init_waitqueue_head(&sa_manager->wq); amdgpu_sa_bo_manager_init() 336 spin_lock(&sa_manager->wq.lock); amdgpu_sa_bo_new() 348 spin_unlock(&sa_manager->wq.lock); amdgpu_sa_bo_new() 360 spin_unlock(&sa_manager->wq.lock); amdgpu_sa_bo_new() 367 spin_lock(&sa_manager->wq.lock); amdgpu_sa_bo_new() 371 sa_manager->wq, amdgpu_sa_bo_new() 378 spin_unlock(&sa_manager->wq.lock); amdgpu_sa_bo_new() 394 spin_lock(&sa_manager->wq.lock); amdgpu_sa_bo_free() 403 wake_up_all_locked(&sa_manager->wq); amdgpu_sa_bo_free() 404 spin_unlock(&sa_manager->wq.lock); amdgpu_sa_bo_free() 434 spin_lock(&sa_manager->wq.lock); amdgpu_sa_bo_dump_debug_info() 449 spin_unlock(&sa_manager->wq.lock); amdgpu_sa_bo_dump_debug_info()
|
/linux-4.4.14/drivers/power/ |
H A D | ipaq_micro_battery.c | 43 struct workqueue_struct *wq; member in struct:micro_battery 91 queue_delayed_work(mb->wq, &mb->update, msecs_to_jiffies(BATT_PERIOD)); micro_battery_work() 238 mb->wq = create_singlethread_workqueue("ipaq-battery-wq"); micro_batt_probe() 239 if (!mb->wq) micro_batt_probe() 244 queue_delayed_work(mb->wq, &mb->update, 1); micro_batt_probe() 267 destroy_workqueue(mb->wq); micro_batt_probe() 279 destroy_workqueue(mb->wq); micro_batt_remove() 296 queue_delayed_work(mb->wq, &mb->update, msecs_to_jiffies(BATT_PERIOD)); micro_batt_resume()
|
/linux-4.4.14/kernel/sched/ |
H A D | wait.c | 309 * add_wait_queue(&wq, &wait); 323 * remove_wait_queue(&wq, &wait); 387 __wait_on_bit(wait_queue_head_t *wq, struct wait_bit_queue *q, __wait_on_bit() argument 393 prepare_to_wait(wq, &q->wait, mode); __wait_on_bit() 397 finish_wait(wq, &q->wait); __wait_on_bit() 405 wait_queue_head_t *wq = bit_waitqueue(word, bit); out_of_line_wait_on_bit() local 408 return __wait_on_bit(wq, &wait, action, mode); out_of_line_wait_on_bit() 416 wait_queue_head_t *wq = bit_waitqueue(word, bit); out_of_line_wait_on_bit_timeout() local 420 return __wait_on_bit(wq, &wait, action, mode); out_of_line_wait_on_bit_timeout() 425 __wait_on_bit_lock(wait_queue_head_t *wq, struct wait_bit_queue *q, __wait_on_bit_lock() argument 431 prepare_to_wait_exclusive(wq, &q->wait, mode); __wait_on_bit_lock() 437 abort_exclusive_wait(wq, &q->wait, mode, &q->key); __wait_on_bit_lock() 440 finish_wait(wq, &q->wait); __wait_on_bit_lock() 448 wait_queue_head_t *wq = bit_waitqueue(word, bit); out_of_line_wait_on_bit_lock() local 451 return __wait_on_bit_lock(wq, &wait, action, mode); out_of_line_wait_on_bit_lock() 455 void __wake_up_bit(wait_queue_head_t *wq, void *word, int bit) __wake_up_bit() argument 458 if (waitqueue_active(wq)) __wake_up_bit() 459 __wake_up(wq, TASK_NORMAL, 1, &key); __wake_up_bit() 531 int __wait_on_atomic_t(wait_queue_head_t *wq, struct wait_bit_queue *q, __wait_on_atomic_t() argument 538 prepare_to_wait(wq, &q->wait, mode); __wait_on_atomic_t() 544 finish_wait(wq, &q->wait); __wait_on_atomic_t() 562 wait_queue_head_t *wq = atomic_t_waitqueue(p); out_of_line_wait_on_atomic_t() local 565 return __wait_on_atomic_t(wq, &wait, action, mode); out_of_line_wait_on_atomic_t()
|
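wait.c above implements the bit-wait machinery (__wait_on_bit(), bit_waitqueue(), __wake_up_bit()); the caller side is just a flag bit plus a wake. A small sketch of that pairing (demo_* names invented):

    #include <linux/bitops.h>
    #include <linux/sched.h>
    #include <linux/wait.h>

    #define DEMO_BUSY 0
    static unsigned long demo_flags;

    static int demo_wait_idle(void)
    {
        /* sleeps through the __wait_on_bit() path shown above */
        return wait_on_bit(&demo_flags, DEMO_BUSY, TASK_INTERRUPTIBLE);
    }

    static void demo_done(void)
    {
        clear_bit(DEMO_BUSY, &demo_flags);
        smp_mb__after_atomic(); /* order the clear before the waiter check */
        wake_up_bit(&demo_flags, DEMO_BUSY);
    }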
/linux-4.4.14/drivers/hid/ |
H A D | hid-elo.c | 35 static struct workqueue_struct *wq; variable in typeref:struct:workqueue_struct 177 queue_delayed_work(wq, &priv->work, ELO_PERIODIC_READ_INTERVAL); elo_work() 250 queue_delayed_work(wq, &priv->work, ELO_PERIODIC_READ_INTERVAL); elo_probe() 264 flush_workqueue(wq); elo_remove() 288 wq = create_singlethread_workqueue("elousb"); elo_driver_init() 289 if (!wq) elo_driver_init() 294 destroy_workqueue(wq); elo_driver_init() 303 destroy_workqueue(wq); elo_driver_exit()
|
/linux-4.4.14/drivers/infiniband/ulp/ipoib/ |
H A D | ipoib_verbs.c | 159 priv->wq = create_singlethread_workqueue("ipoib_wq"); ipoib_transport_dev_init() 160 if (!priv->wq) { ipoib_transport_dev_init() 248 destroy_workqueue(priv->wq); ipoib_transport_dev_init() 249 priv->wq = NULL; ipoib_transport_dev_init() 277 if (priv->wq) { ipoib_transport_dev_cleanup() 278 flush_workqueue(priv->wq); ipoib_transport_dev_cleanup() 279 destroy_workqueue(priv->wq); ipoib_transport_dev_cleanup() 280 priv->wq = NULL; ipoib_transport_dev_cleanup()
|
H A D | ipoib_multicast.c | 97 queue_delayed_work(priv->wq, &priv->mcast_task, 0); __ipoib_mcast_schedule_join_thread() 104 queue_delayed_work(priv->wq, &priv->mcast_task, HZ); __ipoib_mcast_schedule_join_thread() 106 queue_delayed_work(priv->wq, &priv->mcast_task, 0); __ipoib_mcast_schedule_join_thread() 383 * Defer carrier on work to priv->wq to avoid a ipoib_mcast_join_complete() 391 queue_work(priv->wq, &priv->carrier_on_task); ipoib_mcast_join_complete() 649 queue_delayed_work(priv->wq, &priv->mcast_task, ipoib_mcast_join_task() 685 flush_workqueue(priv->wq); ipoib_mcast_stop_thread()
|
H A D | ipoib_cm.c | 477 queue_delayed_work(priv->wq, ipoib_cm_req_handler() 579 queue_work(priv->wq, &priv->cm.rx_reap_task); ipoib_cm_handle_rx_wc() 606 queue_work(priv->wq, &priv->cm.rx_reap_task); ipoib_cm_handle_rx_wc() 824 queue_work(priv->wq, &priv->cm.reap_task); ipoib_cm_handle_tx_wc() 1254 queue_work(priv->wq, &priv->cm.reap_task); ipoib_cm_tx_handler() 1283 queue_work(priv->wq, &priv->cm.start_task); ipoib_cm_create_tx() 1294 queue_work(priv->wq, &priv->cm.reap_task); ipoib_cm_destroy_tx() 1416 queue_work(priv->wq, &priv->cm.skb_task); ipoib_cm_skb_too_long() 1449 queue_delayed_work(priv->wq, ipoib_cm_stale_task()
|
/linux-4.4.14/drivers/i2c/busses/ |
H A D | i2c-taos-evm.c | 38 static DECLARE_WAIT_QUEUE_HEAD(wq); 112 wait_event_interruptible_timeout(wq, taos->state == TAOS_STATE_IDLE, taos_smbus_xfer() 163 wake_up_interruptible(&wq); taos_interrupt() 168 wake_up_interruptible(&wq); taos_interrupt() 175 wake_up_interruptible(&wq); taos_interrupt() 228 wait_event_interruptible_timeout(wq, taos->state == TAOS_STATE_IDLE, taos_connect() 250 wait_event_interruptible_timeout(wq, taos->state == TAOS_STATE_IDLE, taos_connect()
|
H A D | i2c-ibm_iic.h | 48 wait_queue_head_t wq; member in struct:ibm_iic_private
|
/linux-4.4.14/drivers/infiniband/hw/mlx5/ |
H A D | cq.c | 103 static enum ib_wc_opcode get_umr_comp(struct mlx5_ib_wq *wq, int idx) get_umr_comp() argument 105 switch (wq->wr_data[idx]) { get_umr_comp() 122 struct mlx5_ib_wq *wq, int idx) handle_good_req() 161 wc->opcode = get_umr_comp(wq, idx); handle_good_req() 176 struct mlx5_ib_wq *wq; handle_responder() local 198 wq = &qp->rq; handle_responder() 199 wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)]; handle_responder() 200 ++wq->tail; handle_responder() 418 struct mlx5_ib_wq *wq; mlx5_poll_one() local 474 wq = &(*cur_qp)->sq; mlx5_poll_one() 476 idx = wqe_ctr & (wq->wqe_cnt - 1); mlx5_poll_one() 477 handle_good_req(wc, cqe64, wq, idx); mlx5_poll_one() 478 handle_atomics(*cur_qp, cqe64, wq->last_poll, idx); mlx5_poll_one() 479 wc->wr_id = wq->wrid[idx]; mlx5_poll_one() 480 wq->tail = wq->wqe_head[idx] + 1; mlx5_poll_one() 502 wq = &(*cur_qp)->sq; mlx5_poll_one() 504 idx = wqe_ctr & (wq->wqe_cnt - 1); mlx5_poll_one() 505 wc->wr_id = wq->wrid[idx]; mlx5_poll_one() 506 wq->tail = wq->wqe_head[idx] + 1; mlx5_poll_one() 516 wq = &(*cur_qp)->rq; mlx5_poll_one() 517 wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)]; mlx5_poll_one() 518 ++wq->tail; mlx5_poll_one() 121 handle_good_req(struct ib_wc *wc, struct mlx5_cqe64 *cqe, struct mlx5_ib_wq *wq, int idx) handle_good_req() argument
|
H A D | mr.c | 372 queue_delayed_work(cache->wq, &ent->dwork, __cache_work_func() 377 queue_delayed_work(cache->wq, &ent->dwork, __cache_work_func() 380 queue_work(cache->wq, &ent->work); __cache_work_func() 400 queue_work(cache->wq, &ent->work); __cache_work_func() 402 queue_delayed_work(cache->wq, &ent->dwork, 300 * HZ); __cache_work_func() 450 queue_work(cache->wq, &ent->work); alloc_cached_mr() 455 queue_work(cache->wq, &ent->work); alloc_cached_mr() 485 queue_work(cache->wq, &ent->work); free_cached_mr() 582 cache->wq = create_singlethread_workqueue("mkey_cache"); mlx5_mr_cache_init() 583 if (!cache->wq) { mlx5_mr_cache_init() 607 queue_work(cache->wq, &ent->work); mlx5_mr_cache_init() 622 flush_workqueue(dev->cache.wq); mlx5_mr_cache_cleanup() 629 destroy_workqueue(dev->cache.wq); mlx5_mr_cache_cleanup()
|
H A D | qp.c | 121 struct mlx5_ib_wq *wq = send ? &qp->sq : &qp->rq; mlx5_ib_read_user_wqe() local 129 if (wq->wqe_cnt == 0) { mlx5_ib_read_user_wqe() 135 offset = wq->offset + ((wqe_index % wq->wqe_cnt) << wq->wqe_shift); mlx5_ib_read_user_wqe() 136 wq_end = wq->offset + (wq->wqe_cnt << wq->wqe_shift); mlx5_ib_read_user_wqe() 156 wqe_length = 1 << wq->wqe_shift; mlx5_ib_read_user_wqe() 162 ret = ib_umem_copy_from(buffer + first_copy_length, umem, wq->offset, mlx5_ib_read_user_wqe() 1813 static int mlx5_wq_overflow(struct mlx5_ib_wq *wq, int nreq, struct ib_cq *ib_cq) mlx5_wq_overflow() argument 1818 cur = wq->head - wq->tail; mlx5_wq_overflow() 1819 if (likely(cur + nreq < wq->max_post)) mlx5_wq_overflow() 1824 cur = wq->head - wq->tail; mlx5_wq_overflow() 1827 return cur + nreq >= wq->max_post; mlx5_wq_overflow()
|
/linux-4.4.14/drivers/nfc/ |
H A D | nfcsim.c | 63 static struct workqueue_struct *wq; variable in typeref:struct:workqueue_struct 211 queue_delayed_work(wq, &dev->poll_work, 0); nfcsim_start_poll() 326 queue_delayed_work(wq, &peer->recv_work, msecs_to_jiffies(5)); nfcsim_tx() 427 * Because the wq is ordered and only 1 work item is executed at a time, nfcsim_wq_poll() 431 queue_delayed_work(wq, &dev->poll_work, msecs_to_jiffies(200)); nfcsim_wq_poll() 488 /* We need an ordered wq to ensure that poll_work items are executed nfcsim_init() 491 wq = alloc_ordered_workqueue("nfcsim", 0); nfcsim_init() 492 if (!wq) { nfcsim_init() 533 destroy_workqueue(wq); nfcsim_exit()
|
H A D | pn533.c | 357 struct workqueue_struct *wq; member in struct:pn533 749 queue_work(dev->wq, &dev->cmd_complete_work); pn533_recv_response() 803 queue_work(dev->wq, &dev->cmd_complete_work); pn533_recv_ack() 1066 queue_work(dev->wq, &dev->cmd_work); pn533_wq_cmd_complete() 1651 queue_work(dev->wq, &dev->mi_tm_rx_work); pn533_tm_get_data_complete() 1733 queue_work(dev->wq, &dev->cmd_work); pn533_wq_tm_mi_send() 1794 queue_work(dev->wq, &dev->tg_work); pn533_init_target_complete() 1809 queue_delayed_work(dev->wq, &dev->poll_work, pn533_listen_mode_timer() 1828 queue_delayed_work(dev->wq, &dev->poll_work, pn533_rf_complete() 1876 queue_work(dev->wq, &dev->rf_work); pn533_poll_dep_complete() 1927 queue_work(dev->wq, &dev->rf_work); pn533_poll_dep() 2017 queue_work(dev->wq, &dev->rf_work); pn533_poll_complete() 2544 queue_work(dev->wq, &dev->mi_rx_work); pn533_data_exchange_complete() 2551 queue_work(dev->wq, &dev->mi_tx_work); pn533_data_exchange_complete() 2706 queue_work(dev->wq, &dev->mi_tm_tx_work); pn533_tm_send_complete() 2719 queue_work(dev->wq, &dev->tg_work); pn533_tm_send_complete() 2807 queue_work(dev->wq, &dev->cmd_work); pn533_wq_mi_recv() 2862 queue_work(dev->wq, &dev->cmd_work); pn533_wq_mi_send() 3170 dev->wq = alloc_ordered_workqueue("pn533", 0); pn533_probe() 3171 if (dev->wq == NULL) pn533_probe() 3257 destroy_workqueue(dev->wq); pn533_probe() 3281 destroy_workqueue(dev->wq); pn533_disconnect()
|
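nfcsim's comment above states why it wants alloc_ordered_workqueue(): the wq is ordered and only one work item executes at a time, so poll_work items cannot overlap; pn533 leans on the same property for command sequencing. A minimal sketch of creating one (demo name invented):

    #include <linux/workqueue.h>

    static struct workqueue_struct *demo_ordered;

    static int demo_init(void)
    {
        /* at most one item in flight; queued items run in FIFO order */
        demo_ordered = alloc_ordered_workqueue("demo", 0);
        return demo_ordered ? 0 : -ENOMEM;
    }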
/linux-4.4.14/fs/ |
H A D | userfaultfd.c | 64 wait_queue_t wq; member in struct:userfaultfd_wait_queue 73 static int userfaultfd_wake_function(wait_queue_t *wq, unsigned mode, userfaultfd_wake_function() argument 81 uwq = container_of(wq, struct userfaultfd_wait_queue, wq); userfaultfd_wake_function() 89 ret = wake_up_state(wq->private, mode); userfaultfd_wake_function() 99 * wq->private is read before calling the extern userfaultfd_wake_function() 105 list_del_init(&wq->task_list); userfaultfd_wake_function() 333 init_waitqueue_func_entry(&uwq.wq, userfaultfd_wake_function); handle_userfault() 334 uwq.wq.private = current; handle_userfault() 346 __add_wait_queue(&ctx->fault_pending_wqh, &uwq.wq); handle_userfault() 406 if (!list_empty_careful(&uwq.wq.task_list)) { handle_userfault() 412 list_del(&uwq.wq.task_list); handle_userfault() 489 wait_queue_t *wq; find_userfault() local 498 wq = list_last_entry(&ctx->fault_pending_wqh.task_list, find_userfault() 499 typeof(*wq), task_list); find_userfault() 500 uwq = container_of(wq, struct userfaultfd_wait_queue, wq); find_userfault() 580 * handle_userfault(). The uwq->wq.task_list userfaultfd_ctx_read() 587 list_del(&uwq->wq.task_list); userfaultfd_ctx_read() 588 __add_wait_queue(&ctx->fault_wqh, &uwq->wq); userfaultfd_ctx_read() 1203 wait_queue_t *wq; userfaultfd_show_fdinfo() local 1208 list_for_each_entry(wq, &ctx->fault_pending_wqh.task_list, task_list) { userfaultfd_show_fdinfo() 1209 uwq = container_of(wq, struct userfaultfd_wait_queue, wq); userfaultfd_show_fdinfo() 1213 list_for_each_entry(wq, &ctx->fault_wqh.task_list, task_list) { userfaultfd_show_fdinfo() 1214 uwq = container_of(wq, struct userfaultfd_wait_queue, wq); userfaultfd_show_fdinfo()
|
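userfaultfd.c above installs a custom callback on each waiter with init_waitqueue_func_entry(), so a wake-up can filter by key and dequeue selectively. A sketch of that mechanism with an invented payload (demo_* names; the real code also has the list_del_init() ordering subtleties its comments describe):

    #include <linux/sched.h>
    #include <linux/wait.h>

    struct demo_waiter {
        wait_queue_t wq;
        unsigned long cookie;
    };

    static int demo_wake_fn(wait_queue_t *wq, unsigned mode, int sync, void *key)
    {
        struct demo_waiter *w = container_of(wq, struct demo_waiter, wq);

        if (w->cookie != (unsigned long)key)
            return 0;   /* not our event; leave the waiter queued */
        return wake_up_state(wq->private, mode);
    }

    static void demo_wait_setup(wait_queue_head_t *head, struct demo_waiter *w,
                                unsigned long cookie)
    {
        w->cookie = cookie;
        init_waitqueue_func_entry(&w->wq, demo_wake_fn);
        w->wq.private = current;
        __add_wait_queue(head, &w->wq); /* caller must hold head->lock */
    }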
/linux-4.4.14/drivers/staging/lustre/lustre/include/ |
H A D | lustre_lib.h | 447 * l_wait_event(&obj->wq, ....); (1) 449 * wake_up(&obj->wq): (2) 528 #define __l_wait_event(wq, condition, info, ret, l_add_wait) \ 540 l_add_wait(&wq, &__wait); \ 601 remove_wait_queue(&wq, &__wait); \ 604 #define l_wait_event(wq, condition, info) \ 609 __l_wait_event(wq, condition, __info, \ 614 #define l_wait_event_exclusive(wq, condition, info) \ 619 __l_wait_event(wq, condition, __info, \ 624 #define l_wait_event_exclusive_head(wq, condition, info) \ 629 __l_wait_event(wq, condition, __info, \ 634 #define l_wait_condition(wq, condition) \ 637 l_wait_event(wq, condition, &lwi); \ 640 #define l_wait_condition_exclusive(wq, condition) \ 643 l_wait_event_exclusive(wq, condition, &lwi); \ 646 #define l_wait_condition_exclusive_head(wq, condition) \ 649 l_wait_event_exclusive_head(wq, condition, &lwi); \
|
/linux-4.4.14/drivers/thunderbolt/ |
H A D | tb.c | 214 * Executes on tb->wq. 296 queue_work(tb->wq, &ev->work); tb_schedule_hotplug_handler() 332 if (tb->wq) { thunderbolt_shutdown_and_free() 333 flush_workqueue(tb->wq); thunderbolt_shutdown_and_free() 334 destroy_workqueue(tb->wq); thunderbolt_shutdown_and_free() 335 tb->wq = NULL; thunderbolt_shutdown_and_free() 366 tb->wq = alloc_ordered_workqueue("thunderbolt", 0); thunderbolt_alloc_and_start() 367 if (!tb->wq) thunderbolt_alloc_and_start()
|
H A D | tb.h | 106 struct workqueue_struct *wq; /* ordered workqueue for plug events */ member in struct:tb 113 * wq after cfg has been paused.
|
/linux-4.4.14/drivers/iio/adc/ |
H A D | berlin2-adc.c | 75 wait_queue_head_t wq; member in struct:berlin2_adc_priv 126 ret = wait_event_interruptible_timeout(priv->wq, priv->data_available, berlin2_adc_read() 177 ret = wait_event_interruptible_timeout(priv->wq, priv->data_available, berlin2_adc_tsen_read() 253 wake_up_interruptible(&priv->wq); berlin2_adc_irq() 273 wake_up_interruptible(&priv->wq); berlin2_adc_tsen_irq() 322 init_waitqueue_head(&priv->wq); berlin2_adc_probe()
|
/linux-4.4.14/include/trace/events/ |
H A D | btrfs.h | 997 __field( void *, wq ) 1006 __entry->wq = work->wq; 1013 TP_printk("work=%p (normal_work=%p), wq=%p, func=%pf, ordered_func=%p," 1015 __entry->work, __entry->normal_work, __entry->wq, 1067 TP_PROTO(struct __btrfs_workqueue *wq, const char *name, int high), 1069 TP_ARGS(wq, name, high), 1072 __field( void *, wq ) 1078 __entry->wq = wq; 1083 TP_printk("name=%s%s, wq=%p", __get_str(name), 1086 __entry->wq) 1091 TP_PROTO(struct __btrfs_workqueue *wq, const char *name, int high), 1093 TP_ARGS(wq, name, high) 1098 TP_PROTO(struct __btrfs_workqueue *wq), 1100 TP_ARGS(wq), 1103 __field( void *, wq ) 1107 __entry->wq = wq; 1110 TP_printk("wq=%p", __entry->wq) 1115 TP_PROTO(struct __btrfs_workqueue *wq), 1117 TP_ARGS(wq)
|
H A D | workqueue.h | 55 __entry->workqueue = pwq->wq;
|
/linux-4.4.14/drivers/staging/most/aim-cdev/ |
H A D | cdev.c | 34 wait_queue_head_t wq; member in struct:aim_channel 131 wake_up_interruptible(&channel->wq); aim_close() 143 wake_up_interruptible(&channel->wq); aim_close() 177 channel->wq, aim_write() 238 if (wait_event_interruptible(channel->wq, aim_read() 285 poll_wait(filp, &c->wq, wait); aim_poll() 345 wake_up_interruptible(&channel->wq); aim_disconnect_channel() 373 wake_up_interruptible(&channel->wq); aim_rx_completion() 400 wake_up_interruptible(&channel->wq); aim_tx_completion() 460 init_waitqueue_head(&channel->wq); aim_probe()
|
/linux-4.4.14/drivers/infiniband/hw/mlx4/ |
H A D | cq.c | 610 struct mlx4_ib_wq *wq; mlx4_ib_qp_sw_comp() local 614 wq = is_send ? &qp->sq : &qp->rq; mlx4_ib_qp_sw_comp() 615 cur = wq->head - wq->tail; mlx4_ib_qp_sw_comp() 621 wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)]; mlx4_ib_qp_sw_comp() 624 wq->tail++; mlx4_ib_qp_sw_comp() 662 struct mlx4_ib_wq *wq; mlx4_ib_poll_one() local 749 wq = &(*cur_qp)->sq; mlx4_ib_poll_one() 752 wq->tail += (u16) (wqe_ctr - (u16) wq->tail); mlx4_ib_poll_one() 754 wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)]; mlx4_ib_poll_one() 755 ++wq->tail; mlx4_ib_poll_one() 767 wq = &(*cur_qp)->rq; mlx4_ib_poll_one() 768 tail = wq->tail & (wq->wqe_cnt - 1); mlx4_ib_poll_one() 769 wc->wr_id = wq->wrid[tail]; mlx4_ib_poll_one() 770 ++wq->tail; mlx4_ib_poll_one()
|
H A D | alias_GUID.c | 438 queue_delayed_work(dev->sriov.alias_guid.ports_guid[port_index].wq, aliasguid_query_handler() 570 queue_delayed_work(dev->sriov.alias_guid.ports_guid[port - 1].wq, set_guid_rec() 632 queue_delayed_work(dev->sriov.alias_guid.ports_guid[port - 1].wq, mlx4_ib_invalidate_all_guid_record() 791 queue_delayed_work(dev->sriov.alias_guid.ports_guid[port].wq, mlx4_ib_init_alias_guid_work() 827 flush_workqueue(dev->sriov.alias_guid.ports_guid[i].wq); mlx4_ib_destroy_alias_guid_service() 828 destroy_workqueue(dev->sriov.alias_guid.ports_guid[i].wq); mlx4_ib_destroy_alias_guid_service() 883 dev->sriov.alias_guid.ports_guid[i].wq = mlx4_ib_init_alias_guid_service() 885 if (!dev->sriov.alias_guid.ports_guid[i].wq) { mlx4_ib_init_alias_guid_service() 896 destroy_workqueue(dev->sriov.alias_guid.ports_guid[i].wq); mlx4_ib_init_alias_guid_service() 897 dev->sriov.alias_guid.ports_guid[i].wq = NULL; mlx4_ib_init_alias_guid_service()
|
H A D | mad.c | 1131 queue_work(ctx->wq, &ctx->work); mlx4_ib_tunnel_comp_handler() 1868 ctx->wq = to_mdev(ibdev)->sriov.demux[port - 1].wq; create_pv_resources() 1879 ctx->wq = NULL; create_pv_resources() 1916 flush_workqueue(ctx->wq); destroy_pv_resources() 2013 ctx->wq = create_singlethread_workqueue(name); mlx4_ib_alloc_demux_ctx() 2014 if (!ctx->wq) { mlx4_ib_alloc_demux_ctx() 2031 destroy_workqueue(ctx->wq); mlx4_ib_alloc_demux_ctx() 2032 ctx->wq = NULL; mlx4_ib_alloc_demux_ctx() 2048 flush_workqueue(sqp_ctx->wq); mlx4_ib_free_sqp_ctx() 2077 flush_workqueue(ctx->wq); mlx4_ib_free_demux_ctx() 2084 destroy_workqueue(ctx->wq); mlx4_ib_free_demux_ctx()
|
/linux-4.4.14/drivers/mtd/chips/ |
H A D | cfi_cmdset_0020.c | 159 init_waitqueue_head(&(cfi->chips[i].wq)); cfi_cmdset_0020() 297 wake_up(&chip->wq); do_read_onechip() 352 add_wait_queue(&chip->wq, &wait); do_read_onechip() 355 remove_wait_queue(&chip->wq, &wait); do_read_onechip() 377 wake_up(&chip->wq); do_read_onechip() 485 add_wait_queue(&chip->wq, &wait); do_write_buffer() 488 remove_wait_queue(&chip->wq, &wait); do_write_buffer() 542 add_wait_queue(&chip->wq, &wait); do_write_buffer() 545 remove_wait_queue(&chip->wq, &wait); do_write_buffer() 595 wake_up(&chip->wq); do_write_buffer() 599 wake_up(&chip->wq); do_write_buffer() 778 add_wait_queue(&chip->wq, &wait); do_erase_oneblock() 781 remove_wait_queue(&chip->wq, &wait); do_erase_oneblock() 807 add_wait_queue(&chip->wq, &wait); do_erase_oneblock() 810 remove_wait_queue(&chip->wq, &wait); do_erase_oneblock() 884 wake_up(&chip->wq); do_erase_oneblock() 1007 add_wait_queue(&chip->wq, &wait); cfi_staa_sync() 1011 remove_wait_queue(&chip->wq, &wait); cfi_staa_sync() 1026 wake_up(&chip->wq); cfi_staa_sync() 1077 add_wait_queue(&chip->wq, &wait); do_lock_oneblock() 1080 remove_wait_queue(&chip->wq, &wait); do_lock_oneblock() 1123 wake_up(&chip->wq); do_lock_oneblock() 1223 add_wait_queue(&chip->wq, &wait); do_unlock_oneblock() 1226 remove_wait_queue(&chip->wq, &wait); do_unlock_oneblock() 1269 wake_up(&chip->wq); do_unlock_oneblock() 1359 wake_up(&chip->wq); cfi_staa_suspend() 1385 wake_up(&chip->wq); cfi_staa_resume()
|
H A D | cfi_cmdset_0002.c | 666 init_waitqueue_head(&(cfi->chips[i].wq)); cfi_cmdset_0002() 872 add_wait_queue(&chip->wq, &wait); get_chip() 875 remove_wait_queue(&chip->wq, &wait); get_chip() 907 wake_up(&chip->wq); put_chip() 1022 add_wait_queue(&chip->wq, &wait); xip_udelay() 1025 remove_wait_queue(&chip->wq, &wait); xip_udelay() 1222 add_wait_queue(&chip->wq, &wait); do_read_secsi_onechip() 1227 remove_wait_queue(&chip->wq, &wait); do_read_secsi_onechip() 1241 wake_up(&chip->wq); do_read_secsi_onechip() 1619 add_wait_queue(&chip->wq, &wait); do_write_oneword() 1622 remove_wait_queue(&chip->wq, &wait); do_write_oneword() 1691 add_wait_queue(&cfi->chips[chipnum].wq, &wait); cfi_amdstd_write_words() 1696 remove_wait_queue(&cfi->chips[chipnum].wq, &wait); cfi_amdstd_write_words() 1762 add_wait_queue(&cfi->chips[chipnum].wq, &wait); cfi_amdstd_write_words() 1767 remove_wait_queue(&cfi->chips[chipnum].wq, &wait); cfi_amdstd_write_words() 1868 add_wait_queue(&chip->wq, &wait); do_write_buffer() 1871 remove_wait_queue(&chip->wq, &wait); do_write_buffer() 2279 add_wait_queue(&chip->wq, &wait); do_erase_chip() 2282 remove_wait_queue(&chip->wq, &wait); do_erase_chip() 2368 add_wait_queue(&chip->wq, &wait); do_erase_oneblock() 2371 remove_wait_queue(&chip->wq, &wait); do_erase_oneblock() 2760 add_wait_queue(&chip->wq, &wait); cfi_amdstd_sync() 2766 remove_wait_queue(&chip->wq, &wait); cfi_amdstd_sync() 2781 wake_up(&chip->wq); cfi_amdstd_sync() 2832 wake_up(&chip->wq); cfi_amdstd_suspend() 2858 wake_up(&chip->wq); cfi_amdstd_resume()
|
H A D | cfi_cmdset_0001.c | 573 init_waitqueue_head(&(cfi->chips[i].wq)); cfi_cmdset_0001() 762 init_waitqueue_head(&chip->wq); cfi_intelext_partition_fixup() 883 add_wait_queue(&chip->wq, &wait); chip_ready() 886 remove_wait_queue(&chip->wq, &wait); chip_ready() 967 add_wait_queue(&chip->wq, &wait); get_chip() 970 remove_wait_queue(&chip->wq, &wait); get_chip() 1007 wake_up(&chip->wq); put_chip() 1021 wake_up(&chip->wq); put_chip() 1056 wake_up(&chip->wq); put_chip() 1187 add_wait_queue(&chip->wq, &wait); xip_wait_for_operation() 1190 remove_wait_queue(&chip->wq, &wait); xip_wait_for_operation() 1263 add_wait_queue(&chip->wq, &wait); inval_cache_and_wait_for_operation() 1266 remove_wait_queue(&chip->wq, &wait); inval_cache_and_wait_for_operation() 2041 wake_up(&chip->wq); cfi_intelext_sync() 2548 wake_up(&chip->wq); cfi_intelext_suspend() 2597 wake_up(&chip->wq); cfi_intelext_resume()
|
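All three cfi_cmdset files above repeat one idiom around chip->wq: declare a waiter, go TASK_UNINTERRUPTIBLE, drop the chip lock, schedule() until the state machine calls wake_up(&chip->wq), then relock and retry. Its shape as a sketch (demo_* names invented):

    #include <linux/sched.h>
    #include <linux/spinlock.h>
    #include <linux/wait.h>

    struct demo_chip {
        spinlock_t lock;
        wait_queue_head_t wq;
        int busy;
    };

    /* called with chip->lock held; returns with it held and the chip idle */
    static void demo_wait_ready(struct demo_chip *chip)
    {
        DECLARE_WAITQUEUE(wait, current);

        while (chip->busy) {
            set_current_state(TASK_UNINTERRUPTIBLE);
            add_wait_queue(&chip->wq, &wait);
            spin_unlock(&chip->lock);
            schedule();     /* woken by wake_up(&chip->wq) */
            remove_wait_queue(&chip->wq, &wait);
            spin_lock(&chip->lock);
        }
    }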
/linux-4.4.14/drivers/staging/android/ |
H A D | sync.c | 169 init_waitqueue_head(&fence->wq); sync_fence_alloc() 187 wake_up_all(&fence->wq); fence_check_cb_func() 342 spin_lock_irqsave(&fence->wq.lock, flags); sync_fence_wait_async() 345 __add_wait_queue_tail(&fence->wq, &waiter->work); sync_fence_wait_async() 346 spin_unlock_irqrestore(&fence->wq.lock, flags); sync_fence_wait_async() 361 spin_lock_irqsave(&fence->wq.lock, flags); sync_fence_cancel_async() 366 spin_unlock_irqrestore(&fence->wq.lock, flags); sync_fence_cancel_async() 384 ret = wait_event_interruptible_timeout(fence->wq, sync_fence_wait() 549 poll_wait(file, &fence->wq, wait); sync_fence_poll()
|
H A D | sync_debug.c | 163 spin_lock_irqsave(&fence->wq.lock, flags); sync_print_fence() 164 list_for_each_entry(pos, &fence->wq.task_list, task_list) { sync_print_fence() 174 spin_unlock_irqrestore(&fence->wq.lock, flags); sync_print_fence()
|
/linux-4.4.14/drivers/media/pci/ddbridge/ |
H A D | ddbridge.h | 85 wait_queue_head_t wq; member in struct:ddb_input 113 wait_queue_head_t wq; member in struct:ddb_output 132 wait_queue_head_t wq; member in struct:ddb_i2c
|
H A D | ddbridge-core.c | 89 stat = wait_event_timeout(i2c->wq, i2c->done == 1, HZ); ddb_i2c_cmd() 186 init_waitqueue_head(&i2c->wq); ddb_i2c_init() 911 output->wq, ddb_output_free(output) >= 188) < 0) ts_write() 938 input->wq, ddb_input_avail(input) >= 188) < 0) ts_read() 1019 wake_up(&input->wq); input_tasklet() 1034 wake_up(&output->wq); output_tasklet() 1216 init_waitqueue_head(&input->wq); ddb_input_init() 1232 init_waitqueue_head(&output->wq); ddb_output_init() 1280 wake_up(&i2c->wq); irq_handle_i2c()
|
/linux-4.4.14/drivers/gpu/drm/ |
H A D | drm_flip_work.c | 91 * @wq: the work-queue to run the queued work on 99 struct workqueue_struct *wq) drm_flip_work_commit() 107 queue_work(wq, &work->worker); drm_flip_work_commit() 98 drm_flip_work_commit(struct drm_flip_work *work, struct workqueue_struct *wq) drm_flip_work_commit() argument
|
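drm_flip_work_commit() above takes the workqueue to run queued cleanup on. A usage sketch under stated assumptions: the demo_* names are invented, and running on system_unbound_wq is this sketch's choice, not something the listing shows:

    #include <drm/drm_flip_work.h>

    static struct drm_flip_work demo_unref_work;    /* assumed initialized elsewhere */

    static void demo_flip_done(void *old_fb)
    {
        drm_flip_work_queue(&demo_unref_work, old_fb); /* stash the stale fb */
        drm_flip_work_commit(&demo_unref_work, system_unbound_wq);
    }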
/linux-4.4.14/fs/jffs2/ |
H A D | os-linux.h | 40 #define sleep_on_spinunlock(wq, s) \ 43 add_wait_queue((wq), &__wait); \ 47 remove_wait_queue((wq), &__wait); \
|
/linux-4.4.14/drivers/usb/misc/ |
H A D | appledisplay.c | 88 static struct workqueue_struct *wq; variable in typeref:struct:workqueue_struct 125 queue_delayed_work(wq, &pdata->work, 0); appledisplay_complete() 368 wq = create_singlethread_workqueue("appledisplay"); appledisplay_init() 369 if (!wq) { appledisplay_init() 379 flush_workqueue(wq); appledisplay_exit() 380 destroy_workqueue(wq); appledisplay_exit()
|
/linux-4.4.14/drivers/gpu/host1x/ |
H A D | cdma.h | 53 struct delayed_work wq; /* work queue */ member in struct:buffer_timeout 78 struct buffer_timeout timeout; /* channel's timeout state/wq */
|
H A D | intr.c | 124 wait_queue_head_t *wq = waiter->data; action_wakeup() local 125 wake_up(wq); action_wakeup() 130 wait_queue_head_t *wq = waiter->data; action_wakeup_interruptible() local 131 wake_up_interruptible(wq); action_wakeup_interruptible()
|
H A D | syncpt.c | 191 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); host1x_syncpt_wait() 230 &wq, waiter, &ref); host1x_syncpt_wait() 242 int remain = wait_event_interruptible_timeout(wq, host1x_syncpt_wait()
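host1x_syncpt_wait() declares its wait-queue head on the stack, hands its address to an interrupt-driven waiter, and sleeps with wait_event_interruptible_timeout(). A sketch of that shape, assuming a hypothetical waiter_slot through which the waker finds the queue; real code must also guarantee the waker can no longer touch the head once this function returns:

#include <linux/atomic.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/wait.h>

struct waiter_slot {
	wait_queue_head_t *wq;	/* published by the waiter, used by the waker */
	atomic_t done;
};

static int wait_here(struct waiter_slot *slot)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);	/* valid only for this call */
	long remain;

	slot->wq = &wq;
	remain = wait_event_interruptible_timeout(wq,
			atomic_read(&slot->done), 2 * HZ);
	slot->wq = NULL;	/* unpublish before the stack frame dies */
	if (remain < 0)
		return remain;		/* -ERESTARTSYS: interrupted */
	return remain ? 0 : -EAGAIN;	/* 0 remaining: timed out */
}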
|
/linux-4.4.14/drivers/media/pci/netup_unidvb/ |
H A D | netup_unidvb.h | 82 wait_queue_head_t wq; member in struct:netup_i2c 118 struct workqueue_struct *wq; member in struct:netup_unidvb_dev
|
H A D | netup_unidvb_i2c.c | 124 wake_up(&i2c->wq); netup_i2c_interrupt() 236 if (wait_event_timeout(i2c->wq, netup_i2c_xfer() 322 init_waitqueue_head(&i2c->wq); netup_i2c_init()
|
/linux-4.4.14/crypto/ |
H A D | algif_aead.c | 102 struct socket_wq *wq; aead_wmem_wakeup() local 108 wq = rcu_dereference(sk->sk_wq); aead_wmem_wakeup() 109 if (wq_has_sleeper(wq)) aead_wmem_wakeup() 110 wake_up_interruptible_sync_poll(&wq->wait, POLLIN | aead_wmem_wakeup() 151 struct socket_wq *wq; aead_data_wakeup() local 159 wq = rcu_dereference(sk->sk_wq); aead_data_wakeup() 160 if (wq_has_sleeper(wq)) aead_data_wakeup() 161 wake_up_interruptible_sync_poll(&wq->wait, POLLOUT | aead_data_wakeup()
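Both algif wakeup helpers use the lockless-reader pattern for socket wait queues: dereference sk->sk_wq under RCU, and only enter the wake path (which takes the queue lock) when wq_has_sleeper() says someone is actually waiting. A sketch of the same shape, as illustration rather than the crypto code itself:

#include <linux/poll.h>
#include <net/sock.h>

/* Wake POLLIN waiters without touching the queue lock when it is empty. */
static void my_data_ready(struct sock *sk)
{
	struct socket_wq *wq;

	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (wq_has_sleeper(wq))		/* memory-barrier-paired emptiness test */
		wake_up_interruptible_sync_poll(&wq->wait,
						POLLIN | POLLRDNORM);
	rcu_read_unlock();
}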
|
H A D | pcrypt.c | 35 struct workqueue_struct *wq; member in struct:padata_pcrypt 410 pcrypt->wq = alloc_workqueue("%s", WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, pcrypt_init_padata() 412 if (!pcrypt->wq) pcrypt_init_padata() 415 pcrypt->pinst = padata_alloc_possible(pcrypt->wq); pcrypt_init_padata() 451 destroy_workqueue(pcrypt->wq); pcrypt_init_padata() 465 destroy_workqueue(pcrypt->wq); pcrypt_fini_padata()
|
H A D | algif_skcipher.c | 228 struct socket_wq *wq; skcipher_wmem_wakeup() local 234 wq = rcu_dereference(sk->sk_wq); skcipher_wmem_wakeup() 235 if (wq_has_sleeper(wq)) skcipher_wmem_wakeup() 236 wake_up_interruptible_sync_poll(&wq->wait, POLLIN | skcipher_wmem_wakeup() 278 struct socket_wq *wq; skcipher_data_wakeup() local 284 wq = rcu_dereference(sk->sk_wq); skcipher_data_wakeup() 285 if (wq_has_sleeper(wq)) skcipher_data_wakeup() 286 wake_up_interruptible_sync_poll(&wq->wait, POLLOUT | skcipher_data_wakeup()
|
/linux-4.4.14/net/sunrpc/ |
H A D | svcsock.c | 402 static bool sunrpc_waitqueue_active(wait_queue_head_t *wq) sunrpc_waitqueue_active() argument 404 if (!wq) sunrpc_waitqueue_active() 424 return waitqueue_active(wq); sunrpc_waitqueue_active() 433 wait_queue_head_t *wq = sk_sleep(sk); svc_udp_data_ready() local 442 if (sunrpc_waitqueue_active(wq)) svc_udp_data_ready() 443 wake_up_interruptible(wq); svc_udp_data_ready() 452 wait_queue_head_t *wq = sk_sleep(sk); svc_write_space() local 460 if (sunrpc_waitqueue_active(wq)) { svc_write_space() 463 wake_up_interruptible(wq); svc_write_space() 791 wait_queue_head_t *wq; svc_tcp_listen_data_ready() local 814 wq = sk_sleep(sk); svc_tcp_listen_data_ready() 815 if (sunrpc_waitqueue_active(wq)) svc_tcp_listen_data_ready() 816 wake_up_interruptible_all(wq); svc_tcp_listen_data_ready() 825 wait_queue_head_t *wq = sk_sleep(sk); svc_tcp_state_change() local 836 if (sunrpc_waitqueue_active(wq)) svc_tcp_state_change() 837 wake_up_interruptible_all(wq); svc_tcp_state_change() 843 wait_queue_head_t *wq = sk_sleep(sk); svc_tcp_data_ready() local 851 if (sunrpc_waitqueue_active(wq)) svc_tcp_data_ready() 852 wake_up_interruptible(wq); svc_tcp_data_ready() 1611 wait_queue_head_t *wq; svc_sock_detach() local 1620 wq = sk_sleep(sk); svc_sock_detach() 1621 if (sunrpc_waitqueue_active(wq)) svc_sock_detach() 1622 wake_up_interruptible(wq); svc_sock_detach()
|
H A D | sched.c | 289 wait_queue_head_t *wq = bit_waitqueue(m, RPC_TASK_ACTIVE); rpc_complete_task() local 296 spin_lock_irqsave(&wq->lock, flags); rpc_complete_task() 299 if (waitqueue_active(wq)) rpc_complete_task() 300 __wake_up_locked_key(wq, TASK_NORMAL, &k); rpc_complete_task() 301 spin_unlock_irqrestore(&wq->lock, flags); rpc_complete_task() 309 * to enforce taking of the wq->lock and hence avoid races with 1066 struct workqueue_struct *wq; rpciod_start() local 1073 wq = alloc_workqueue("rpciod", WQ_MEM_RECLAIM | WQ_HIGHPRI, 0); rpciod_start() 1074 rpciod_workqueue = wq; rpciod_start() 1080 struct workqueue_struct *wq = NULL; rpciod_stop() local 1086 wq = rpciod_workqueue; rpciod_stop() 1088 destroy_workqueue(wq); rpciod_stop()
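rpc_complete_task() wakes waiters parked on the per-bit queue returned by bit_waitqueue() while holding wq->lock, so completion cannot race with a task entering the wait. The generic bit-wait API it cooperates with looks roughly like this (a sketch with an invented flag bit, not the rpc code):

#include <linux/bitops.h>
#include <linux/sched.h>
#include <linux/wait.h>

#define MY_FLAG_ACTIVE 0	/* illustrative bit number */

/* Waiter: sleep until bit MY_FLAG_ACTIVE in *word is cleared. */
static int wait_until_inactive(unsigned long *word)
{
	return wait_on_bit(word, MY_FLAG_ACTIVE, TASK_INTERRUPTIBLE);
}

/* Completer: clear the bit, then wake sleepers on bit_waitqueue(word, bit). */
static void mark_inactive(unsigned long *word)
{
	clear_bit(MY_FLAG_ACTIVE, word);
	smp_mb__after_atomic();	/* the clear must be visible before the wake */
	wake_up_bit(word, MY_FLAG_ACTIVE);
}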
|
/linux-4.4.14/drivers/staging/rtl8192e/rtl8192e/ |
H A D | rtl_ps.c | 66 queue_delayed_work_rsl(priv->rtllib->wq, rtl92e_hw_wakeup() 114 queue_delayed_work_rsl(priv->rtllib->wq, rtl92e_enter_sleep() 116 queue_delayed_work_rsl(priv->rtllib->wq, rtl92e_enter_sleep() 206 queue_work_rsl(priv->rtllib->wq, rtl92e_rtllib_ips_leave_wq()
|
/linux-4.4.14/drivers/sbus/char/ |
H A D | bbc_i2c.h | 61 wait_queue_head_t wq; member in struct:bbc_i2c_bus
|
H A D | bbc_i2c.c | 129 add_wait_queue(&bp->wq, &wait); wait_for_pin() 134 bp->wq, wait_for_pin() 143 remove_wait_queue(&bp->wq, &wait); wait_for_pin() 279 wake_up_interruptible(&bp->wq); bbc_i2c_interrupt() 317 init_waitqueue_head(&bp->wq); attach_one_i2c()
|
/linux-4.4.14/drivers/char/tpm/ |
H A D | tpm_ibmvtpm.h | 45 wait_queue_head_t wq; member in struct:ibmvtpm_dev
|
H A D | tpm_ibmvtpm.c | 93 sig = wait_event_interruptible(ibmvtpm->wq, ibmvtpm->res_len != 0); tpm_ibmvtpm_recv() 518 wake_up_interruptible(&ibmvtpm->wq); ibmvtpm_crq_process() 624 init_waitqueue_head(&ibmvtpm->wq); tpm_ibmvtpm_probe()
|
/linux-4.4.14/fs/logfs/ |
H A D | dev_bdev.c | 54 static DECLARE_WAIT_QUEUE_HEAD(wq); 71 wake_up(&wq); 163 wake_up(&wq); erase_end_io() 242 wait_event(wq, atomic_read(&super->s_pending_writes) == 0); bdev_sync()
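logfs's bdev backend counts in-flight writes in an atomic and parks the sync path on a file-scope queue until the count drains to zero. A minimal sketch of the same pattern:

#include <linux/atomic.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(wq);		/* file-scope, as in dev_bdev.c */
static atomic_t pending_writes = ATOMIC_INIT(0);

/* Completion side; may run from interrupt context. */
static void write_done(void)
{
	if (atomic_dec_and_test(&pending_writes))
		wake_up(&wq);
}

/* Sync side: block until every outstanding write has completed. */
static void my_sync(void)
{
	wait_event(wq, atomic_read(&pending_writes) == 0);
}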
|
/linux-4.4.14/fs/nfs/blocklayout/ |
H A D | rpc_pipefs.c | 62 DECLARE_WAITQUEUE(wq, current); bl_resolve_deviceid() 87 add_wait_queue(&nn->bl_wq, &wq); bl_resolve_deviceid() 90 remove_wait_queue(&nn->bl_wq, &wq); bl_resolve_deviceid() 96 remove_wait_queue(&nn->bl_wq, &wq); bl_resolve_deviceid()
|
/linux-4.4.14/drivers/mtd/lpddr/ |
H A D | lpddr_cmds.c | 101 init_waitqueue_head(&chip->wq); lpddr_cmdset() 159 add_wait_queue(&chip->wq, &wait); wait_for_ready() 162 remove_wait_queue(&chip->wq, &wait); wait_for_ready() 258 add_wait_queue(&chip->wq, &wait); get_chip() 261 remove_wait_queue(&chip->wq, &wait); get_chip() 325 add_wait_queue(&chip->wq, &wait); chip_ready() 328 remove_wait_queue(&chip->wq, &wait); chip_ready() 351 wake_up(&chip->wq); put_chip() 365 wake_up(&chip->wq); put_chip() 386 wake_up(&chip->wq); put_chip()
|
/linux-4.4.14/drivers/target/tcm_fc/ |
H A D | tfc_conf.c | 236 struct workqueue_struct *wq; ft_add_tpg() local 267 wq = alloc_workqueue("tcm_fc", 0, 1); ft_add_tpg() 268 if (!wq) { ft_add_tpg() 275 destroy_workqueue(wq); ft_add_tpg() 279 tpg->workqueue = wq; ft_add_tpg()
|
/linux-4.4.14/drivers/net/wireless/cw1200/ |
H A D | cw1200_spi.c | 43 wait_queue_head_t wq; member in struct:hwbus_priv 205 add_wait_queue(&self->wq, &wait); cw1200_spi_lock() 218 remove_wait_queue(&self->wq, &wait); cw1200_spi_lock() 230 wake_up(&self->wq); cw1200_spi_unlock() 413 init_waitqueue_head(&self->wq); cw1200_spi_probe()
|
/linux-4.4.14/drivers/infiniband/hw/mthca/ |
H A D | mthca_cq.c | 489 struct mthca_wq *wq; mthca_poll_one() local 539 wq = &(*cur_qp)->sq; mthca_poll_one() 541 >> wq->wqe_shift); mthca_poll_one() 547 wq = NULL; mthca_poll_one() 553 wq = &(*cur_qp)->rq; mthca_poll_one() 555 wqe_index = wqe >> wq->wqe_shift; mthca_poll_one() 562 wqe_index = wq->max - 1; mthca_poll_one() 566 if (wq) { mthca_poll_one() 567 if (wq->last_comp < wqe_index) mthca_poll_one() 568 wq->tail += wqe_index - wq->last_comp; mthca_poll_one() 570 wq->tail += wqe_index + wq->max - wq->last_comp; mthca_poll_one() 572 wq->last_comp = wqe_index; mthca_poll_one()
|
H A D | mthca_qp.c | 229 static void mthca_wq_reset(struct mthca_wq *wq) mthca_wq_reset() argument 231 wq->next_ind = 0; mthca_wq_reset() 232 wq->last_comp = wq->max - 1; mthca_wq_reset() 233 wq->head = 0; mthca_wq_reset() 234 wq->tail = 0; mthca_wq_reset() 1545 static inline int mthca_wq_overflow(struct mthca_wq *wq, int nreq, mthca_wq_overflow() argument 1551 cur = wq->head - wq->tail; mthca_wq_overflow() 1552 if (likely(cur + nreq < wq->max)) mthca_wq_overflow() 1557 cur = wq->head - wq->tail; mthca_wq_overflow() 1560 return cur + nreq >= wq->max; mthca_wq_overflow()
|
/linux-4.4.14/drivers/net/ethernet/qlogic/qlcnic/ |
H A D | qlcnic_dcb.c | 289 if (dcb->wq) { __qlcnic_dcb_free() 290 destroy_workqueue(dcb->wq); __qlcnic_dcb_free() 291 dcb->wq = NULL; __qlcnic_dcb_free() 314 dcb->wq = create_singlethread_workqueue("qlcnic-dcb"); __qlcnic_dcb_attach() 315 if (!dcb->wq) { __qlcnic_dcb_attach() 339 destroy_workqueue(dcb->wq); __qlcnic_dcb_attach() 340 dcb->wq = NULL; __qlcnic_dcb_attach() 539 queue_delayed_work(dcb->wq, &dcb->aen_work, 0); qlcnic_82xx_dcb_aen_handler() 653 queue_delayed_work(dcb->wq, &dcb->aen_work, 0); qlcnic_83xx_dcb_aen_handler()
|
H A D | qlcnic_dcb.h | 39 struct workqueue_struct *wq; member in struct:qlcnic_dcb
|
/linux-4.4.14/drivers/gpu/drm/tilcdc/ |
H A D | tilcdc_drv.c | 135 flush_workqueue(priv->wq); tilcdc_unload() 136 destroy_workqueue(priv->wq); tilcdc_unload() 168 priv->wq = alloc_ordered_workqueue("tilcdc", 0); tilcdc_load() 169 if (!priv->wq) { tilcdc_load() 343 flush_workqueue(priv->wq); tilcdc_load() 344 destroy_workqueue(priv->wq); tilcdc_load()
|
H A D | tilcdc_drv.h | 77 struct workqueue_struct *wq; member in struct:tilcdc_drm_private
|
/linux-4.4.14/drivers/media/i2c/ |
H A D | saa7110.c | 63 wait_queue_head_t wq; member in struct:saa7110 199 prepare_to_wait(&decoder->wq, &wait, TASK_UNINTERRUPTIBLE); determine_norm() 201 finish_wait(&decoder->wq, &wait); determine_norm() 234 prepare_to_wait(&decoder->wq, &wait, TASK_UNINTERRUPTIBLE); determine_norm() 236 finish_wait(&decoder->wq, &wait); determine_norm() 415 init_waitqueue_head(&decoder->wq); saa7110_probe()
|
H A D | msp3400-driver.h | 102 wait_queue_head_t wq; member in struct:msp_state
|
H A D | msp3400-driver.c | 323 wake_up_interruptible(&state->wq); msp_wake_thread() 330 add_wait_queue(&state->wq, &wait); msp_sleep() 341 remove_wait_queue(&state->wq, &wait); msp_sleep() 712 init_waitqueue_head(&state->wq); msp_probe()
|
/linux-4.4.14/drivers/scsi/libsas/ |
H A D | sas_event.c | 59 struct workqueue_struct *wq = ha->core.shost->work_q; __sas_drain_work() local 67 drain_workqueue(wq); __sas_drain_work()
|
/linux-4.4.14/drivers/iommu/ |
H A D | amd_iommu_v2.c | 59 wait_queue_head_t wq; /* To wait for count == 0 */ member in struct:pasid_state 74 wait_queue_head_t wq; member in struct:device_state 159 wake_up(&dev_state->wq); put_device_state() 272 wake_up(&pasid_state->wq); put_pasid_state() 278 wait_event(pasid_state->wq, !atomic_read(&pasid_state->count)); put_pasid_state_wait() 660 init_waitqueue_head(&pasid_state->wq); amd_iommu_bind_pasid() 785 init_waitqueue_head(&dev_state->wq); amd_iommu_init_device() 882 wait_event(dev_state->wq, !atomic_read(&dev_state->count)); amd_iommu_free_device()
|
/linux-4.4.14/drivers/gpu/drm/atmel-hlcdc/ |
H A D | atmel_hlcdc_dc.h | 130 * @wq: display controller workqueue 139 struct workqueue_struct *wq; member in struct:atmel_hlcdc_dc
|
H A D | atmel_hlcdc_dc.c | 503 dc->wq = alloc_ordered_workqueue("atmel-hlcdc-dc", 0); atmel_hlcdc_dc_load() 504 if (!dc->wq) atmel_hlcdc_dc_load() 555 destroy_workqueue(dc->wq); atmel_hlcdc_dc_load() 566 flush_workqueue(dc->wq); atmel_hlcdc_dc_unload() 579 destroy_workqueue(dc->wq); atmel_hlcdc_dc_unload()
|
H A D | atmel_hlcdc_layer.c | 62 drm_flip_work_commit(&layer->gc, layer->wq); atmel_hlcdc_layer_fb_flip_release_queue() 611 layer->wq = dc->wq; atmel_hlcdc_layer_init()
|
/linux-4.4.14/net/atm/ |
H A D | common.c | 95 struct socket_wq *wq; vcc_def_wakeup() local 98 wq = rcu_dereference(sk->sk_wq); vcc_def_wakeup() 99 if (wq_has_sleeper(wq)) vcc_def_wakeup() 100 wake_up(&wq->wait); vcc_def_wakeup() 114 struct socket_wq *wq; vcc_write_space() local 119 wq = rcu_dereference(sk->sk_wq); vcc_write_space() 120 if (wq_has_sleeper(wq)) vcc_write_space() 121 wake_up_interruptible(&wq->wait); vcc_write_space()
|
/linux-4.4.14/virt/kvm/ |
H A D | async_pf.c | 101 if (waitqueue_active(&vcpu->wq)) async_pf_execute() 102 wake_up_interruptible(&vcpu->wq); async_pf_execute()
|
/linux-4.4.14/drivers/pci/hotplug/ |
H A D | shpchp_core.c | 131 slot->wq = alloc_workqueue("shpchp-%d", 0, 0, slot->number); init_slots() 132 if (!slot->wq) { init_slots() 168 destroy_workqueue(slot->wq); init_slots() 189 destroy_workqueue(slot->wq); cleanup_slots()
|
H A D | pciehp_ctrl.c | 53 queue_work(p_slot->wq, &info->work); pciehp_queue_interrupt_event() 222 queue_work(p_slot->wq, &info->work); pciehp_queue_power_work() 266 queue_delayed_work(p_slot->wq, &p_slot->work, 5*HZ); handle_button_press_event()
|
H A D | pciehp.h | 80 struct workqueue_struct *wq; member in struct:slot
|
/linux-4.4.14/fs/nfs/ |
H A D | callback.c | 112 DEFINE_WAIT(wq); nfs41_callback_svc() 120 prepare_to_wait(&serv->sv_cb_waitq, &wq, TASK_INTERRUPTIBLE); nfs41_callback_svc() 127 finish_wait(&serv->sv_cb_waitq, &wq); nfs41_callback_svc() 135 finish_wait(&serv->sv_cb_waitq, &wq); nfs41_callback_svc()
|
/linux-4.4.14/drivers/gpu/host1x/hw/ |
H A D | cdma_hw.c | 243 timeout.wq); cdma_timeout_handler() 296 INIT_DELAYED_WORK(&cdma->timeout.wq, cdma_timeout_handler); cdma_timeout_init() 308 cancel_delayed_work(&cdma->timeout.wq); cdma_timeout_destroy()
|
/linux-4.4.14/arch/arm/kvm/ |
H A D | psci.c | 73 wait_queue_head_t *wq; kvm_psci_vcpu_on() local 121 wq = kvm_arch_vcpu_wq(vcpu); kvm_psci_vcpu_on() 122 wake_up_interruptible(wq); kvm_psci_vcpu_on()
|
/linux-4.4.14/arch/x86/kernel/ |
H A D | kvm.c | 94 wait_queue_head_t wq; member in struct:kvm_task_sleep_node 144 init_waitqueue_head(&n.wq); kvm_async_pf_task_wait() 150 prepare_to_wait(&n.wq, &wait, TASK_UNINTERRUPTIBLE); kvm_async_pf_task_wait() 169 finish_wait(&n.wq, &wait); kvm_async_pf_task_wait() 181 else if (waitqueue_active(&n->wq)) apf_task_wake_one() 182 wake_up(&n->wq); apf_task_wake_one() 234 init_waitqueue_head(&n->wq); kvm_async_pf_task_wake()
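kvm_async_pf_task_wait() uses the prepare_to_wait()/finish_wait() helpers, which fold the set-state/queue/dequeue bookkeeping of the open-coded loops above into two calls. A condensed sketch, assuming a hypothetical sleep_node:

#include <linux/sched.h>
#include <linux/wait.h>

struct sleep_node {
	wait_queue_head_t wq;
	bool done;
};

static void sleep_until_done(struct sleep_node *n)
{
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait(&n->wq, &wait, TASK_UNINTERRUPTIBLE);
		if (n->done)	/* check only after we are queued */
			break;
		schedule();	/* woken by wake_up(&n->wq) */
	}
	finish_wait(&n->wq, &wait);	/* dequeue and restore TASK_RUNNING */
}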
|
/linux-4.4.14/net/ |
H A D | socket.c | 248 struct socket_wq *wq; sock_alloc_inode() local 253 wq = kmalloc(sizeof(*wq), GFP_KERNEL); sock_alloc_inode() 254 if (!wq) { sock_alloc_inode() 258 init_waitqueue_head(&wq->wait); sock_alloc_inode() 259 wq->fasync_list = NULL; sock_alloc_inode() 260 wq->flags = 0; sock_alloc_inode() 261 RCU_INIT_POINTER(ei->socket.wq, wq); sock_alloc_inode() 275 struct socket_wq *wq; sock_destroy_inode() local 278 wq = rcu_dereference_protected(ei->socket.wq, 1); sock_destroy_inode() 279 kfree_rcu(wq, rcu); sock_destroy_inode() 577 if (rcu_dereference_protected(sock->wq, 1)->fasync_list) sock_release() 1042 struct socket_wq *wq; sock_fasync() local 1048 wq = rcu_dereference_protected(sock->wq, sock_owned_by_user(sk)); sock_fasync() 1049 fasync_helper(fd, filp, on, &wq->fasync_list); sock_fasync() 1051 if (!wq->fasync_list) sock_fasync() 1062 int sock_wake_async(struct socket_wq *wq, int how, int band) sock_wake_async() argument 1064 if (!wq || !wq->fasync_list) sock_wake_async() 1069 if (test_bit(SOCKWQ_ASYNC_WAITDATA, &wq->flags)) sock_wake_async() 1073 if (!test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &wq->flags)) sock_wake_async() 1078 kill_fasync(&wq->fasync_list, SIGIO, band); sock_wake_async() 1081 kill_fasync(&wq->fasync_list, SIGURG, band); sock_wake_async()
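sock_alloc_inode()/sock_destroy_inode() show the lifetime side of the RCU pattern from the crypto entries above: the wq is published with RCU_INIT_POINTER() while the socket is still invisible to readers, and freed with kfree_rcu() so lockless wakers never touch freed memory. A reduced sketch with invented my_* types:

#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/wait.h>

struct my_wq {
	wait_queue_head_t wait;
	struct rcu_head rcu;		/* needed by kfree_rcu() */
};

struct my_obj {
	struct my_wq __rcu *wq;
};

static int my_obj_init(struct my_obj *obj)
{
	struct my_wq *wq = kmalloc(sizeof(*wq), GFP_KERNEL);

	if (!wq)
		return -ENOMEM;
	init_waitqueue_head(&wq->wait);
	RCU_INIT_POINTER(obj->wq, wq);	/* no barrier: obj not yet visible */
	return 0;
}

static void my_obj_destroy(struct my_obj *obj)
{
	struct my_wq *wq = rcu_dereference_protected(obj->wq, 1);

	kfree_rcu(wq, rcu);	/* defers the free past all RCU readers */
}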
|
/linux-4.4.14/drivers/scsi/bfa/ |
H A D | bfad_im.c | 161 wait_queue_head_t *wq; bfa_cb_tskim_done() local 165 wq = (wait_queue_head_t *) cmnd->SCp.ptr; bfa_cb_tskim_done() 168 if (wq) bfa_cb_tskim_done() 169 wake_up(wq); bfa_cb_tskim_done() 298 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); bfad_im_reset_lun_handler() 326 cmnd->SCp.ptr = (char *)&wq; bfad_im_reset_lun_handler() 334 wait_event(wq, test_bit(IO_DONE_BIT, bfad_im_reset_lun_handler() 361 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); bfad_im_reset_bus_handler() 368 cmnd->SCp.ptr = (char *)&wq; bfad_im_reset_bus_handler() 377 wait_event(wq, test_bit(IO_DONE_BIT, bfad_im_reset_bus_handler()
|
/linux-4.4.14/drivers/ps3/ |
H A D | ps3av.c | 47 struct workqueue_struct *wq; member in struct:ps3av 488 queue_work(ps3av->wq, &ps3av->work); ps3av_set_videomode() 959 ps3av->wq = create_singlethread_workqueue("ps3avd"); ps3av_probe() 960 if (!ps3av->wq) { ps3av_probe() 1021 if (ps3av->wq) ps3av_remove() 1022 destroy_workqueue(ps3av->wq); ps3av_remove()
|
/linux-4.4.14/arch/powerpc/platforms/cell/spufs/ |
H A D | spufs.h | 321 #define spufs_wait(wq, condition) \ 326 prepare_to_wait(&(wq), &__wait, TASK_INTERRUPTIBLE); \ 339 finish_wait(&(wq), &__wait); \
|
/linux-4.4.14/drivers/mtd/ |
H A D | mtd_blkdevs.c | 184 queue_work(dev->wq, &dev->work); mtd_blktrans_request() 428 new->wq = alloc_workqueue("%s%d", 0, 0, add_mtd_blktrans_dev() 430 if (!new->wq) add_mtd_blktrans_dev() 474 destroy_workqueue(old->wq); del_mtd_blktrans_dev()
|
/linux-4.4.14/drivers/media/platform/vsp1/ |
H A D | vsp1_video.h | 69 wait_queue_head_t wq; member in struct:vsp1_pipeline
|
/linux-4.4.14/drivers/misc/cxl/ |
H A D | file.c | 298 poll_wait(file, &ctx->wq, poll); afu_poll() 341 prepare_to_wait(&ctx->wq, &wait, TASK_INTERRUPTIBLE); afu_read() 367 finish_wait(&ctx->wq, &wait); afu_read() 407 finish_wait(&ctx->wq, &wait); afu_read()
|
H A D | context.c | 62 init_waitqueue_head(&ctx->wq); cxl_context_init() 234 wake_up_all(&ctx->wq); cxl_context_detach()
|
/linux-4.4.14/drivers/net/ipvlan/ |
H A D | ipvlan.h | 96 struct work_struct wq; member in struct:ipvl_port
|
/linux-4.4.14/drivers/net/wireless/ath/ar5523/ |
H A D | ar5523.h | 93 struct workqueue_struct *wq; member in struct:ar5523
|
/linux-4.4.14/drivers/net/wireless/ath/wcn36xx/ |
H A D | txrx.h | 27 /* broadcast wq ID */
|
/linux-4.4.14/drivers/gpu/drm/msm/ |
H A D | msm_gpu.c | 242 queue_work(priv->wq, &gpu->inactive_work); inactive_handler() 324 queue_work(priv->wq, &gpu->recover_work); hangcheck_handler() 332 queue_work(priv->wq, &gpu->retire_work); hangcheck_handler() 494 queue_work(priv->wq, &gpu->retire_work); msm_gpu_retire()
|
/linux-4.4.14/include/drm/ |
H A D | drm_flip_work.h | 87 struct workqueue_struct *wq);
|
/linux-4.4.14/include/linux/mtd/ |
H A D | blktrans.h | 47 struct workqueue_struct *wq; member in struct:mtd_blktrans_dev
|
H A D | flashchip.h | 90 wait_queue_head_t wq; /* Wait on here when we're waiting for the chip member in struct:flchip
|
H A D | onenand.h | 77 * @wq: [INTERN] wait queue to sleep on if a OneNAND 128 wait_queue_head_t wq; member in struct:onenand_chip
|
/linux-4.4.14/net/nfc/hci/ |
H A D | hci.h | 39 wait_queue_head_t *wq; member in struct:hcp_exec_waiter
|
H A D | command.c | 64 wake_up(hcp_ew->wq); nfc_hci_execute_cb() 73 hcp_ew.wq = &ew_wq; nfc_hci_execute_cmd()
|
/linux-4.4.14/drivers/staging/nvec/ |
H A D | nvec.h | 122 * @wq: The work queue in which @rx_work and @tx_work are executed 149 struct workqueue_struct *wq; member in struct:nvec_chip
|
/linux-4.4.14/drivers/net/caif/ |
H A D | caif_hsi.c | 78 queue_work(cfhsi->wq, &cfhsi->wake_down_work); cfhsi_inactivity_tout() 988 queue_work(cfhsi->wq, &cfhsi->wake_up_work); cfhsi_wake_up_cb() 1107 queue_work(cfhsi->wq, &cfhsi->wake_up_work); cfhsi_xmit() 1204 cfhsi->wq = create_singlethread_workqueue(cfhsi->ndev->name); cfhsi_open() 1205 if (!cfhsi->wq) { cfhsi_open() 1251 destroy_workqueue(cfhsi->wq); cfhsi_open() 1271 flush_workqueue(cfhsi->wq); cfhsi_close() 1282 destroy_workqueue(cfhsi->wq); cfhsi_close()
|
H A D | caif_spi.c | 641 cfspi->wq = create_singlethread_workqueue(dev->name); cfspi_init() 642 if (!cfspi->wq) { cfspi_init() 665 queue_work(cfspi->wq, &cfspi->work); cfspi_init() 692 destroy_workqueue(cfspi->wq); cfspi_uninit()
|
/linux-4.4.14/net/9p/ |
H A D | trans_fd.c | 121 * @wq: current write work 144 struct work_struct wq; member in struct:p9_conn 451 m = container_of(work, struct p9_conn, wq); p9_write_work() 510 schedule_work(&m->wq); p9_write_work() 592 INIT_WORK(&m->wq, p9_write_work); p9_conn_create() 644 schedule_work(&m->wq); p9_poll_mux() 682 schedule_work(&m->wq); p9_fd_request() 853 cancel_work_sync(&m->wq); p9_conn_destroy()
|
/linux-4.4.14/include/linux/power/ |
H A D | charger-manager.h | 53 * @wq: the workqueue to control charger according to the state of 70 struct work_struct wq; member in struct:charger_cable
|
/linux-4.4.14/drivers/mtd/ubi/ |
H A D | block.c | 93 struct workqueue_struct *wq; member in struct:ubiblock 333 queue_work(dev->wq, &pdu->work); ubiblock_queue_rq() 437 dev->wq = alloc_workqueue("%s", 0, 0, gd->disk_name); ubiblock_create() 438 if (!dev->wq) { ubiblock_create() 472 destroy_workqueue(dev->wq); ubiblock_cleanup()
|
/linux-4.4.14/drivers/bluetooth/ |
H A D | bluecard_cs.c | 283 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); bluecard_write_wakeup() 306 prepare_to_wait(&wq, &wait, TASK_INTERRUPTIBLE); bluecard_write_wakeup() 308 finish_wait(&wq, &wait); bluecard_write_wakeup() 320 prepare_to_wait(&wq, &wait, TASK_INTERRUPTIBLE); bluecard_write_wakeup() 322 finish_wait(&wq, &wait); bluecard_write_wakeup()
|
/linux-4.4.14/drivers/md/ |
H A D | dm-era-target.c | 1151 struct workqueue_struct *wq; member in struct:era 1206 queue_work(era->wq, &era->worker); wake_worker() 1373 flush_workqueue(era->wq); stop_worker() 1396 if (era->wq) era_destroy() 1397 destroy_workqueue(era->wq); era_destroy() 1499 era->wq = alloc_ordered_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM); era_ctr() 1500 if (!era->wq) { era_ctr()
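dm-era funnels all metadata work through one work item on an ordered queue, so wake_worker() can be called from anywhere without risking concurrent workers. A sketch of that single-worker arrangement with illustrative names:

#include <linux/errno.h>
#include <linux/workqueue.h>

static struct workqueue_struct *wq;

static void worker_fn(struct work_struct *w)
{
	/* all serialized work happens here, one invocation at a time */
}
static DECLARE_WORK(worker, worker_fn);

static int setup_worker(void)
{
	wq = alloc_ordered_workqueue("my-era", WQ_MEM_RECLAIM);
	if (!wq)
		return -ENOMEM;
	return 0;
}

static void wake_worker(void)
{
	queue_work(wq, &worker);	/* no-op if the item is already queued */
}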
|
/linux-4.4.14/drivers/net/can/spi/ |
H A D | mcp251x.c | 259 struct workqueue_struct *wq; member in struct:mcp251x_priv 531 queue_work(priv->wq, &priv->tx_work); mcp251x_hard_start_xmit() 548 queue_work(priv->wq, &priv->restart_work); mcp251x_do_set_mode() 702 destroy_workqueue(priv->wq); mcp251x_stop() 703 priv->wq = NULL; mcp251x_stop() 964 priv->wq = create_freezable_workqueue("mcp251x_wq"); mcp251x_open() 1230 queue_work(priv->wq, &priv->restart_work); mcp251x_can_resume()
|
/linux-4.4.14/drivers/mtd/nand/ |
H A D | tmio_nand.c | 175 if (unlikely(!waitqueue_active(&nand_chip->controller->wq))) tmio_irq() 178 wake_up(&nand_chip->controller->wq); tmio_irq() 198 timeout = wait_event_timeout(nand_chip->controller->wq, tmio_nand_wait()
|
/linux-4.4.14/drivers/net/ethernet/sfc/ |
H A D | mcdi.h | 46 * @state: Request handling state. Waited for by @wq. 48 * @wq: Wait queue for threads waiting for @state != %MCDI_STATE_RUNNING 68 wait_queue_head_t wq; member in struct:efx_mcdi_iface
|
/linux-4.4.14/fs/ext4/ |
H A D | page-io.c | 215 struct workqueue_struct *wq; ext4_add_complete_io() local 222 wq = sbi->rsv_conversion_wq; ext4_add_complete_io() 224 queue_work(wq, &ei->i_rsv_conversion_work); ext4_add_complete_io()
|
/linux-4.4.14/net/sunrpc/xprtrdma/ |
H A D | frwr_ops.c | 91 struct workqueue_struct *wq; frwr_destroy_recovery_wq() local 96 wq = frwr_recovery_wq; frwr_destroy_recovery_wq() 98 destroy_workqueue(wq); frwr_destroy_recovery_wq()
|
/linux-4.4.14/fs/ocfs2/dlm/ |
H A D | dlmconvert.c | 94 wake_up(&res->wq); dlmconvert_master() 352 wake_up(&res->wq); dlmconvert_remote() 539 wake_up(&res->wq); dlm_convert_lock_handler()
|
/linux-4.4.14/sound/soc/intel/atom/sst/ |
H A D | sst.h | 362 * @ipc_post_msg_wq : wq to post IPC messages context 364 * @mad_wq : MAD driver wq 365 * @post_msg_wq : wq to post IPC messages
|
/linux-4.4.14/drivers/staging/lustre/lustre/ptlrpc/ |
H A D | niobuf.c | 244 wait_queue_head_t *wq; ptlrpc_unregister_bulk() local 277 wq = &req->rq_set->set_waitq; ptlrpc_unregister_bulk() 279 wq = &req->rq_reply_waitq; ptlrpc_unregister_bulk() 286 rc = l_wait_event(*wq, !ptlrpc_client_bulk_active(req), &lwi); ptlrpc_unregister_bulk()
|
/linux-4.4.14/drivers/vfio/ |
H A D | virqfd.c | 213 * Even if we don't queue the job, flush the wq to be sure it's vfio_virqfd_disable()
|
/linux-4.4.14/drivers/infiniband/core/ |
H A D | mad_priv.h | 206 struct workqueue_struct *wq; member in struct:ib_mad_port_private
|
/linux-4.4.14/drivers/gpu/drm/via/ |
H A D | via_dmablit.h | 77 struct work_struct wq; member in struct:_drm_via_blitq
|
/linux-4.4.14/drivers/mfd/ |
H A D | dln2.c | 87 wait_queue_head_t wq; member in struct:dln2_mod_rx_slots 388 ret = wait_event_interruptible(dln2->mod_rx_slots[handle].wq, alloc_rx_slot() 426 wake_up_interruptible(&rxs->wq); free_rx_slot() 753 init_waitqueue_head(&dln2->mod_rx_slots[i].wq); dln2_probe()
|
/linux-4.4.14/drivers/gpu/drm/i2c/ |
H A D | adv7511.c | 38 wait_queue_head_t wq; member in struct:adv7511 448 wake_up_all(&adv7511->wq); adv7511_irq_process() 472 ret = wait_event_interruptible_timeout(adv7511->wq, adv7511_wait_for_edid() 917 init_waitqueue_head(&adv7511->wq); adv7511_probe()
|
/linux-4.4.14/drivers/char/ |
H A D | tlclk.c | 200 static DECLARE_WAIT_QUEUE_HEAD(wq); 253 wait_event_interruptible(wq, got_event); tlclk_read() 873 wake_up(&wq); switchover_timeout() 929 wake_up(&wq); tlclk_interrupt()
|
/linux-4.4.14/fs/ncpfs/ |
H A D | sock.c | 60 wait_queue_head_t wq; member in struct:ncp_request_reply 82 init_waitqueue_head(&req->wq); ncp_alloc_req() 140 wake_up_all(&req->wq); ncp_finish_request() 722 if (wait_event_interruptible(req->wq, req->status == RQ_DONE)) { do_ncp_rpc_call()
|