Searched refs:wq (Results 1 - 200 of 413) sorted by relevance

/linux-4.1.27/drivers/net/ethernet/cisco/enic/
vnic_wq.c
30 static int vnic_wq_alloc_bufs(struct vnic_wq *wq) vnic_wq_alloc_bufs() argument
33 unsigned int i, j, count = wq->ring.desc_count; vnic_wq_alloc_bufs()
37 wq->bufs[i] = kzalloc(VNIC_WQ_BUF_BLK_SZ(count), GFP_ATOMIC); vnic_wq_alloc_bufs()
38 if (!wq->bufs[i]) vnic_wq_alloc_bufs()
43 buf = wq->bufs[i]; vnic_wq_alloc_bufs()
46 buf->desc = (u8 *)wq->ring.descs + vnic_wq_alloc_bufs()
47 wq->ring.desc_size * buf->index; vnic_wq_alloc_bufs()
49 buf->next = wq->bufs[0]; vnic_wq_alloc_bufs()
53 buf->next = wq->bufs[i + 1]; vnic_wq_alloc_bufs()
63 wq->to_use = wq->to_clean = wq->bufs[0]; vnic_wq_alloc_bufs()
68 void vnic_wq_free(struct vnic_wq *wq) vnic_wq_free() argument
73 vdev = wq->vdev; vnic_wq_free()
75 vnic_dev_free_desc_ring(vdev, &wq->ring); vnic_wq_free()
78 if (wq->bufs[i]) { vnic_wq_free()
79 kfree(wq->bufs[i]); vnic_wq_free()
80 wq->bufs[i] = NULL; vnic_wq_free()
84 wq->ctrl = NULL; vnic_wq_free()
87 int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index, vnic_wq_alloc() argument
92 wq->index = index; vnic_wq_alloc()
93 wq->vdev = vdev; vnic_wq_alloc()
95 wq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_WQ, index); vnic_wq_alloc()
96 if (!wq->ctrl) { vnic_wq_alloc()
101 vnic_wq_disable(wq); vnic_wq_alloc()
103 err = vnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, desc_size); vnic_wq_alloc()
107 err = vnic_wq_alloc_bufs(wq); vnic_wq_alloc()
109 vnic_wq_free(wq); vnic_wq_alloc()
116 static void vnic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index, vnic_wq_init_start() argument
122 unsigned int count = wq->ring.desc_count; vnic_wq_init_start()
124 paddr = (u64)wq->ring.base_addr | VNIC_PADDR_TARGET; vnic_wq_init_start()
125 writeq(paddr, &wq->ctrl->ring_base); vnic_wq_init_start()
126 iowrite32(count, &wq->ctrl->ring_size); vnic_wq_init_start()
127 iowrite32(fetch_index, &wq->ctrl->fetch_index); vnic_wq_init_start()
128 iowrite32(posted_index, &wq->ctrl->posted_index); vnic_wq_init_start()
129 iowrite32(cq_index, &wq->ctrl->cq_index); vnic_wq_init_start()
130 iowrite32(error_interrupt_enable, &wq->ctrl->error_interrupt_enable); vnic_wq_init_start()
131 iowrite32(error_interrupt_offset, &wq->ctrl->error_interrupt_offset); vnic_wq_init_start()
132 iowrite32(0, &wq->ctrl->error_status); vnic_wq_init_start()
134 wq->to_use = wq->to_clean = vnic_wq_init_start()
135 &wq->bufs[fetch_index / VNIC_WQ_BUF_BLK_ENTRIES(count)] vnic_wq_init_start()
139 void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index, vnic_wq_init() argument
143 vnic_wq_init_start(wq, cq_index, 0, 0, vnic_wq_init()
148 unsigned int vnic_wq_error_status(struct vnic_wq *wq) vnic_wq_error_status() argument
150 return ioread32(&wq->ctrl->error_status); vnic_wq_error_status()
153 void vnic_wq_enable(struct vnic_wq *wq) vnic_wq_enable() argument
155 iowrite32(1, &wq->ctrl->enable); vnic_wq_enable()
158 int vnic_wq_disable(struct vnic_wq *wq) vnic_wq_disable() argument
162 iowrite32(0, &wq->ctrl->enable); vnic_wq_disable()
166 if (!(ioread32(&wq->ctrl->running))) vnic_wq_disable()
171 pr_err("Failed to disable WQ[%d]\n", wq->index); vnic_wq_disable()
176 void vnic_wq_clean(struct vnic_wq *wq, vnic_wq_clean() argument
177 void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf)) vnic_wq_clean()
181 buf = wq->to_clean; vnic_wq_clean()
183 while (vnic_wq_desc_used(wq) > 0) { vnic_wq_clean()
185 (*buf_clean)(wq, buf); vnic_wq_clean()
187 buf = wq->to_clean = buf->next; vnic_wq_clean()
188 wq->ring.desc_avail++; vnic_wq_clean()
191 wq->to_use = wq->to_clean = wq->bufs[0]; vnic_wq_clean()
193 iowrite32(0, &wq->ctrl->fetch_index); vnic_wq_clean()
194 iowrite32(0, &wq->ctrl->posted_index); vnic_wq_clean()
195 iowrite32(0, &wq->ctrl->error_status); vnic_wq_clean()
197 vnic_dev_clear_desc_ring(&wq->ring); vnic_wq_clean()
vnic_wq.h
91 static inline unsigned int vnic_wq_desc_avail(struct vnic_wq *wq) vnic_wq_desc_avail() argument
94 return wq->ring.desc_avail; vnic_wq_desc_avail()
97 static inline unsigned int vnic_wq_desc_used(struct vnic_wq *wq) vnic_wq_desc_used() argument
100 return wq->ring.desc_count - wq->ring.desc_avail - 1; vnic_wq_desc_used()
103 static inline void *vnic_wq_next_desc(struct vnic_wq *wq) vnic_wq_next_desc() argument
105 return wq->to_use->desc; vnic_wq_next_desc()
108 static inline void vnic_wq_doorbell(struct vnic_wq *wq) vnic_wq_doorbell() argument
116 iowrite32(wq->to_use->index, &wq->ctrl->posted_index); vnic_wq_doorbell()
119 static inline void vnic_wq_post(struct vnic_wq *wq, vnic_wq_post() argument
125 struct vnic_wq_buf *buf = wq->to_use; vnic_wq_post()
137 wq->to_use = buf; vnic_wq_post()
139 wq->ring.desc_avail -= desc_skip_cnt; vnic_wq_post()
142 static inline void vnic_wq_service(struct vnic_wq *wq, vnic_wq_service() argument
144 void (*buf_service)(struct vnic_wq *wq, vnic_wq_service()
150 buf = wq->to_clean; vnic_wq_service()
153 (*buf_service)(wq, cq_desc, buf, opaque); vnic_wq_service()
155 wq->ring.desc_avail++; vnic_wq_service()
157 wq->to_clean = buf->next; vnic_wq_service()
162 buf = wq->to_clean; vnic_wq_service()
166 void vnic_wq_free(struct vnic_wq *wq);
167 int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index,
169 void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index,
172 unsigned int vnic_wq_error_status(struct vnic_wq *wq);
173 void vnic_wq_enable(struct vnic_wq *wq);
174 int vnic_wq_disable(struct vnic_wq *wq);
175 void vnic_wq_clean(struct vnic_wq *wq,
176 void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf));
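
The vnic_wq hits above describe a descriptor ring tracked by desc_count and desc_avail, with vnic_wq_desc_used() reporting desc_count - desc_avail - 1 so that one slot always stays unused. The following is a minimal standalone sketch of that accounting under an illustrative toy_wq type; it is not the enic driver's structure or API.

#include <stdio.h>

struct toy_wq {
    unsigned int desc_count;   /* total slots in the ring */
    unsigned int desc_avail;   /* slots still free for posting */
};

/* Mirrors the vnic_wq_desc_used() arithmetic: one slot is reserved. */
static unsigned int toy_wq_desc_used(const struct toy_wq *wq)
{
    return wq->desc_count - wq->desc_avail - 1;
}

int main(void)
{
    struct toy_wq wq = { .desc_count = 64, .desc_avail = 63 };

    printf("used at start: %u\n", toy_wq_desc_used(&wq));   /* 0 */
    wq.desc_avail -= 5;                                      /* post 5 descriptors */
    printf("used after post: %u\n", toy_wq_desc_used(&wq));  /* 5 */
    return 0;
}
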
enic_res.h
43 static inline void enic_queue_wq_desc_ex(struct vnic_wq *wq, enic_queue_wq_desc_ex() argument
49 struct wq_enet_desc *desc = vnic_wq_next_desc(wq); enic_queue_wq_desc_ex()
65 vnic_wq_post(wq, os_buf, dma_addr, len, sop, eop, desc_skip_cnt, enic_queue_wq_desc_ex()
69 static inline void enic_queue_wq_desc_cont(struct vnic_wq *wq, enic_queue_wq_desc_cont() argument
73 enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len, enic_queue_wq_desc_cont()
78 static inline void enic_queue_wq_desc(struct vnic_wq *wq, void *os_buf, enic_queue_wq_desc() argument
82 enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len, enic_queue_wq_desc()
88 static inline void enic_queue_wq_desc_csum(struct vnic_wq *wq, enic_queue_wq_desc_csum() argument
93 enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len, enic_queue_wq_desc_csum()
100 static inline void enic_queue_wq_desc_csum_l4(struct vnic_wq *wq, enic_queue_wq_desc_csum_l4() argument
105 enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len, enic_queue_wq_desc_csum_l4()
111 static inline void enic_queue_wq_desc_tso(struct vnic_wq *wq, enic_queue_wq_desc_tso() argument
116 enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len, enic_queue_wq_desc_tso()
enic_main.c
139 static void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf) enic_free_wq_buf() argument
141 struct enic *enic = vnic_dev_priv(wq->vdev); enic_free_wq_buf()
154 static void enic_wq_free_buf(struct vnic_wq *wq, enic_wq_free_buf() argument
157 enic_free_wq_buf(wq, buf); enic_wq_free_buf()
167 vnic_wq_service(&enic->wq[q_number], cq_desc, enic_wq_service()
172 vnic_wq_desc_avail(&enic->wq[q_number]) >= enic_wq_service()
187 error_status = vnic_wq_error_status(&enic->wq[i]); enic_log_q_error()
355 static int enic_queue_wq_skb_cont(struct enic *enic, struct vnic_wq *wq, enic_queue_wq_skb_cont() argument
370 enic_queue_wq_desc_cont(wq, skb, dma_addr, skb_frag_size(frag), enic_queue_wq_skb_cont()
378 static int enic_queue_wq_skb_vlan(struct enic *enic, struct vnic_wq *wq, enic_queue_wq_skb_vlan() argument
398 enic_queue_wq_desc(wq, skb, dma_addr, head_len, vlan_tag_insert, enic_queue_wq_skb_vlan()
402 err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback); enic_queue_wq_skb_vlan()
407 static int enic_queue_wq_skb_csum_l4(struct enic *enic, struct vnic_wq *wq, enic_queue_wq_skb_csum_l4() argument
429 enic_queue_wq_desc_csum_l4(wq, skb, dma_addr, head_len, csum_offset, enic_queue_wq_skb_csum_l4()
434 err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback); enic_queue_wq_skb_csum_l4()
439 static int enic_queue_wq_skb_tso(struct enic *enic, struct vnic_wq *wq, enic_queue_wq_skb_tso() argument
476 enic_queue_wq_desc_tso(wq, skb, dma_addr, len, mss, hdr_len, enic_queue_wq_skb_tso()
502 enic_queue_wq_desc_cont(wq, skb, dma_addr, len, enic_queue_wq_skb_tso()
515 struct vnic_wq *wq, struct sk_buff *skb) enic_queue_wq_skb()
533 err = enic_queue_wq_skb_tso(enic, wq, skb, mss, enic_queue_wq_skb()
537 err = enic_queue_wq_skb_csum_l4(enic, wq, skb, vlan_tag_insert, enic_queue_wq_skb()
540 err = enic_queue_wq_skb_vlan(enic, wq, skb, vlan_tag_insert, enic_queue_wq_skb()
545 buf = wq->to_use->prev; enic_queue_wq_skb()
549 while (!buf->os_buf && (buf->next != wq->to_clean)) { enic_queue_wq_skb()
550 enic_free_wq_buf(wq, buf); enic_queue_wq_skb()
551 wq->ring.desc_avail++; enic_queue_wq_skb()
554 wq->to_use = buf->next; enic_queue_wq_skb()
564 struct vnic_wq *wq; enic_hard_start_xmit() local
574 wq = &enic->wq[txq_map]; enic_hard_start_xmit()
591 if (vnic_wq_desc_avail(wq) < enic_hard_start_xmit()
600 enic_queue_wq_skb(enic, wq, skb); enic_hard_start_xmit()
602 if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS) enic_hard_start_xmit()
605 vnic_wq_doorbell(wq); enic_hard_start_xmit()
1345 struct vnic_wq *wq = &enic->wq[wq_index]; enic_poll_msix_wq() local
1352 wq_irq = wq->index; enic_poll_msix_wq()
1500 int wq = enic_cq_wq(enic, i); enic_request_intr() local
1507 enic->msix[intr].devid = &enic->napi[wq]; enic_request_intr()
1668 vnic_wq_enable(&enic->wq[i]); enic_open()
1743 err = vnic_wq_disable(&enic->wq[i]); enic_stop()
1757 vnic_wq_clean(&enic->wq[i], enic_free_wq_buf); enic_stop()
2573 /* Setup notification timer, HW reset task, and wq locks enic_probe()
514 enic_queue_wq_skb(struct enic *enic, struct vnic_wq *wq, struct sk_buff *skb) enic_queue_wq_skb() argument
enic_res.c
98 "vNIC MAC addr %pM wq/rq %d/%d mtu %d\n", enic_get_vnic_config()
187 vnic_wq_free(&enic->wq[i]); enic_free_vnic_resources()
205 "vNIC resources avail: wq %d rq %d cq %d intr %d\n", enic_get_res_counts()
252 vnic_wq_init(&enic->wq[i], enic_init_vnic_resources()
321 "wq %d rq %d cq %d intr %d intr mode %s\n", enic_alloc_vnic_resources()
333 err = vnic_wq_alloc(enic->vdev, &enic->wq[i], i, enic_alloc_vnic_resources()
enic.h
167 ____cacheline_aligned struct vnic_wq wq[ENIC_WQ_MAX]; member in struct:enic
204 static inline unsigned int enic_cq_wq(struct enic *enic, unsigned int wq) enic_cq_wq() argument
206 return enic->rq_count + wq; enic_cq_wq()
231 unsigned int wq) enic_msix_wq_intr()
233 return enic->cq[enic_cq_wq(enic, wq)].interrupt_offset; enic_msix_wq_intr()
230 enic_msix_wq_intr(struct enic *enic, unsigned int wq) enic_msix_wq_intr() argument
/linux-4.1.27/drivers/scsi/fnic/
vnic_wq_copy.h
36 static inline unsigned int vnic_wq_copy_desc_avail(struct vnic_wq_copy *wq) vnic_wq_copy_desc_avail() argument
38 return wq->ring.desc_avail; vnic_wq_copy_desc_avail()
41 static inline unsigned int vnic_wq_copy_desc_in_use(struct vnic_wq_copy *wq) vnic_wq_copy_desc_in_use() argument
43 return wq->ring.desc_count - 1 - wq->ring.desc_avail; vnic_wq_copy_desc_in_use()
46 static inline void *vnic_wq_copy_next_desc(struct vnic_wq_copy *wq) vnic_wq_copy_next_desc() argument
48 struct fcpio_host_req *desc = wq->ring.descs; vnic_wq_copy_next_desc()
49 return &desc[wq->to_use_index]; vnic_wq_copy_next_desc()
52 static inline void vnic_wq_copy_post(struct vnic_wq_copy *wq) vnic_wq_copy_post() argument
55 ((wq->to_use_index + 1) == wq->ring.desc_count) ? vnic_wq_copy_post()
56 (wq->to_use_index = 0) : (wq->to_use_index++); vnic_wq_copy_post()
57 wq->ring.desc_avail--; vnic_wq_copy_post()
66 iowrite32(wq->to_use_index, &wq->ctrl->posted_index); vnic_wq_copy_post()
69 static inline void vnic_wq_copy_desc_process(struct vnic_wq_copy *wq, u16 index) vnic_wq_copy_desc_process() argument
73 if (wq->to_clean_index <= index) vnic_wq_copy_desc_process()
74 cnt = (index - wq->to_clean_index) + 1; vnic_wq_copy_desc_process()
76 cnt = wq->ring.desc_count - wq->to_clean_index + index + 1; vnic_wq_copy_desc_process()
78 wq->to_clean_index = ((index + 1) % wq->ring.desc_count); vnic_wq_copy_desc_process()
79 wq->ring.desc_avail += cnt; vnic_wq_copy_desc_process()
83 static inline void vnic_wq_copy_service(struct vnic_wq_copy *wq, vnic_wq_copy_service() argument
85 void (*q_service)(struct vnic_wq_copy *wq, vnic_wq_copy_service()
88 struct fcpio_host_req *wq_desc = wq->ring.descs; vnic_wq_copy_service()
94 (*q_service)(wq, &wq_desc[wq->to_clean_index]); vnic_wq_copy_service()
96 wq->ring.desc_avail++; vnic_wq_copy_service()
98 curr_index = wq->to_clean_index; vnic_wq_copy_service()
103 ((wq->to_clean_index + 1) == wq->ring.desc_count) ? vnic_wq_copy_service()
104 (wq->to_clean_index = 0) : (wq->to_clean_index++); vnic_wq_copy_service()
111 (wq->to_clean_index == wq->to_use_index)) vnic_wq_copy_service()
116 void vnic_wq_copy_enable(struct vnic_wq_copy *wq);
117 int vnic_wq_copy_disable(struct vnic_wq_copy *wq);
118 void vnic_wq_copy_free(struct vnic_wq_copy *wq);
119 int vnic_wq_copy_alloc(struct vnic_dev *vdev, struct vnic_wq_copy *wq,
121 void vnic_wq_copy_init(struct vnic_wq_copy *wq, unsigned int cq_index,
124 void vnic_wq_copy_clean(struct vnic_wq_copy *wq,
125 void (*q_clean)(struct vnic_wq_copy *wq,
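
The vnic_wq_copy_desc_process() hit above counts how many copy-WQ descriptors a firmware ack covers, handling the case where the acked index has wrapped past the end of the ring. A small self-contained sketch of that wrap-around count, with illustrative parameter names rather than the fnic types, is shown below.

#include <stdio.h>

static unsigned int completed_count(unsigned int to_clean_index,
                                    unsigned int index,
                                    unsigned int desc_count)
{
    if (to_clean_index <= index)
        return index - to_clean_index + 1;
    /* the acked index wrapped around the end of the ring */
    return desc_count - to_clean_index + index + 1;
}

int main(void)
{
    /* ring of 8: cleaning from 6, firmware acked up to 1 -> slots 6,7,0,1 */
    printf("%u\n", completed_count(6, 1, 8)); /* prints 4 */
    printf("%u\n", completed_count(2, 5, 8)); /* prints 4 */
    return 0;
}
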
vnic_wq.c
27 static int vnic_wq_alloc_bufs(struct vnic_wq *wq) vnic_wq_alloc_bufs() argument
31 unsigned int i, j, count = wq->ring.desc_count; vnic_wq_alloc_bufs()
34 vdev = wq->vdev; vnic_wq_alloc_bufs()
37 wq->bufs[i] = kzalloc(VNIC_WQ_BUF_BLK_SZ, GFP_ATOMIC); vnic_wq_alloc_bufs()
38 if (!wq->bufs[i]) { vnic_wq_alloc_bufs()
45 buf = wq->bufs[i]; vnic_wq_alloc_bufs()
48 buf->desc = (u8 *)wq->ring.descs + vnic_wq_alloc_bufs()
49 wq->ring.desc_size * buf->index; vnic_wq_alloc_bufs()
51 buf->next = wq->bufs[0]; vnic_wq_alloc_bufs()
54 buf->next = wq->bufs[i + 1]; vnic_wq_alloc_bufs()
62 wq->to_use = wq->to_clean = wq->bufs[0]; vnic_wq_alloc_bufs()
67 void vnic_wq_free(struct vnic_wq *wq) vnic_wq_free() argument
72 vdev = wq->vdev; vnic_wq_free()
74 vnic_dev_free_desc_ring(vdev, &wq->ring); vnic_wq_free()
77 kfree(wq->bufs[i]); vnic_wq_free()
78 wq->bufs[i] = NULL; vnic_wq_free()
81 wq->ctrl = NULL; vnic_wq_free()
85 int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index, vnic_wq_alloc() argument
90 wq->index = index; vnic_wq_alloc()
91 wq->vdev = vdev; vnic_wq_alloc()
93 wq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_WQ, index); vnic_wq_alloc()
94 if (!wq->ctrl) { vnic_wq_alloc()
99 vnic_wq_disable(wq); vnic_wq_alloc()
101 err = vnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, desc_size); vnic_wq_alloc()
105 err = vnic_wq_alloc_bufs(wq); vnic_wq_alloc()
107 vnic_wq_free(wq); vnic_wq_alloc()
114 void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index, vnic_wq_init() argument
120 paddr = (u64)wq->ring.base_addr | VNIC_PADDR_TARGET; vnic_wq_init()
121 writeq(paddr, &wq->ctrl->ring_base); vnic_wq_init()
122 iowrite32(wq->ring.desc_count, &wq->ctrl->ring_size); vnic_wq_init()
123 iowrite32(0, &wq->ctrl->fetch_index); vnic_wq_init()
124 iowrite32(0, &wq->ctrl->posted_index); vnic_wq_init()
125 iowrite32(cq_index, &wq->ctrl->cq_index); vnic_wq_init()
126 iowrite32(error_interrupt_enable, &wq->ctrl->error_interrupt_enable); vnic_wq_init()
127 iowrite32(error_interrupt_offset, &wq->ctrl->error_interrupt_offset); vnic_wq_init()
128 iowrite32(0, &wq->ctrl->error_status); vnic_wq_init()
131 unsigned int vnic_wq_error_status(struct vnic_wq *wq) vnic_wq_error_status() argument
133 return ioread32(&wq->ctrl->error_status); vnic_wq_error_status()
136 void vnic_wq_enable(struct vnic_wq *wq) vnic_wq_enable() argument
138 iowrite32(1, &wq->ctrl->enable); vnic_wq_enable()
141 int vnic_wq_disable(struct vnic_wq *wq) vnic_wq_disable() argument
145 iowrite32(0, &wq->ctrl->enable); vnic_wq_disable()
149 if (!(ioread32(&wq->ctrl->running))) vnic_wq_disable()
154 printk(KERN_ERR "Failed to disable WQ[%d]\n", wq->index); vnic_wq_disable()
159 void vnic_wq_clean(struct vnic_wq *wq, vnic_wq_clean() argument
160 void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf)) vnic_wq_clean()
164 BUG_ON(ioread32(&wq->ctrl->enable)); vnic_wq_clean()
166 buf = wq->to_clean; vnic_wq_clean()
168 while (vnic_wq_desc_used(wq) > 0) { vnic_wq_clean()
170 (*buf_clean)(wq, buf); vnic_wq_clean()
172 buf = wq->to_clean = buf->next; vnic_wq_clean()
173 wq->ring.desc_avail++; vnic_wq_clean()
176 wq->to_use = wq->to_clean = wq->bufs[0]; vnic_wq_clean()
178 iowrite32(0, &wq->ctrl->fetch_index); vnic_wq_clean()
179 iowrite32(0, &wq->ctrl->posted_index); vnic_wq_clean()
180 iowrite32(0, &wq->ctrl->error_status); vnic_wq_clean()
182 vnic_dev_clear_desc_ring(&wq->ring); vnic_wq_clean()
vnic_wq_copy.c
25 void vnic_wq_copy_enable(struct vnic_wq_copy *wq) vnic_wq_copy_enable() argument
27 iowrite32(1, &wq->ctrl->enable); vnic_wq_copy_enable()
30 int vnic_wq_copy_disable(struct vnic_wq_copy *wq) vnic_wq_copy_disable() argument
34 iowrite32(0, &wq->ctrl->enable); vnic_wq_copy_disable()
38 if (!(ioread32(&wq->ctrl->running))) vnic_wq_copy_disable()
45 wq->index, ioread32(&wq->ctrl->fetch_index), vnic_wq_copy_disable()
46 ioread32(&wq->ctrl->posted_index)); vnic_wq_copy_disable()
51 void vnic_wq_copy_clean(struct vnic_wq_copy *wq, vnic_wq_copy_clean() argument
52 void (*q_clean)(struct vnic_wq_copy *wq, vnic_wq_copy_clean()
55 BUG_ON(ioread32(&wq->ctrl->enable)); vnic_wq_copy_clean()
57 if (vnic_wq_copy_desc_in_use(wq)) vnic_wq_copy_clean()
58 vnic_wq_copy_service(wq, -1, q_clean); vnic_wq_copy_clean()
60 wq->to_use_index = wq->to_clean_index = 0; vnic_wq_copy_clean()
62 iowrite32(0, &wq->ctrl->fetch_index); vnic_wq_copy_clean()
63 iowrite32(0, &wq->ctrl->posted_index); vnic_wq_copy_clean()
64 iowrite32(0, &wq->ctrl->error_status); vnic_wq_copy_clean()
66 vnic_dev_clear_desc_ring(&wq->ring); vnic_wq_copy_clean()
69 void vnic_wq_copy_free(struct vnic_wq_copy *wq) vnic_wq_copy_free() argument
73 vdev = wq->vdev; vnic_wq_copy_free()
74 vnic_dev_free_desc_ring(vdev, &wq->ring); vnic_wq_copy_free()
75 wq->ctrl = NULL; vnic_wq_copy_free()
78 int vnic_wq_copy_alloc(struct vnic_dev *vdev, struct vnic_wq_copy *wq, vnic_wq_copy_alloc() argument
84 wq->index = index; vnic_wq_copy_alloc()
85 wq->vdev = vdev; vnic_wq_copy_alloc()
86 wq->to_use_index = wq->to_clean_index = 0; vnic_wq_copy_alloc()
87 wq->ctrl = vnic_dev_get_res(vdev, RES_TYPE_WQ, index); vnic_wq_copy_alloc()
88 if (!wq->ctrl) { vnic_wq_copy_alloc()
93 vnic_wq_copy_disable(wq); vnic_wq_copy_alloc()
95 err = vnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count, desc_size); vnic_wq_copy_alloc()
102 void vnic_wq_copy_init(struct vnic_wq_copy *wq, unsigned int cq_index, vnic_wq_copy_init() argument
108 paddr = (u64)wq->ring.base_addr | VNIC_PADDR_TARGET; vnic_wq_copy_init()
109 writeq(paddr, &wq->ctrl->ring_base); vnic_wq_copy_init()
110 iowrite32(wq->ring.desc_count, &wq->ctrl->ring_size); vnic_wq_copy_init()
111 iowrite32(0, &wq->ctrl->fetch_index); vnic_wq_copy_init()
112 iowrite32(0, &wq->ctrl->posted_index); vnic_wq_copy_init()
113 iowrite32(cq_index, &wq->ctrl->cq_index); vnic_wq_copy_init()
114 iowrite32(error_interrupt_enable, &wq->ctrl->error_interrupt_enable); vnic_wq_copy_init()
115 iowrite32(error_interrupt_offset, &wq->ctrl->error_interrupt_offset); vnic_wq_copy_init()
vnic_wq.h
96 static inline unsigned int vnic_wq_desc_avail(struct vnic_wq *wq) vnic_wq_desc_avail() argument
99 return wq->ring.desc_avail; vnic_wq_desc_avail()
102 static inline unsigned int vnic_wq_desc_used(struct vnic_wq *wq) vnic_wq_desc_used() argument
105 return wq->ring.desc_count - wq->ring.desc_avail - 1; vnic_wq_desc_used()
108 static inline void *vnic_wq_next_desc(struct vnic_wq *wq) vnic_wq_next_desc() argument
110 return wq->to_use->desc; vnic_wq_next_desc()
113 static inline void vnic_wq_post(struct vnic_wq *wq, vnic_wq_post() argument
117 struct vnic_wq_buf *buf = wq->to_use; vnic_wq_post()
132 iowrite32(buf->index, &wq->ctrl->posted_index); vnic_wq_post()
134 wq->to_use = buf; vnic_wq_post()
136 wq->ring.desc_avail--; vnic_wq_post()
139 static inline void vnic_wq_service(struct vnic_wq *wq, vnic_wq_service() argument
141 void (*buf_service)(struct vnic_wq *wq, vnic_wq_service()
147 buf = wq->to_clean; vnic_wq_service()
150 (*buf_service)(wq, cq_desc, buf, opaque); vnic_wq_service()
152 wq->ring.desc_avail++; vnic_wq_service()
154 wq->to_clean = buf->next; vnic_wq_service()
159 buf = wq->to_clean; vnic_wq_service()
163 void vnic_wq_free(struct vnic_wq *wq);
164 int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index,
166 void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index,
169 unsigned int vnic_wq_error_status(struct vnic_wq *wq);
170 void vnic_wq_enable(struct vnic_wq *wq);
171 int vnic_wq_disable(struct vnic_wq *wq);
172 void vnic_wq_clean(struct vnic_wq *wq,
173 void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf));
fnic_res.h
30 static inline void fnic_queue_wq_desc(struct vnic_wq *wq, fnic_queue_wq_desc() argument
37 struct wq_enet_desc *desc = vnic_wq_next_desc(wq); fnic_queue_wq_desc()
51 vnic_wq_post(wq, os_buf, dma_addr, len, sop, eop); fnic_queue_wq_desc()
54 static inline void fnic_queue_wq_eth_desc(struct vnic_wq *wq, fnic_queue_wq_eth_desc() argument
61 struct wq_enet_desc *desc = vnic_wq_next_desc(wq); fnic_queue_wq_eth_desc()
76 vnic_wq_post(wq, os_buf, dma_addr, len, 1, 1); fnic_queue_wq_eth_desc()
79 static inline void fnic_queue_wq_copy_desc_icmnd_16(struct vnic_wq_copy *wq, fnic_queue_wq_copy_desc_icmnd_16() argument
91 struct fcpio_host_req *desc = vnic_wq_copy_next_desc(wq); fnic_queue_wq_copy_desc_icmnd_16()
121 vnic_wq_copy_post(wq); fnic_queue_wq_copy_desc_icmnd_16()
124 static inline void fnic_queue_wq_copy_desc_itmf(struct vnic_wq_copy *wq, fnic_queue_wq_copy_desc_itmf() argument
130 struct fcpio_host_req *desc = vnic_wq_copy_next_desc(wq); fnic_queue_wq_copy_desc_itmf()
147 vnic_wq_copy_post(wq); fnic_queue_wq_copy_desc_itmf()
150 static inline void fnic_queue_wq_copy_desc_flogi_reg(struct vnic_wq_copy *wq, fnic_queue_wq_copy_desc_flogi_reg() argument
154 struct fcpio_host_req *desc = vnic_wq_copy_next_desc(wq); fnic_queue_wq_copy_desc_flogi_reg()
166 vnic_wq_copy_post(wq); fnic_queue_wq_copy_desc_flogi_reg()
169 static inline void fnic_queue_wq_copy_desc_fip_reg(struct vnic_wq_copy *wq, fnic_queue_wq_copy_desc_fip_reg() argument
174 struct fcpio_host_req *desc = vnic_wq_copy_next_desc(wq); fnic_queue_wq_copy_desc_fip_reg()
190 vnic_wq_copy_post(wq); fnic_queue_wq_copy_desc_fip_reg()
193 static inline void fnic_queue_wq_copy_desc_fw_reset(struct vnic_wq_copy *wq, fnic_queue_wq_copy_desc_fw_reset() argument
196 struct fcpio_host_req *desc = vnic_wq_copy_next_desc(wq); fnic_queue_wq_copy_desc_fw_reset()
203 vnic_wq_copy_post(wq); fnic_queue_wq_copy_desc_fw_reset()
206 static inline void fnic_queue_wq_copy_desc_lunmap(struct vnic_wq_copy *wq, fnic_queue_wq_copy_desc_lunmap() argument
210 struct fcpio_host_req *desc = vnic_wq_copy_next_desc(wq); fnic_queue_wq_copy_desc_lunmap()
220 vnic_wq_copy_post(wq); fnic_queue_wq_copy_desc_lunmap()
fnic_scsi.c
143 static int free_wq_copy_descs(struct fnic *fnic, struct vnic_wq_copy *wq) free_wq_copy_descs() argument
153 if (wq->to_clean_index <= fnic->fw_ack_index[0]) free_wq_copy_descs()
154 wq->ring.desc_avail += (fnic->fw_ack_index[0] free_wq_copy_descs()
155 - wq->to_clean_index + 1); free_wq_copy_descs()
157 wq->ring.desc_avail += (wq->ring.desc_count free_wq_copy_descs()
158 - wq->to_clean_index free_wq_copy_descs()
166 wq->to_clean_index = free_wq_copy_descs()
167 (fnic->fw_ack_index[0] + 1) % wq->ring.desc_count; free_wq_copy_descs()
208 struct vnic_wq_copy *wq = &fnic->wq_copy[0]; fnic_fw_reset_handler() local
224 if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0]) fnic_fw_reset_handler()
225 free_wq_copy_descs(fnic, wq); fnic_fw_reset_handler()
227 if (!vnic_wq_copy_desc_avail(wq)) fnic_fw_reset_handler()
230 fnic_queue_wq_copy_desc_fw_reset(wq, SCSI_NO_TAG); fnic_fw_reset_handler()
261 struct vnic_wq_copy *wq = &fnic->wq_copy[0]; fnic_flogi_reg_handler() local
270 if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0]) fnic_flogi_reg_handler()
271 free_wq_copy_descs(fnic, wq); fnic_flogi_reg_handler()
273 if (!vnic_wq_copy_desc_avail(wq)) { fnic_flogi_reg_handler()
287 fnic_queue_wq_copy_desc_fip_reg(wq, SCSI_NO_TAG, fnic_flogi_reg_handler()
295 fnic_queue_wq_copy_desc_flogi_reg(wq, SCSI_NO_TAG, fnic_flogi_reg_handler()
315 * Routine to enqueue a wq copy desc
318 struct vnic_wq_copy *wq, fnic_queue_wq_copy_desc()
361 if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0]) fnic_queue_wq_copy_desc()
362 free_wq_copy_descs(fnic, wq); fnic_queue_wq_copy_desc()
364 if (unlikely(!vnic_wq_copy_desc_avail(wq))) { fnic_queue_wq_copy_desc()
383 fnic_queue_wq_copy_desc_icmnd_16(wq, sc->request->tag, fnic_queue_wq_copy_desc()
420 struct vnic_wq_copy *wq; fnic_queuecommand_lck() local
530 /* create copy wq desc and enqueue it */ fnic_queuecommand_lck()
531 wq = &fnic->wq_copy[0]; fnic_queuecommand_lck()
532 ret = fnic_queue_wq_copy_desc(fnic, wq, io_req, sc, sg_count); fnic_queuecommand_lck()
728 static inline int is_ack_index_in_range(struct vnic_wq_copy *wq, is_ack_index_in_range() argument
731 if (wq->to_clean_index <= wq->to_use_index) { is_ack_index_in_range()
733 if (request_out < wq->to_clean_index || is_ack_index_in_range()
734 request_out >= wq->to_use_index) is_ack_index_in_range()
738 if (request_out < wq->to_clean_index && is_ack_index_in_range()
739 request_out >= wq->to_use_index) is_ack_index_in_range()
757 struct vnic_wq_copy *wq; fnic_fcpio_ack_handler() local
763 wq = &fnic->wq_copy[cq_index - fnic->raw_wq_count - fnic->rq_count]; fnic_fcpio_ack_handler()
767 if (is_ack_index_in_range(wq, request_out)) { fnic_fcpio_ack_handler()
1226 case FCPIO_ACK: /* fw copied copy wq desc to its queue */ fnic_fcpio_cmpl_handler()
1259 * Routine to process wq copy
1361 void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq, fnic_wq_copy_cleanup_handler() argument
1365 struct fnic *fnic = vnic_dev_priv(wq->vdev); fnic_wq_copy_cleanup_handler()
1426 struct vnic_wq_copy *wq = &fnic->wq_copy[0]; fnic_queue_abort_io_req() local
1442 if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0]) fnic_queue_abort_io_req()
1443 free_wq_copy_descs(fnic, wq); fnic_queue_abort_io_req()
1445 if (!vnic_wq_copy_desc_avail(wq)) { fnic_queue_abort_io_req()
1453 fnic_queue_wq_copy_desc_itmf(wq, tag | FNIC_TAG_ABORT, fnic_queue_abort_io_req()
1949 struct vnic_wq_copy *wq = &fnic->wq_copy[0]; fnic_queue_dr_io_req() local
1967 if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0]) fnic_queue_dr_io_req()
1968 free_wq_copy_descs(fnic, wq); fnic_queue_dr_io_req()
1970 if (!vnic_wq_copy_desc_avail(wq)) { fnic_queue_dr_io_req()
1981 fnic_queue_wq_copy_desc_itmf(wq, sc->request->tag | FNIC_TAG_DEV_RST, fnic_queue_dr_io_req()
317 fnic_queue_wq_copy_desc(struct fnic *fnic, struct vnic_wq_copy *wq, struct fnic_io_req *io_req, struct scsi_cmnd *sc, int sg_count) fnic_queue_wq_copy_desc() argument
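
The is_ack_index_in_range() hit above validates that a firmware ack index lies inside the in-flight window [to_clean_index, to_use_index), which may wrap around the ring. Below is a standalone sketch of that check with illustrative names; it models the same logic but is not the fnic code.

#include <stdbool.h>
#include <stdio.h>

static bool ack_in_range(unsigned int to_clean, unsigned int to_use,
                         unsigned int ack)
{
    if (to_clean <= to_use)                 /* window does not wrap */
        return ack >= to_clean && ack < to_use;
    /* window wraps: valid indices are [to_clean, end) plus [0, to_use) */
    return ack >= to_clean || ack < to_use;
}

int main(void)
{
    printf("%d\n", ack_in_range(2, 6, 4)); /* 1: inside the window */
    printf("%d\n", ack_in_range(6, 2, 7)); /* 1: inside, window wrapped */
    printf("%d\n", ack_in_range(6, 2, 3)); /* 0: not currently in flight */
    return 0;
}
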
fnic_res.c
148 "wq/wq_copy/rq %d/%d/%d\n", fnic_get_vnic_config()
215 vnic_wq_free(&fnic->wq[i]); fnic_free_vnic_resources()
250 "wq %d cp_wq %d raw_wq %d rq %d cq %d intr %d\n", fnic_alloc_vnic_resources()
256 err = vnic_wq_alloc(fnic->vdev, &fnic->wq[i], i, fnic_alloc_vnic_resources()
338 * Note for copy wq we always initialize with cq_index = 0 fnic_alloc_vnic_resources()
365 vnic_wq_init(&fnic->wq[i], fnic_alloc_vnic_resources()
fnic_fcs.c
979 struct vnic_wq *wq = &fnic->wq[0]; fnic_eth_send() local
1007 if (!vnic_wq_desc_avail(wq)) { fnic_eth_send()
1014 fnic_queue_wq_eth_desc(wq, skb, pa, skb->len, fnic_eth_send()
1025 struct vnic_wq *wq = &fnic->wq[0]; fnic_send_frame() local
1081 if (!vnic_wq_desc_avail(wq)) { fnic_send_frame()
1088 fnic_queue_wq_desc(wq, skb, pa, tot_len, fr_eof(fp), fnic_send_frame()
1188 static void fnic_wq_complete_frame_send(struct vnic_wq *wq, fnic_wq_complete_frame_send() argument
1194 struct fnic *fnic = vnic_dev_priv(wq->vdev); fnic_wq_complete_frame_send()
1211 vnic_wq_service(&fnic->wq[q_number], cq_desc, completed_index, fnic_wq_cmpl_handler_cont()
1234 void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf) fnic_free_wq_buf() argument
1237 struct fnic *fnic = vnic_dev_priv(wq->vdev); fnic_free_wq_buf()
fnic_isr.c
197 "%.11s-fcs-wq", fnic->name); fnic_request_intr()
202 "%.11s-scsi-wq", fnic->name); fnic_request_intr()
238 unsigned int m = ARRAY_SIZE(fnic->wq); fnic_set_intr_mode()
fnic.h
305 ____cacheline_aligned struct vnic_wq wq[FNIC_WQ_MAX]; member in struct:fnic
330 void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf);
355 void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq,
/linux-4.1.27/fs/btrfs/
async-thread.c
132 __btrfs_destroy_workqueue(struct __btrfs_workqueue *wq);
168 static inline void thresh_queue_hook(struct __btrfs_workqueue *wq) thresh_queue_hook() argument
170 if (wq->thresh == NO_THRESHOLD) thresh_queue_hook()
172 atomic_inc(&wq->pending); thresh_queue_hook()
180 static inline void thresh_exec_hook(struct __btrfs_workqueue *wq) thresh_exec_hook() argument
186 if (wq->thresh == NO_THRESHOLD) thresh_exec_hook()
189 atomic_dec(&wq->pending); thresh_exec_hook()
190 spin_lock(&wq->thres_lock); thresh_exec_hook()
192 * Use wq->count to limit the calling frequency of thresh_exec_hook()
195 wq->count++; thresh_exec_hook()
196 wq->count %= (wq->thresh / 4); thresh_exec_hook()
197 if (!wq->count) thresh_exec_hook()
199 new_max_active = wq->current_max; thresh_exec_hook()
205 pending = atomic_read(&wq->pending); thresh_exec_hook()
206 if (pending > wq->thresh) thresh_exec_hook()
208 if (pending < wq->thresh / 2) thresh_exec_hook()
210 new_max_active = clamp_val(new_max_active, 1, wq->max_active); thresh_exec_hook()
211 if (new_max_active != wq->current_max) { thresh_exec_hook()
213 wq->current_max = new_max_active; thresh_exec_hook()
216 spin_unlock(&wq->thres_lock); thresh_exec_hook()
219 workqueue_set_max_active(wq->normal_wq, wq->current_max); thresh_exec_hook()
223 static void run_ordered_work(struct __btrfs_workqueue *wq) run_ordered_work() argument
225 struct list_head *list = &wq->ordered_list; run_ordered_work()
227 spinlock_t *lock = &wq->list_lock; run_ordered_work()
268 struct __btrfs_workqueue *wq; normal_work_helper() local
281 wq = work->wq; normal_work_helper()
284 thresh_exec_hook(wq); normal_work_helper()
288 run_ordered_work(wq); normal_work_helper()
307 static inline void __btrfs_queue_work(struct __btrfs_workqueue *wq, __btrfs_queue_work() argument
312 work->wq = wq; __btrfs_queue_work()
313 thresh_queue_hook(wq); __btrfs_queue_work()
315 spin_lock_irqsave(&wq->list_lock, flags); __btrfs_queue_work()
316 list_add_tail(&work->ordered_list, &wq->ordered_list); __btrfs_queue_work()
317 spin_unlock_irqrestore(&wq->list_lock, flags); __btrfs_queue_work()
320 queue_work(wq->normal_wq, &work->normal_work); __btrfs_queue_work()
323 void btrfs_queue_work(struct btrfs_workqueue *wq, btrfs_queue_work() argument
328 if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags) && wq->high) btrfs_queue_work()
329 dest_wq = wq->high; btrfs_queue_work()
331 dest_wq = wq->normal; btrfs_queue_work()
336 __btrfs_destroy_workqueue(struct __btrfs_workqueue *wq) __btrfs_destroy_workqueue() argument
338 destroy_workqueue(wq->normal_wq); __btrfs_destroy_workqueue()
339 trace_btrfs_workqueue_destroy(wq); __btrfs_destroy_workqueue()
340 kfree(wq); __btrfs_destroy_workqueue()
343 void btrfs_destroy_workqueue(struct btrfs_workqueue *wq) btrfs_destroy_workqueue() argument
345 if (!wq) btrfs_destroy_workqueue()
347 if (wq->high) btrfs_destroy_workqueue()
348 __btrfs_destroy_workqueue(wq->high); btrfs_destroy_workqueue()
349 __btrfs_destroy_workqueue(wq->normal); btrfs_destroy_workqueue()
350 kfree(wq); btrfs_destroy_workqueue()
353 void btrfs_workqueue_set_max(struct btrfs_workqueue *wq, int max) btrfs_workqueue_set_max() argument
355 if (!wq) btrfs_workqueue_set_max()
357 wq->normal->max_active = max; btrfs_workqueue_set_max()
358 if (wq->high) btrfs_workqueue_set_max()
359 wq->high->max_active = max; btrfs_workqueue_set_max()
async-thread.h
39 struct __btrfs_workqueue *wq; member in struct:btrfs_work
76 void btrfs_queue_work(struct btrfs_workqueue *wq,
78 void btrfs_destroy_workqueue(struct btrfs_workqueue *wq);
79 void btrfs_workqueue_set_max(struct btrfs_workqueue *wq, int max);
/linux-4.1.27/fs/autofs4/
waitq.c
29 struct autofs_wait_queue *wq, *nwq; autofs4_catatonic_mode() local
40 wq = sbi->queues; autofs4_catatonic_mode()
42 while (wq) { autofs4_catatonic_mode()
43 nwq = wq->next; autofs4_catatonic_mode()
44 wq->status = -ENOENT; /* Magic is gone - report failure */ autofs4_catatonic_mode()
45 kfree(wq->name.name); autofs4_catatonic_mode()
46 wq->name.name = NULL; autofs4_catatonic_mode()
47 wq->wait_ctr--; autofs4_catatonic_mode()
48 wake_up_interruptible(&wq->queue); autofs4_catatonic_mode()
49 wq = nwq; autofs4_catatonic_mode()
94 struct autofs_wait_queue *wq, autofs4_notify_daemon()
106 (unsigned long) wq->wait_queue_token, wq->name.len, wq->name.name, type); autofs4_notify_daemon()
121 mp->wait_queue_token = wq->wait_queue_token; autofs4_notify_daemon()
122 mp->len = wq->name.len; autofs4_notify_daemon()
123 memcpy(mp->name, wq->name.name, wq->name.len); autofs4_notify_daemon()
124 mp->name[wq->name.len] = '\0'; autofs4_notify_daemon()
133 ep->wait_queue_token = wq->wait_queue_token; autofs4_notify_daemon()
134 ep->len = wq->name.len; autofs4_notify_daemon()
135 memcpy(ep->name, wq->name.name, wq->name.len); autofs4_notify_daemon()
136 ep->name[wq->name.len] = '\0'; autofs4_notify_daemon()
153 packet->wait_queue_token = wq->wait_queue_token; autofs4_notify_daemon()
154 packet->len = wq->name.len; autofs4_notify_daemon()
155 memcpy(packet->name, wq->name.name, wq->name.len); autofs4_notify_daemon()
156 packet->name[wq->name.len] = '\0'; autofs4_notify_daemon()
157 packet->dev = wq->dev; autofs4_notify_daemon()
158 packet->ino = wq->ino; autofs4_notify_daemon()
159 packet->uid = from_kuid_munged(user_ns, wq->uid); autofs4_notify_daemon()
160 packet->gid = from_kgid_munged(user_ns, wq->gid); autofs4_notify_daemon()
161 packet->pid = wq->pid; autofs4_notify_daemon()
162 packet->tgid = wq->tgid; autofs4_notify_daemon()
228 struct autofs_wait_queue *wq; autofs4_find_wait() local
230 for (wq = sbi->queues; wq; wq = wq->next) { autofs4_find_wait()
231 if (wq->name.hash == qstr->hash && autofs4_find_wait()
232 wq->name.len == qstr->len && autofs4_find_wait()
233 wq->name.name && autofs4_find_wait()
234 !memcmp(wq->name.name, qstr->name, qstr->len)) autofs4_find_wait()
237 return wq; autofs4_find_wait()
253 struct autofs_wait_queue *wq; validate_request() local
260 wq = autofs4_find_wait(sbi, qstr); validate_request()
261 if (wq) { validate_request()
262 *wait = wq; validate_request()
294 wq = autofs4_find_wait(sbi, qstr); validate_request()
295 if (wq) { validate_request()
296 *wait = wq; validate_request()
346 struct autofs_wait_queue *wq; autofs4_wait() local
404 ret = validate_request(&wq, sbi, &qstr, dentry, notify); autofs4_wait()
412 if (!wq) { autofs4_wait()
414 wq = kmalloc(sizeof(struct autofs_wait_queue),GFP_KERNEL); autofs4_wait()
415 if (!wq) { autofs4_wait()
421 wq->wait_queue_token = autofs4_next_wait_queue; autofs4_wait()
424 wq->next = sbi->queues; autofs4_wait()
425 sbi->queues = wq; autofs4_wait()
426 init_waitqueue_head(&wq->queue); autofs4_wait()
427 memcpy(&wq->name, &qstr, sizeof(struct qstr)); autofs4_wait()
428 wq->dev = autofs4_get_dev(sbi); autofs4_wait()
429 wq->ino = autofs4_get_ino(sbi); autofs4_wait()
430 wq->uid = current_uid(); autofs4_wait()
431 wq->gid = current_gid(); autofs4_wait()
432 wq->pid = pid; autofs4_wait()
433 wq->tgid = tgid; autofs4_wait()
434 wq->status = -EINTR; /* Status return if interrupted */ autofs4_wait()
435 wq->wait_ctr = 2; autofs4_wait()
454 (unsigned long) wq->wait_queue_token, wq->name.len, autofs4_wait()
455 wq->name.name, notify); autofs4_wait()
458 autofs4_notify_daemon(sbi, wq, type); autofs4_wait()
460 wq->wait_ctr++; autofs4_wait()
462 (unsigned long) wq->wait_queue_token, wq->name.len, autofs4_wait()
463 wq->name.name, notify); autofs4_wait()
469 * wq->name.name is NULL iff the lock is already released autofs4_wait()
472 if (wq->name.name) { autofs4_wait()
483 wait_event_interruptible(wq->queue, wq->name.name == NULL); autofs4_wait()
493 status = wq->status; autofs4_wait()
520 ino->uid = wq->uid; autofs4_wait()
521 ino->gid = wq->gid; autofs4_wait()
531 if (!--wq->wait_ctr) autofs4_wait()
532 kfree(wq); autofs4_wait()
541 struct autofs_wait_queue *wq, **wql; autofs4_wait_release() local
544 for (wql = &sbi->queues; (wq = *wql) != NULL; wql = &wq->next) { autofs4_wait_release()
545 if (wq->wait_queue_token == wait_queue_token) autofs4_wait_release()
549 if (!wq) { autofs4_wait_release()
554 *wql = wq->next; /* Unlink from chain */ autofs4_wait_release()
555 kfree(wq->name.name); autofs4_wait_release()
556 wq->name.name = NULL; /* Do not wait on this queue */ autofs4_wait_release()
557 wq->status = status; autofs4_wait_release()
558 wake_up_interruptible(&wq->queue); autofs4_wait_release()
559 if (!--wq->wait_ctr) autofs4_wait_release()
560 kfree(wq); autofs4_wait_release()
93 autofs4_notify_daemon(struct autofs_sb_info *sbi, struct autofs_wait_queue *wq, int type) autofs4_notify_daemon() argument
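
The autofs4_find_wait() hits above walk a singly linked list of pending waits and match on the name's hash, then its length, then its bytes. A minimal sketch of that lookup pattern is given below; the toy_wait type and sample data are assumptions for illustration only.

#include <stdio.h>
#include <string.h>

struct toy_wait {
    unsigned int hash;
    unsigned int len;
    const char *name;
    struct toy_wait *next;
};

static struct toy_wait *find_wait(struct toy_wait *head, unsigned int hash,
                                  const char *name, unsigned int len)
{
    struct toy_wait *wq;

    /* cheap comparisons (hash, length) first, memcmp only on a likely match */
    for (wq = head; wq; wq = wq->next) {
        if (wq->hash == hash &&
            wq->len == len &&
            wq->name &&
            !memcmp(wq->name, name, len))
            return wq;
    }
    return NULL;
}

int main(void)
{
    struct toy_wait b = { .hash = 7, .len = 3, .name = "bar", .next = NULL };
    struct toy_wait a = { .hash = 5, .len = 3, .name = "foo", .next = &b };

    printf("%s\n", find_wait(&a, 7, "bar", 3) ? "found" : "missing");
    return 0;
}
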
/linux-4.1.27/drivers/infiniband/hw/cxgb4/
t4.h
340 static inline int t4_rqes_posted(struct t4_wq *wq) t4_rqes_posted() argument
342 return wq->rq.in_use; t4_rqes_posted()
345 static inline int t4_rq_empty(struct t4_wq *wq) t4_rq_empty() argument
347 return wq->rq.in_use == 0; t4_rq_empty()
350 static inline int t4_rq_full(struct t4_wq *wq) t4_rq_full() argument
352 return wq->rq.in_use == (wq->rq.size - 1); t4_rq_full()
355 static inline u32 t4_rq_avail(struct t4_wq *wq) t4_rq_avail() argument
357 return wq->rq.size - 1 - wq->rq.in_use; t4_rq_avail()
360 static inline void t4_rq_produce(struct t4_wq *wq, u8 len16) t4_rq_produce() argument
362 wq->rq.in_use++; t4_rq_produce()
363 if (++wq->rq.pidx == wq->rq.size) t4_rq_produce()
364 wq->rq.pidx = 0; t4_rq_produce()
365 wq->rq.wq_pidx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE); t4_rq_produce()
366 if (wq->rq.wq_pidx >= wq->rq.size * T4_RQ_NUM_SLOTS) t4_rq_produce()
367 wq->rq.wq_pidx %= wq->rq.size * T4_RQ_NUM_SLOTS; t4_rq_produce()
370 static inline void t4_rq_consume(struct t4_wq *wq) t4_rq_consume() argument
372 wq->rq.in_use--; t4_rq_consume()
373 wq->rq.msn++; t4_rq_consume()
374 if (++wq->rq.cidx == wq->rq.size) t4_rq_consume()
375 wq->rq.cidx = 0; t4_rq_consume()
378 static inline u16 t4_rq_host_wq_pidx(struct t4_wq *wq) t4_rq_host_wq_pidx() argument
380 return wq->rq.queue[wq->rq.size].status.host_wq_pidx; t4_rq_host_wq_pidx()
383 static inline u16 t4_rq_wq_size(struct t4_wq *wq) t4_rq_wq_size() argument
385 return wq->rq.size * T4_RQ_NUM_SLOTS; t4_rq_wq_size()
393 static inline int t4_sq_empty(struct t4_wq *wq) t4_sq_empty() argument
395 return wq->sq.in_use == 0; t4_sq_empty()
398 static inline int t4_sq_full(struct t4_wq *wq) t4_sq_full() argument
400 return wq->sq.in_use == (wq->sq.size - 1); t4_sq_full()
403 static inline u32 t4_sq_avail(struct t4_wq *wq) t4_sq_avail() argument
405 return wq->sq.size - 1 - wq->sq.in_use; t4_sq_avail()
408 static inline void t4_sq_produce(struct t4_wq *wq, u8 len16) t4_sq_produce() argument
410 wq->sq.in_use++; t4_sq_produce()
411 if (++wq->sq.pidx == wq->sq.size) t4_sq_produce()
412 wq->sq.pidx = 0; t4_sq_produce()
413 wq->sq.wq_pidx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE); t4_sq_produce()
414 if (wq->sq.wq_pidx >= wq->sq.size * T4_SQ_NUM_SLOTS) t4_sq_produce()
415 wq->sq.wq_pidx %= wq->sq.size * T4_SQ_NUM_SLOTS; t4_sq_produce()
418 static inline void t4_sq_consume(struct t4_wq *wq) t4_sq_consume() argument
420 BUG_ON(wq->sq.in_use < 1); t4_sq_consume()
421 if (wq->sq.cidx == wq->sq.flush_cidx) t4_sq_consume()
422 wq->sq.flush_cidx = -1; t4_sq_consume()
423 wq->sq.in_use--; t4_sq_consume()
424 if (++wq->sq.cidx == wq->sq.size) t4_sq_consume()
425 wq->sq.cidx = 0; t4_sq_consume()
428 static inline u16 t4_sq_host_wq_pidx(struct t4_wq *wq) t4_sq_host_wq_pidx() argument
430 return wq->sq.queue[wq->sq.size].status.host_wq_pidx; t4_sq_host_wq_pidx()
433 static inline u16 t4_sq_wq_size(struct t4_wq *wq) t4_sq_wq_size() argument
435 return wq->sq.size * T4_SQ_NUM_SLOTS; t4_sq_wq_size()
454 static inline void t4_ring_sq_db(struct t4_wq *wq, u16 inc, u8 t5, t4_ring_sq_db() argument
462 PDBG("%s: WC wq->sq.pidx = %d\n", t4_ring_sq_db()
463 __func__, wq->sq.pidx); t4_ring_sq_db()
464 pio_copy(wq->sq.udb + 7, (void *)wqe); t4_ring_sq_db()
466 PDBG("%s: DB wq->sq.pidx = %d\n", t4_ring_sq_db()
467 __func__, wq->sq.pidx); t4_ring_sq_db()
468 writel(PIDX_T5_V(inc), wq->sq.udb); t4_ring_sq_db()
475 writel(QID_V(wq->sq.qid) | PIDX_V(inc), wq->db); t4_ring_sq_db()
478 static inline void t4_ring_rq_db(struct t4_wq *wq, u16 inc, u8 t5, t4_ring_rq_db() argument
486 PDBG("%s: WC wq->rq.pidx = %d\n", t4_ring_rq_db()
487 __func__, wq->rq.pidx); t4_ring_rq_db()
488 pio_copy(wq->rq.udb + 7, (void *)wqe); t4_ring_rq_db()
490 PDBG("%s: DB wq->rq.pidx = %d\n", t4_ring_rq_db()
491 __func__, wq->rq.pidx); t4_ring_rq_db()
492 writel(PIDX_T5_V(inc), wq->rq.udb); t4_ring_rq_db()
499 writel(QID_V(wq->rq.qid) | PIDX_V(inc), wq->db); t4_ring_rq_db()
502 static inline int t4_wq_in_error(struct t4_wq *wq) t4_wq_in_error() argument
504 return wq->rq.queue[wq->rq.size].status.qp_err; t4_wq_in_error()
507 static inline void t4_set_wq_in_error(struct t4_wq *wq) t4_set_wq_in_error() argument
509 wq->rq.queue[wq->rq.size].status.qp_err = 1; t4_set_wq_in_error()
512 static inline void t4_disable_wq_db(struct t4_wq *wq) t4_disable_wq_db() argument
514 wq->rq.queue[wq->rq.size].status.db_off = 1; t4_disable_wq_db()
517 static inline void t4_enable_wq_db(struct t4_wq *wq) t4_enable_wq_db() argument
519 wq->rq.queue[wq->rq.size].status.db_off = 0; t4_enable_wq_db()
522 static inline int t4_wq_db_enabled(struct t4_wq *wq) t4_wq_db_enabled() argument
524 return !wq->rq.queue[wq->rq.size].status.db_off; t4_wq_db_enabled()
cq.c
185 static void insert_recv_cqe(struct t4_wq *wq, struct t4_cq *cq) insert_recv_cqe() argument
189 PDBG("%s wq %p cq %p sw_cidx %u sw_pidx %u\n", __func__, insert_recv_cqe()
190 wq, cq, cq->sw_cidx, cq->sw_pidx); insert_recv_cqe()
196 CQE_QPID_V(wq->sq.qid)); insert_recv_cqe()
202 int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count) c4iw_flush_rq() argument
205 int in_use = wq->rq.in_use - count; c4iw_flush_rq()
208 PDBG("%s wq %p cq %p rq.in_use %u skip count %u\n", __func__, c4iw_flush_rq()
209 wq, cq, wq->rq.in_use, count); c4iw_flush_rq()
211 insert_recv_cqe(wq, cq); c4iw_flush_rq()
217 static void insert_sq_cqe(struct t4_wq *wq, struct t4_cq *cq, insert_sq_cqe() argument
222 PDBG("%s wq %p cq %p sw_cidx %u sw_pidx %u\n", __func__, insert_sq_cqe()
223 wq, cq, cq->sw_cidx, cq->sw_pidx); insert_sq_cqe()
229 CQE_QPID_V(wq->sq.qid)); insert_sq_cqe()
236 static void advance_oldest_read(struct t4_wq *wq);
241 struct t4_wq *wq = &qhp->wq; c4iw_flush_sq() local
247 if (wq->sq.flush_cidx == -1) c4iw_flush_sq()
248 wq->sq.flush_cidx = wq->sq.cidx; c4iw_flush_sq()
249 idx = wq->sq.flush_cidx; c4iw_flush_sq()
250 BUG_ON(idx >= wq->sq.size); c4iw_flush_sq()
251 while (idx != wq->sq.pidx) { c4iw_flush_sq()
252 swsqe = &wq->sq.sw_sq[idx]; c4iw_flush_sq()
255 insert_sq_cqe(wq, cq, swsqe); c4iw_flush_sq()
256 if (wq->sq.oldest_read == swsqe) { c4iw_flush_sq()
258 advance_oldest_read(wq); c4iw_flush_sq()
261 if (++idx == wq->sq.size) c4iw_flush_sq()
264 wq->sq.flush_cidx += flushed; c4iw_flush_sq()
265 if (wq->sq.flush_cidx >= wq->sq.size) c4iw_flush_sq()
266 wq->sq.flush_cidx -= wq->sq.size; c4iw_flush_sq()
270 static void flush_completed_wrs(struct t4_wq *wq, struct t4_cq *cq) flush_completed_wrs() argument
275 if (wq->sq.flush_cidx == -1) flush_completed_wrs()
276 wq->sq.flush_cidx = wq->sq.cidx; flush_completed_wrs()
277 cidx = wq->sq.flush_cidx; flush_completed_wrs()
278 BUG_ON(cidx > wq->sq.size); flush_completed_wrs()
280 while (cidx != wq->sq.pidx) { flush_completed_wrs()
281 swsqe = &wq->sq.sw_sq[cidx]; flush_completed_wrs()
283 if (++cidx == wq->sq.size) flush_completed_wrs()
298 if (++cidx == wq->sq.size) flush_completed_wrs()
300 wq->sq.flush_cidx = cidx; flush_completed_wrs()
306 static void create_read_req_cqe(struct t4_wq *wq, struct t4_cqe *hw_cqe, create_read_req_cqe() argument
309 read_cqe->u.scqe.cidx = wq->sq.oldest_read->idx; create_read_req_cqe()
310 read_cqe->len = htonl(wq->sq.oldest_read->read_len); create_read_req_cqe()
318 static void advance_oldest_read(struct t4_wq *wq) advance_oldest_read() argument
321 u32 rptr = wq->sq.oldest_read - wq->sq.sw_sq + 1; advance_oldest_read()
323 if (rptr == wq->sq.size) advance_oldest_read()
325 while (rptr != wq->sq.pidx) { advance_oldest_read()
326 wq->sq.oldest_read = &wq->sq.sw_sq[rptr]; advance_oldest_read()
328 if (wq->sq.oldest_read->opcode == FW_RI_READ_REQ) advance_oldest_read()
330 if (++rptr == wq->sq.size) advance_oldest_read()
333 wq->sq.oldest_read = NULL; advance_oldest_read()
385 if (!qhp->wq.sq.oldest_read->signaled) { c4iw_flush_hw_cq()
386 advance_oldest_read(&qhp->wq); c4iw_flush_hw_cq()
394 create_read_req_cqe(&qhp->wq, hw_cqe, &read_cqe); c4iw_flush_hw_cq()
396 advance_oldest_read(&qhp->wq); c4iw_flush_hw_cq()
403 swsqe = &qhp->wq.sq.sw_sq[CQE_WRID_SQ_IDX(hw_cqe)]; c4iw_flush_hw_cq()
406 flush_completed_wrs(&qhp->wq, &chp->cq); c4iw_flush_hw_cq()
419 static int cqe_completes_wr(struct t4_cqe *cqe, struct t4_wq *wq) cqe_completes_wr() argument
430 if (CQE_SEND_OPCODE(cqe) && RQ_TYPE(cqe) && t4_rq_empty(wq)) cqe_completes_wr()
435 void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count) c4iw_count_rcqes() argument
446 (CQE_QPID(cqe) == wq->sq.qid) && cqe_completes_wr(cqe, wq)) c4iw_count_rcqes()
459 * supply the wq associated with the qpid.
470 static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe, poll_cq() argument
492 if (wq == NULL) { poll_cq()
498 * skip hw cqe's if the wq is flushed. poll_cq()
500 if (wq->flushed && !SW_CQE(hw_cqe)) { poll_cq()
528 t4_set_wq_in_error(wq); poll_cq()
539 t4_set_wq_in_error(wq); poll_cq()
547 if (!wq->sq.oldest_read->signaled) { poll_cq()
548 advance_oldest_read(wq); poll_cq()
557 create_read_req_cqe(wq, hw_cqe, &read_cqe); poll_cq()
559 advance_oldest_read(wq); poll_cq()
562 if (CQE_STATUS(hw_cqe) || t4_wq_in_error(wq)) { poll_cq()
564 t4_set_wq_in_error(wq); poll_cq()
575 * then we complete this with T4_ERR_MSN and mark the wq in poll_cq()
579 if (t4_rq_empty(wq)) { poll_cq()
580 t4_set_wq_in_error(wq); poll_cq()
584 if (unlikely((CQE_WRID_MSN(hw_cqe) != (wq->rq.msn)))) { poll_cq()
585 t4_set_wq_in_error(wq); poll_cq()
603 if (!SW_CQE(hw_cqe) && (CQE_WRID_SQ_IDX(hw_cqe) != wq->sq.cidx)) { poll_cq()
608 swsqe = &wq->sq.sw_sq[CQE_WRID_SQ_IDX(hw_cqe)]; poll_cq()
624 BUG_ON(idx >= wq->sq.size); poll_cq()
634 if (idx < wq->sq.cidx) poll_cq()
635 wq->sq.in_use -= wq->sq.size + idx - wq->sq.cidx; poll_cq()
637 wq->sq.in_use -= idx - wq->sq.cidx; poll_cq()
638 BUG_ON(wq->sq.in_use <= 0 && wq->sq.in_use >= wq->sq.size); poll_cq()
640 wq->sq.cidx = (uint16_t)idx; poll_cq()
641 PDBG("%s completing sq idx %u\n", __func__, wq->sq.cidx); poll_cq()
642 *cookie = wq->sq.sw_sq[wq->sq.cidx].wr_id; poll_cq()
644 c4iw_log_wr_stats(wq, hw_cqe); poll_cq()
645 t4_sq_consume(wq); poll_cq()
647 PDBG("%s completing rq idx %u\n", __func__, wq->rq.cidx); poll_cq()
648 *cookie = wq->rq.sw_rq[wq->rq.cidx].wr_id; poll_cq()
649 BUG_ON(t4_rq_empty(wq)); poll_cq()
651 c4iw_log_wr_stats(wq, hw_cqe); poll_cq()
652 t4_rq_consume(wq); poll_cq()
660 flush_completed_wrs(wq, cq); poll_cq()
688 struct t4_wq *wq; c4iw_poll_cq_one() local
701 wq = NULL; c4iw_poll_cq_one()
704 wq = &(qhp->wq); c4iw_poll_cq_one()
706 ret = poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie, &credit); c4iw_poll_cq_one()
823 if (wq) c4iw_poll_cq_one()
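
The poll_cq() hits above catch the send-queue consumer index up to a reported completion index, where the retired-entry count depends on whether the completion index wrapped around the ring. A toy version of that arithmetic, with illustrative names, is shown below.

#include <stdio.h>

static unsigned int retired_entries(unsigned int cidx, unsigned int idx,
                                    unsigned int size)
{
    if (idx < cidx)
        return size + idx - cidx;   /* completion index wrapped */
    return idx - cidx;
}

int main(void)
{
    printf("%u\n", retired_entries(5, 9, 16));  /* 4 */
    printf("%u\n", retired_entries(14, 2, 16)); /* 4 (wrapped) */
    return 0;
}
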
qp.c
149 static int destroy_qp(struct c4iw_rdev *rdev, struct t4_wq *wq, destroy_qp() argument
157 wq->rq.memsize, wq->rq.queue, destroy_qp()
158 dma_unmap_addr(&wq->rq, mapping)); destroy_qp()
159 dealloc_sq(rdev, &wq->sq); destroy_qp()
160 c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size); destroy_qp()
161 kfree(wq->rq.sw_rq); destroy_qp()
162 kfree(wq->sq.sw_sq); destroy_qp()
163 c4iw_put_qpid(rdev, wq->rq.qid, uctx); destroy_qp()
164 c4iw_put_qpid(rdev, wq->sq.qid, uctx); destroy_qp()
168 static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq, create_qp() argument
181 wq->sq.qid = c4iw_get_qpid(rdev, uctx); create_qp()
182 if (!wq->sq.qid) create_qp()
185 wq->rq.qid = c4iw_get_qpid(rdev, uctx); create_qp()
186 if (!wq->rq.qid) { create_qp()
192 wq->sq.sw_sq = kzalloc(wq->sq.size * sizeof *wq->sq.sw_sq, create_qp()
194 if (!wq->sq.sw_sq) { create_qp()
199 wq->rq.sw_rq = kzalloc(wq->rq.size * sizeof *wq->rq.sw_rq, create_qp()
201 if (!wq->rq.sw_rq) { create_qp()
210 wq->rq.rqt_size = roundup_pow_of_two(max_t(u16, wq->rq.size, 16)); create_qp()
211 wq->rq.rqt_hwaddr = c4iw_rqtpool_alloc(rdev, wq->rq.rqt_size); create_qp()
212 if (!wq->rq.rqt_hwaddr) { create_qp()
217 ret = alloc_sq(rdev, &wq->sq, user); create_qp()
220 memset(wq->sq.queue, 0, wq->sq.memsize); create_qp()
221 dma_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr); create_qp()
223 wq->rq.queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev), create_qp()
224 wq->rq.memsize, &(wq->rq.dma_addr), create_qp()
226 if (!wq->rq.queue) { create_qp()
231 __func__, wq->sq.queue, create_qp()
232 (unsigned long long)virt_to_phys(wq->sq.queue), create_qp()
233 wq->rq.queue, create_qp()
234 (unsigned long long)virt_to_phys(wq->rq.queue)); create_qp()
235 memset(wq->rq.queue, 0, wq->rq.memsize); create_qp()
236 dma_unmap_addr_set(&wq->rq, mapping, wq->rq.dma_addr); create_qp()
238 wq->db = rdev->lldi.db_reg; create_qp()
239 wq->gts = rdev->lldi.gts_reg; create_qp()
243 off = (wq->sq.qid << rdev->qpshift) & PAGE_MASK; create_qp()
245 wq->sq.udb = (u64 __iomem *)(rdev->bar2_pa + off); create_qp()
247 off += 128 * (wq->sq.qid & rdev->qpmask) + 8; create_qp()
248 wq->sq.udb = (u64 __iomem *)(rdev->bar2_kva + off); create_qp()
250 off = (wq->rq.qid << rdev->qpshift) & PAGE_MASK; create_qp()
252 wq->rq.udb = (u64 __iomem *)(rdev->bar2_pa + off); create_qp()
254 off += 128 * (wq->rq.qid & rdev->qpmask) + 8; create_qp()
255 wq->rq.udb = (u64 __iomem *)(rdev->bar2_kva + off); create_qp()
258 wq->rdev = rdev; create_qp()
259 wq->rq.msn = 1; create_qp()
286 eqsize = wq->sq.size * T4_SQ_NUM_SLOTS + create_qp()
293 (t4_sq_onchip(&wq->sq) ? FW_RI_RES_WR_ONCHIP_F : 0) | create_qp()
303 res->u.sqrq.eqid = cpu_to_be32(wq->sq.qid); create_qp()
304 res->u.sqrq.eqaddr = cpu_to_be64(wq->sq.dma_addr); create_qp()
312 eqsize = wq->rq.size * T4_RQ_NUM_SLOTS + create_qp()
327 res->u.sqrq.eqid = cpu_to_be32(wq->rq.qid); create_qp()
328 res->u.sqrq.eqaddr = cpu_to_be64(wq->rq.dma_addr); create_qp()
335 ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, wq->sq.qid, __func__); create_qp()
340 __func__, wq->sq.qid, wq->rq.qid, wq->db, create_qp()
341 (__force unsigned long) wq->sq.udb, create_qp()
342 (__force unsigned long) wq->rq.udb); create_qp()
347 wq->rq.memsize, wq->rq.queue, create_qp()
348 dma_unmap_addr(&wq->rq, mapping)); create_qp()
350 dealloc_sq(rdev, &wq->sq); create_qp()
352 c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size); create_qp()
354 kfree(wq->rq.sw_rq); create_qp()
356 kfree(wq->sq.sw_sq); create_qp()
358 c4iw_put_qpid(rdev, wq->rq.qid, uctx); create_qp()
360 c4iw_put_qpid(rdev, wq->sq.qid, uctx); create_qp()
578 ret = build_isgl((__be64 *)qhp->wq.rq.queue, build_rdma_recv()
579 (__be64 *)&qhp->wq.rq.queue[qhp->wq.rq.size], build_rdma_recv()
695 t4_ring_sq_db(&qhp->wq, inc, ring_kernel_sq_db()
699 qhp->wq.sq.wq_pidx_inc += inc; ring_kernel_sq_db()
713 t4_ring_rq_db(&qhp->wq, inc, ring_kernel_rq_db()
717 qhp->wq.rq.wq_pidx_inc += inc; ring_kernel_rq_db()
740 if (t4_wq_in_error(&qhp->wq)) { c4iw_post_send()
744 num_wrs = t4_sq_avail(&qhp->wq); c4iw_post_send()
755 wqe = (union t4_wr *)((u8 *)qhp->wq.sq.queue + c4iw_post_send()
756 qhp->wq.sq.wq_pidx * T4_EQ_ENTRY_SIZE); c4iw_post_send()
763 swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx]; c4iw_post_send()
774 err = build_rdma_send(&qhp->wq.sq, wqe, wr, &len16); c4iw_post_send()
779 err = build_rdma_write(&qhp->wq.sq, wqe, wr, &len16); c4iw_post_send()
793 if (!qhp->wq.sq.oldest_read) c4iw_post_send()
794 qhp->wq.sq.oldest_read = swsqe; c4iw_post_send()
799 err = build_fastreg(&qhp->wq.sq, wqe, wr, &len16, c4iw_post_send()
820 swsqe->idx = qhp->wq.sq.pidx; c4iw_post_send()
832 init_wr_hdr(wqe, qhp->wq.sq.pidx, fw_opcode, fw_flags, len16); c4iw_post_send()
835 __func__, (unsigned long long)wr->wr_id, qhp->wq.sq.pidx, c4iw_post_send()
839 t4_sq_produce(&qhp->wq, len16); c4iw_post_send()
843 t4_ring_sq_db(&qhp->wq, idx, c4iw_post_send()
866 if (t4_wq_in_error(&qhp->wq)) { c4iw_post_receive()
870 num_wrs = t4_rq_avail(&qhp->wq); c4iw_post_receive()
881 wqe = (union t4_recv_wr *)((u8 *)qhp->wq.rq.queue + c4iw_post_receive()
882 qhp->wq.rq.wq_pidx * c4iw_post_receive()
893 qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].wr_id = wr->wr_id; c4iw_post_receive()
895 qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].sge_ts = c4iw_post_receive()
899 &qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].host_ts); c4iw_post_receive()
904 wqe->recv.wrid = qhp->wq.rq.pidx; c4iw_post_receive()
910 (unsigned long long) wr->wr_id, qhp->wq.rq.pidx); c4iw_post_receive()
911 t4_rq_produce(&qhp->wq, len16); c4iw_post_receive()
917 t4_ring_rq_db(&qhp->wq, idx, c4iw_post_receive()
1075 PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid, post_terminate()
1117 if (qhp->wq.flushed) { __flush_qp()
1122 qhp->wq.flushed = 1; __flush_qp()
1125 c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count); __flush_qp()
1126 rq_flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count); __flush_qp()
1171 t4_set_wq_in_error(&qhp->wq); flush_qp()
1196 PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid, rdma_fini()
1220 qhp->wq.sq.qid, __func__); rdma_fini()
1258 qhp->wq.sq.qid, qhp->ep->hwtid, qhp->ep->ird, qhp->ep->ord); rdma_init()
1302 wqe->u.init.nrqe = cpu_to_be16(t4_rqes_posted(&qhp->wq)); rdma_init()
1304 wqe->u.init.qpid = cpu_to_be32(qhp->wq.sq.qid); rdma_init()
1305 wqe->u.init.sq_eqid = cpu_to_be32(qhp->wq.sq.qid); rdma_init()
1306 wqe->u.init.rq_eqid = cpu_to_be32(qhp->wq.rq.qid); rdma_init()
1313 wqe->u.init.hwrqsize = cpu_to_be32(qhp->wq.rq.rqt_size); rdma_init()
1314 wqe->u.init.hwrqaddr = cpu_to_be32(qhp->wq.rq.rqt_hwaddr - rdma_init()
1324 qhp->ep->hwtid, qhp->wq.sq.qid, __func__); rdma_init()
1348 qhp, qhp->wq.sq.qid, qhp->wq.rq.qid, qhp->ep, qhp->attr.state, c4iw_modify_qp()
1437 t4_set_wq_in_error(&qhp->wq); c4iw_modify_qp()
1450 t4_set_wq_in_error(&qhp->wq); c4iw_modify_qp()
1467 t4_set_wq_in_error(&qhp->wq); c4iw_modify_qp()
1508 if (!t4_sq_empty(&qhp->wq) || !t4_rq_empty(&qhp->wq)) { c4iw_modify_qp()
1531 qhp->wq.sq.qid); c4iw_modify_qp()
1588 remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid); c4iw_destroy_qp()
1600 destroy_qp(&rhp->rdev, &qhp->wq, c4iw_destroy_qp()
1603 PDBG("%s ib_qp %p qpid 0x%0x\n", __func__, ib_qp, qhp->wq.sq.qid); c4iw_destroy_qp()
1654 qhp->wq.sq.size = sqsize; c4iw_create_qp()
1655 qhp->wq.sq.memsize = c4iw_create_qp()
1657 sizeof(*qhp->wq.sq.queue) + 16 * sizeof(__be64); c4iw_create_qp()
1658 qhp->wq.sq.flush_cidx = -1; c4iw_create_qp()
1659 qhp->wq.rq.size = rqsize; c4iw_create_qp()
1660 qhp->wq.rq.memsize = c4iw_create_qp()
1662 sizeof(*qhp->wq.rq.queue); c4iw_create_qp()
1665 qhp->wq.sq.memsize = roundup(qhp->wq.sq.memsize, PAGE_SIZE); c4iw_create_qp()
1666 qhp->wq.rq.memsize = roundup(qhp->wq.rq.memsize, PAGE_SIZE); c4iw_create_qp()
1669 ret = create_qp(&rhp->rdev, &qhp->wq, &schp->cq, &rchp->cq, c4iw_create_qp()
1700 ret = insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid); c4iw_create_qp()
1725 if (t4_sq_onchip(&qhp->wq.sq)) { c4iw_create_qp()
1735 uresp.sqid = qhp->wq.sq.qid; c4iw_create_qp()
1736 uresp.sq_size = qhp->wq.sq.size; c4iw_create_qp()
1737 uresp.sq_memsize = qhp->wq.sq.memsize; c4iw_create_qp()
1738 uresp.rqid = qhp->wq.rq.qid; c4iw_create_qp()
1739 uresp.rq_size = qhp->wq.rq.size; c4iw_create_qp()
1740 uresp.rq_memsize = qhp->wq.rq.memsize; c4iw_create_qp()
1761 mm1->addr = qhp->wq.sq.phys_addr; c4iw_create_qp()
1762 mm1->len = PAGE_ALIGN(qhp->wq.sq.memsize); c4iw_create_qp()
1765 mm2->addr = virt_to_phys(qhp->wq.rq.queue); c4iw_create_qp()
1766 mm2->len = PAGE_ALIGN(qhp->wq.rq.memsize); c4iw_create_qp()
1769 mm3->addr = (__force unsigned long)qhp->wq.sq.udb; c4iw_create_qp()
1773 mm4->addr = (__force unsigned long)qhp->wq.rq.udb; c4iw_create_qp()
1784 qhp->ibqp.qp_num = qhp->wq.sq.qid; c4iw_create_qp()
1789 qhp->wq.sq.qid, qhp->wq.sq.size, qhp->wq.sq.memsize, c4iw_create_qp()
1790 attrs->cap.max_send_wr, qhp->wq.rq.qid, qhp->wq.rq.size, c4iw_create_qp()
1791 qhp->wq.rq.memsize, attrs->cap.max_recv_wr); c4iw_create_qp()
1804 remove_handle(rhp, &rhp->qpidr, qhp->wq.sq.qid); c4iw_create_qp()
1806 destroy_qp(&rhp->rdev, &qhp->wq, c4iw_create_qp()
H A Ddevice.c117 void c4iw_log_wr_stats(struct t4_wq *wq, struct t4_cqe *cqe) c4iw_log_wr_stats() argument
122 if (!wq->rdev->wr_log) c4iw_log_wr_stats()
125 idx = (atomic_inc_return(&wq->rdev->wr_log_idx) - 1) & c4iw_log_wr_stats()
126 (wq->rdev->wr_log_size - 1); c4iw_log_wr_stats()
127 le.poll_sge_ts = cxgb4_read_sge_timestamp(wq->rdev->lldi.ports[0]); c4iw_log_wr_stats()
132 le.qid = wq->sq.qid; c4iw_log_wr_stats()
134 le.post_host_ts = wq->sq.sw_sq[wq->sq.cidx].host_ts; c4iw_log_wr_stats()
135 le.post_sge_ts = wq->sq.sw_sq[wq->sq.cidx].sge_ts; c4iw_log_wr_stats()
138 le.qid = wq->rq.qid; c4iw_log_wr_stats()
140 le.post_host_ts = wq->rq.sw_rq[wq->rq.cidx].host_ts; c4iw_log_wr_stats()
141 le.post_sge_ts = wq->rq.sw_rq[wq->rq.cidx].sge_ts; c4iw_log_wr_stats()
144 wq->rdev->wr_log[idx] = le; c4iw_log_wr_stats()
235 if (id != qp->wq.sq.qid) dump_qp()
257 qp->wq.sq.qid, qp->wq.rq.qid, dump_qp()
259 qp->wq.sq.flags & T4_SQ_ONCHIP, dump_qp()
281 qp->wq.sq.qid, qp->wq.rq.qid, dump_qp()
283 qp->wq.sq.flags & T4_SQ_ONCHIP, dump_qp()
295 qp->wq.sq.qid, qp->wq.rq.qid, dump_qp()
297 qp->wq.sq.flags & T4_SQ_ONCHIP); dump_qp()
1249 t4_disable_wq_db(&qp->wq); disable_qp_db()
1271 t4_enable_wq_db(&qp->wq); enable_qp_db()
1278 t4_ring_sq_db(&qp->wq, qp->wq.sq.wq_pidx_inc, resume_rc_qp()
1280 qp->wq.sq.wq_pidx_inc = 0; resume_rc_qp()
1281 t4_ring_rq_db(&qp->wq, qp->wq.rq.wq_pidx_inc, resume_rc_qp()
1283 qp->wq.rq.wq_pidx_inc = 0; resume_rc_qp()
1385 qp->wq.sq.qid, recover_lost_dbs()
1386 t4_sq_host_wq_pidx(&qp->wq), recover_lost_dbs()
1387 t4_sq_wq_size(&qp->wq)); recover_lost_dbs()
1392 pci_name(ctx->lldi.pdev), qp->wq.sq.qid); recover_lost_dbs()
1397 qp->wq.sq.wq_pidx_inc = 0; recover_lost_dbs()
1400 qp->wq.rq.qid, recover_lost_dbs()
1401 t4_rq_host_wq_pidx(&qp->wq), recover_lost_dbs()
1402 t4_rq_wq_size(&qp->wq)); recover_lost_dbs()
1408 pci_name(ctx->lldi.pdev), qp->wq.rq.qid); recover_lost_dbs()
1413 qp->wq.rq.wq_pidx_inc = 0; recover_lost_dbs()
/linux-4.1.27/drivers/infiniband/hw/cxgb3/
H A Dcxio_hal.c275 struct t3_wq *wq, struct cxio_ucontext *uctx) cxio_create_qp()
277 int depth = 1UL << wq->size_log2; cxio_create_qp()
278 int rqsize = 1UL << wq->rq_size_log2; cxio_create_qp()
280 wq->qpid = get_qpid(rdev_p, uctx); cxio_create_qp()
281 if (!wq->qpid) cxio_create_qp()
284 wq->rq = kzalloc(depth * sizeof(struct t3_swrq), GFP_KERNEL); cxio_create_qp()
285 if (!wq->rq) cxio_create_qp()
288 wq->rq_addr = cxio_hal_rqtpool_alloc(rdev_p, rqsize); cxio_create_qp()
289 if (!wq->rq_addr) cxio_create_qp()
292 wq->sq = kzalloc(depth * sizeof(struct t3_swsq), GFP_KERNEL); cxio_create_qp()
293 if (!wq->sq) cxio_create_qp()
296 wq->queue = dma_alloc_coherent(&(rdev_p->rnic_info.pdev->dev), cxio_create_qp()
298 &(wq->dma_addr), GFP_KERNEL); cxio_create_qp()
299 if (!wq->queue) cxio_create_qp()
302 memset(wq->queue, 0, depth * sizeof(union t3_wr)); cxio_create_qp()
303 dma_unmap_addr_set(wq, mapping, wq->dma_addr); cxio_create_qp()
304 wq->doorbell = (void __iomem *)rdev_p->rnic_info.kdb_addr; cxio_create_qp()
306 wq->udb = (u64)rdev_p->rnic_info.udbell_physbase + cxio_create_qp()
307 (wq->qpid << rdev_p->qpshift); cxio_create_qp()
308 wq->rdev = rdev_p; cxio_create_qp()
310 wq->qpid, wq->doorbell, (unsigned long long) wq->udb); cxio_create_qp()
313 kfree(wq->sq); cxio_create_qp()
315 cxio_hal_rqtpool_free(rdev_p, wq->rq_addr, rqsize); cxio_create_qp()
317 kfree(wq->rq); cxio_create_qp()
319 put_qpid(rdev_p, wq->qpid, uctx); cxio_create_qp()
336 int cxio_destroy_qp(struct cxio_rdev *rdev_p, struct t3_wq *wq, cxio_destroy_qp() argument
340 (1UL << (wq->size_log2)) cxio_destroy_qp()
341 * sizeof(union t3_wr), wq->queue, cxio_destroy_qp()
342 dma_unmap_addr(wq, mapping)); cxio_destroy_qp()
343 kfree(wq->sq); cxio_destroy_qp()
344 cxio_hal_rqtpool_free(rdev_p, wq->rq_addr, (1UL << wq->rq_size_log2)); cxio_destroy_qp()
345 kfree(wq->rq); cxio_destroy_qp()
346 put_qpid(rdev_p, wq->qpid, uctx); cxio_destroy_qp()
350 static void insert_recv_cqe(struct t3_wq *wq, struct t3_cq *cq) insert_recv_cqe() argument
354 PDBG("%s wq %p cq %p sw_rptr 0x%x sw_wptr 0x%x\n", __func__, insert_recv_cqe()
355 wq, cq, cq->sw_rptr, cq->sw_wptr); insert_recv_cqe()
361 V_CQE_QPID(wq->qpid) | insert_recv_cqe()
368 int cxio_flush_rq(struct t3_wq *wq, struct t3_cq *cq, int count) cxio_flush_rq() argument
373 PDBG("%s wq %p cq %p\n", __func__, wq, cq); cxio_flush_rq()
377 wq->rq_rptr, wq->rq_wptr, count); cxio_flush_rq()
378 ptr = wq->rq_rptr + count; cxio_flush_rq()
379 while (ptr++ != wq->rq_wptr) { cxio_flush_rq()
380 insert_recv_cqe(wq, cq); cxio_flush_rq()
386 static void insert_sq_cqe(struct t3_wq *wq, struct t3_cq *cq, insert_sq_cqe() argument
391 PDBG("%s wq %p cq %p sw_rptr 0x%x sw_wptr 0x%x\n", __func__, insert_sq_cqe()
392 wq, cq, cq->sw_rptr, cq->sw_wptr); insert_sq_cqe()
398 V_CQE_QPID(wq->qpid) | insert_sq_cqe()
407 int cxio_flush_sq(struct t3_wq *wq, struct t3_cq *cq, int count) cxio_flush_sq() argument
411 struct t3_swsq *sqp = wq->sq + Q_PTR2IDX(wq->sq_rptr, wq->sq_size_log2); cxio_flush_sq()
413 ptr = wq->sq_rptr + count; cxio_flush_sq()
414 sqp = wq->sq + Q_PTR2IDX(ptr, wq->sq_size_log2); cxio_flush_sq()
415 while (ptr != wq->sq_wptr) { cxio_flush_sq()
417 insert_sq_cqe(wq, cq, sqp); cxio_flush_sq()
419 sqp = wq->sq + Q_PTR2IDX(ptr, wq->sq_size_log2); cxio_flush_sq()
446 static int cqe_completes_wr(struct t3_cqe *cqe, struct t3_wq *wq) cqe_completes_wr() argument
458 Q_EMPTY(wq->rq_rptr, wq->rq_wptr)) cqe_completes_wr()
464 void cxio_count_scqes(struct t3_cq *cq, struct t3_wq *wq, int *count) cxio_count_scqes() argument
474 ((CQE_OPCODE(*cqe) == T3_READ_RESP) && wq->oldest_read)) && cxio_count_scqes()
475 (CQE_QPID(*cqe) == wq->qpid)) cxio_count_scqes()
482 void cxio_count_rcqes(struct t3_cq *cq, struct t3_wq *wq, int *count) cxio_count_rcqes() argument
493 (CQE_QPID(*cqe) == wq->qpid) && cqe_completes_wr(cqe, wq)) cxio_count_rcqes()
1073 static void flush_completed_wrs(struct t3_wq *wq, struct t3_cq *cq) flush_completed_wrs() argument
1076 __u32 ptr = wq->sq_rptr; flush_completed_wrs()
1077 int count = Q_COUNT(wq->sq_rptr, wq->sq_wptr); flush_completed_wrs()
1079 sqp = wq->sq + Q_PTR2IDX(ptr, wq->sq_size_log2); flush_completed_wrs()
1083 sqp = wq->sq + Q_PTR2IDX(ptr, wq->sq_size_log2); flush_completed_wrs()
1090 __func__, Q_PTR2IDX(ptr, wq->sq_size_log2), flush_completed_wrs()
1102 static void create_read_req_cqe(struct t3_wq *wq, struct t3_cqe *hw_cqe, create_read_req_cqe() argument
1105 read_cqe->u.scqe.wrid_hi = wq->oldest_read->sq_wptr; create_read_req_cqe()
1106 read_cqe->len = wq->oldest_read->read_len; create_read_req_cqe()
1116 static void advance_oldest_read(struct t3_wq *wq) advance_oldest_read() argument
1119 u32 rptr = wq->oldest_read - wq->sq + 1; advance_oldest_read()
1120 u32 wptr = Q_PTR2IDX(wq->sq_wptr, wq->sq_size_log2); advance_oldest_read()
1122 while (Q_PTR2IDX(rptr, wq->sq_size_log2) != wptr) { advance_oldest_read()
1123 wq->oldest_read = wq->sq + Q_PTR2IDX(rptr, wq->sq_size_log2); advance_oldest_read()
1125 if (wq->oldest_read->opcode == T3_READ_REQ) advance_oldest_read()
1129 wq->oldest_read = NULL; advance_oldest_read()
1137 * supply the wq associated with the qpid.
1147 int cxio_poll_cq(struct t3_wq *wq, struct t3_cq *cq, struct t3_cqe *cqe, cxio_poll_cq() argument
1167 if (wq == NULL) { cxio_poll_cq()
1186 if (!wq->oldest_read) { cxio_poll_cq()
1188 wq->error = 1; cxio_poll_cq()
1197 create_read_req_cqe(wq, hw_cqe, &read_cqe); cxio_poll_cq()
1199 advance_oldest_read(wq); cxio_poll_cq()
1207 wq->error = 1; cxio_poll_cq()
1211 if (CQE_STATUS(*hw_cqe) || wq->error) { cxio_poll_cq()
1212 *cqe_flushed = wq->error; cxio_poll_cq()
1213 wq->error = 1; cxio_poll_cq()
1233 Q_EMPTY(wq->rq_rptr, wq->rq_wptr)) { cxio_poll_cq()
1249 * then we complete this with TPT_ERR_MSN and mark the wq in cxio_poll_cq()
1253 if (Q_EMPTY(wq->rq_rptr, wq->rq_wptr)) { cxio_poll_cq()
1254 wq->error = 1; cxio_poll_cq()
1259 if (unlikely((CQE_WRID_MSN(*hw_cqe) != (wq->rq_rptr + 1)))) { cxio_poll_cq()
1260 wq->error = 1; cxio_poll_cq()
1278 if (!SW_CQE(*hw_cqe) && (CQE_WRID_SQ_WPTR(*hw_cqe) != wq->sq_rptr)) { cxio_poll_cq()
1283 Q_PTR2IDX(CQE_WRID_SQ_WPTR(*hw_cqe), wq->sq_size_log2)); cxio_poll_cq()
1284 sqp = wq->sq + cxio_poll_cq()
1285 Q_PTR2IDX(CQE_WRID_SQ_WPTR(*hw_cqe), wq->sq_size_log2); cxio_poll_cq()
1300 wq->sq_rptr = CQE_WRID_SQ_WPTR(*hw_cqe); cxio_poll_cq()
1302 Q_PTR2IDX(wq->sq_rptr, wq->sq_size_log2)); cxio_poll_cq()
1303 *cookie = wq->sq[Q_PTR2IDX(wq->sq_rptr, wq->sq_size_log2)].wr_id; cxio_poll_cq()
1304 wq->sq_rptr++; cxio_poll_cq()
1307 Q_PTR2IDX(wq->rq_rptr, wq->rq_size_log2)); cxio_poll_cq()
1308 *cookie = wq->rq[Q_PTR2IDX(wq->rq_rptr, wq->rq_size_log2)].wr_id; cxio_poll_cq()
1309 if (wq->rq[Q_PTR2IDX(wq->rq_rptr, wq->rq_size_log2)].pbl_addr) cxio_poll_cq()
1310 cxio_hal_pblpool_free(wq->rdev, cxio_poll_cq()
1311 wq->rq[Q_PTR2IDX(wq->rq_rptr, cxio_poll_cq()
1312 wq->rq_size_log2)].pbl_addr, T3_STAG0_PBL_SIZE); cxio_poll_cq()
1313 BUG_ON(Q_EMPTY(wq->rq_rptr, wq->rq_wptr)); cxio_poll_cq()
1314 wq->rq_rptr++; cxio_poll_cq()
1321 flush_completed_wrs(wq, cq); cxio_poll_cq()
274 cxio_create_qp(struct cxio_rdev *rdev_p, u32 kernel_domain, struct t3_wq *wq, struct cxio_ucontext *uctx) cxio_create_qp() argument
H A Diwch_qp.c150 u8 *flit_cnt, int *wr_cnt, struct t3_wq *wq) build_fastreg()
174 wqe = (union t3_wr *)(wq->queue + build_fastreg()
175 Q_PTR2IDX((wq->wptr+1), wq->size_log2)); build_fastreg()
177 Q_GENBIT(wq->wptr + 1, wq->size_log2), build_fastreg()
280 qhp->wq.rq[Q_PTR2IDX(qhp->wq.rq_wptr, build_rdma_recv()
281 qhp->wq.rq_size_log2)].wr_id = wr->wr_id; build_rdma_recv()
282 qhp->wq.rq[Q_PTR2IDX(qhp->wq.rq_wptr, build_rdma_recv()
283 qhp->wq.rq_size_log2)].pbl_addr = 0; build_rdma_recv()
343 qhp->wq.rq[Q_PTR2IDX(qhp->wq.rq_wptr, build_zero_stag_recv()
344 qhp->wq.rq_size_log2)].wr_id = wr->wr_id; build_zero_stag_recv()
345 qhp->wq.rq[Q_PTR2IDX(qhp->wq.rq_wptr, build_zero_stag_recv()
346 qhp->wq.rq_size_log2)].pbl_addr = pbl_addr; build_zero_stag_recv()
372 num_wrs = Q_FREECNT(qhp->wq.sq_rptr, qhp->wq.sq_wptr, iwch_post_send()
373 qhp->wq.sq_size_log2); iwch_post_send()
384 idx = Q_PTR2IDX(qhp->wq.wptr, qhp->wq.size_log2); iwch_post_send()
385 wqe = (union t3_wr *) (qhp->wq.queue + idx); iwch_post_send()
391 sqp = qhp->wq.sq + iwch_post_send()
392 Q_PTR2IDX(qhp->wq.sq_wptr, qhp->wq.sq_size_log2); iwch_post_send()
414 if (!qhp->wq.oldest_read) iwch_post_send()
415 qhp->wq.oldest_read = sqp; iwch_post_send()
420 &wr_cnt, &qhp->wq); iwch_post_send()
435 wqe->send.wrid.id0.hi = qhp->wq.sq_wptr; iwch_post_send()
438 sqp->sq_wptr = qhp->wq.sq_wptr; iwch_post_send()
443 Q_GENBIT(qhp->wq.wptr, qhp->wq.size_log2), iwch_post_send()
446 PDBG("%s cookie 0x%llx wq idx 0x%x swsq idx %ld opcode %d\n", iwch_post_send()
448 Q_PTR2IDX(qhp->wq.sq_wptr, qhp->wq.sq_size_log2), iwch_post_send()
452 qhp->wq.wptr += wr_cnt; iwch_post_send()
453 ++(qhp->wq.sq_wptr); iwch_post_send()
456 if (cxio_wq_db_enabled(&qhp->wq)) iwch_post_send()
457 ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid); iwch_post_send()
482 num_wrs = Q_FREECNT(qhp->wq.rq_rptr, qhp->wq.rq_wptr, iwch_post_receive()
483 qhp->wq.rq_size_log2) - 1; iwch_post_receive()
494 idx = Q_PTR2IDX(qhp->wq.wptr, qhp->wq.size_log2); iwch_post_receive()
495 wqe = (union t3_wr *) (qhp->wq.queue + idx); iwch_post_receive()
508 Q_GENBIT(qhp->wq.wptr, qhp->wq.size_log2), iwch_post_receive()
512 idx, qhp->wq.rq_wptr, qhp->wq.rq_rptr, wqe); iwch_post_receive()
513 ++(qhp->wq.rq_wptr); iwch_post_receive()
514 ++(qhp->wq.wptr); iwch_post_receive()
519 if (cxio_wq_db_enabled(&qhp->wq)) iwch_post_receive()
520 ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid); iwch_post_receive()
555 num_wrs = Q_FREECNT(qhp->wq.sq_rptr, qhp->wq.sq_wptr, iwch_bind_mw()
556 qhp->wq.sq_size_log2); iwch_bind_mw()
561 idx = Q_PTR2IDX(qhp->wq.wptr, qhp->wq.size_log2); iwch_bind_mw()
564 wqe = (union t3_wr *) (qhp->wq.queue + idx); iwch_bind_mw()
588 wqe->send.wrid.id0.hi = qhp->wq.sq_wptr; iwch_bind_mw()
589 sqp = qhp->wq.sq + Q_PTR2IDX(qhp->wq.sq_wptr, qhp->wq.sq_size_log2); iwch_bind_mw()
592 sqp->sq_wptr = qhp->wq.sq_wptr; iwch_bind_mw()
598 Q_GENBIT(qhp->wq.wptr, qhp->wq.size_log2), 0, iwch_bind_mw()
600 ++(qhp->wq.wptr); iwch_bind_mw()
601 ++(qhp->wq.sq_wptr); iwch_bind_mw()
604 if (cxio_wq_db_enabled(&qhp->wq)) iwch_bind_mw()
605 ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid); iwch_bind_mw()
822 cxio_count_rcqes(&rchp->cq, &qhp->wq, &count); __flush_qp()
823 flushed = cxio_flush_rq(&qhp->wq, &rchp->cq, count); __flush_qp()
836 cxio_count_scqes(&schp->cq, &qhp->wq, &count); __flush_qp()
837 flushed = cxio_flush_sq(&qhp->wq, &schp->cq, count); __flush_qp()
861 cxio_set_wq_in_error(&qhp->wq); flush_qp()
884 union t3_wr *wqe = qhp->wq.queue; iwch_rqes_posted()
903 init_attr.qpid = qhp->wq.qpid; rdma_init()
907 init_attr.rq_addr = qhp->wq.rq_addr; rdma_init()
908 init_attr.rq_size = 1 << qhp->wq.rq_size_log2; rdma_init()
924 init_attr.qp_dma_addr = qhp->wq.dma_addr; rdma_init()
925 init_attr.qp_dma_size = (1UL << qhp->wq.size_log2); rdma_init()
962 qhp, qhp->wq.qpid, qhp->ep, qhp->attr.state, iwch_modify_qp()
1057 cxio_set_wq_in_error(&qhp->wq); iwch_modify_qp()
1103 if (!Q_EMPTY(qhp->wq.sq_rptr, qhp->wq.sq_wptr) || iwch_modify_qp()
1104 !Q_EMPTY(qhp->wq.rq_rptr, qhp->wq.rq_wptr)) { iwch_modify_qp()
1127 qhp->wq.qpid); iwch_modify_qp()
149 build_fastreg(union t3_wr *wqe, struct ib_send_wr *wr, u8 *flit_cnt, int *wr_cnt, struct t3_wq *wq) build_fastreg() argument
H A Dcxio_hal.h165 int cxio_create_qp(struct cxio_rdev *rdev, u32 kernel_domain, struct t3_wq *wq,
167 int cxio_destroy_qp(struct cxio_rdev *rdev, struct t3_wq *wq,
190 int cxio_flush_rq(struct t3_wq *wq, struct t3_cq *cq, int count);
191 int cxio_flush_sq(struct t3_wq *wq, struct t3_cq *cq, int count);
192 void cxio_count_rcqes(struct t3_cq *cq, struct t3_wq *wq, int *count);
193 void cxio_count_scqes(struct t3_cq *cq, struct t3_wq *wq, int *count);
195 int cxio_poll_cq(struct t3_wq *wq, struct t3_cq *cq, struct t3_cqe *cqe,
H A Diwch_cq.c49 struct t3_wq *wq; iwch_poll_cq_one() local
62 wq = NULL; iwch_poll_cq_one()
65 wq = &(qhp->wq); iwch_poll_cq_one()
67 ret = cxio_poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie, iwch_poll_cq_one()
190 if (wq) iwch_poll_cq_one()
H A Diwch_provider.c872 remove_handle(rhp, &rhp->qpidr, qhp->wq.qpid); iwch_destroy_qp()
879 cxio_destroy_qp(&rhp->rdev, &qhp->wq, iwch_destroy_qp()
883 ib_qp, qhp->wq.qpid, qhp); iwch_destroy_qp()
935 * Kernel users need more wq space for fastreg WRs which can take iwch_create_qp()
947 qhp->wq.size_log2 = ilog2(wqsize); iwch_create_qp()
948 qhp->wq.rq_size_log2 = ilog2(rqsize); iwch_create_qp()
949 qhp->wq.sq_size_log2 = ilog2(sqsize); iwch_create_qp()
950 if (cxio_create_qp(&rhp->rdev, !udata, &qhp->wq, iwch_create_qp()
987 if (insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.qpid)) { iwch_create_qp()
988 cxio_destroy_qp(&rhp->rdev, &qhp->wq, iwch_create_qp()
1011 uresp.qpid = qhp->wq.qpid; iwch_create_qp()
1012 uresp.size_log2 = qhp->wq.size_log2; iwch_create_qp()
1013 uresp.sq_size_log2 = qhp->wq.sq_size_log2; iwch_create_qp()
1014 uresp.rq_size_log2 = qhp->wq.rq_size_log2; iwch_create_qp()
1028 mm1->addr = virt_to_phys(qhp->wq.queue); iwch_create_qp()
1032 mm2->addr = qhp->wq.udb & PAGE_MASK; iwch_create_qp()
1036 qhp->ibqp.qp_num = qhp->wq.qpid; iwch_create_qp()
1041 qhp->wq.qpid, qhp, (unsigned long long) qhp->wq.dma_addr, iwch_create_qp()
1042 1 << qhp->wq.size_log2, qhp->wq.rq_addr); iwch_create_qp()
H A Diwch_ev.c66 qhp->attr.state, qhp->wq.qpid, CQE_STATUS(rsp_msg->cqe)); post_qp_event()
141 __func__, qhp->wq.qpid, qhp->ep); iwch_ev_dispatch()
145 qhp->wq.qpid); iwch_ev_dispatch()
222 CQE_STATUS(rsp_msg->cqe), qhp->wq.qpid); iwch_ev_dispatch()
H A Dcxio_wr.h698 u32 size_log2; /* total wq size */
747 static inline void cxio_set_wq_in_error(struct t3_wq *wq) cxio_set_wq_in_error() argument
749 wq->queue->wq_in_err.err |= 1; cxio_set_wq_in_error()
752 static inline void cxio_disable_wq_db(struct t3_wq *wq) cxio_disable_wq_db() argument
754 wq->queue->wq_in_err.err |= 2; cxio_disable_wq_db()
757 static inline void cxio_enable_wq_db(struct t3_wq *wq) cxio_enable_wq_db() argument
759 wq->queue->wq_in_err.err &= ~2; cxio_enable_wq_db()
762 static inline int cxio_wq_db_enabled(struct t3_wq *wq) cxio_wq_db_enabled() argument
764 return !(wq->queue->wq_in_err.err & 2); cxio_wq_db_enabled()
H A Diwch.c70 cxio_disable_wq_db(&qhp->wq); disable_qp_db()
79 ring_doorbell(qhp->rhp->rdev.ctrl_qp.doorbell, qhp->wq.qpid); enable_qp_db()
80 cxio_enable_wq_db(&qhp->wq); enable_qp_db()
/linux-4.1.27/include/linux/
H A Dwait.h212 #define ___wait_event(wq, condition, state, exclusive, ret, cmd) \
225 long __int = prepare_to_wait_event(&wq, &__wait, state);\
233 abort_exclusive_wait(&wq, &__wait, \
242 finish_wait(&wq, &__wait); \
246 #define __wait_event(wq, condition) \
247 (void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
252 * @wq: the waitqueue to wait on
257 * the waitqueue @wq is woken up.
262 #define wait_event(wq, condition) \
267 __wait_event(wq, condition); \
270 #define __io_wait_event(wq, condition) \
271 (void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
277 #define io_wait_event(wq, condition) \
282 __io_wait_event(wq, condition); \
285 #define __wait_event_freezable(wq, condition) \
286 ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0, \
291 * @wq: the waitqueue to wait on
296 * @condition is checked each time the waitqueue @wq is woken up.
301 #define wait_event_freezable(wq, condition) \
306 __ret = __wait_event_freezable(wq, condition); \
310 #define __wait_event_timeout(wq, condition, timeout) \
311 ___wait_event(wq, ___wait_cond_timeout(condition), \
317 * @wq: the waitqueue to wait on
323 * the waitqueue @wq is woken up.
334 #define wait_event_timeout(wq, condition, timeout) \
339 __ret = __wait_event_timeout(wq, condition, timeout); \
343 #define __wait_event_freezable_timeout(wq, condition, timeout) \
344 ___wait_event(wq, ___wait_cond_timeout(condition), \
352 #define wait_event_freezable_timeout(wq, condition, timeout) \
357 __ret = __wait_event_freezable_timeout(wq, condition, timeout); \
361 #define __wait_event_cmd(wq, condition, cmd1, cmd2) \
362 (void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
367 * @wq: the waitqueue to wait on
374 * the waitqueue @wq is woken up.
379 #define wait_event_cmd(wq, condition, cmd1, cmd2) \
383 __wait_event_cmd(wq, condition, cmd1, cmd2); \
386 #define __wait_event_interruptible(wq, condition) \
387 ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0, \
392 * @wq: the waitqueue to wait on
397 * The @condition is checked each time the waitqueue @wq is woken up.
405 #define wait_event_interruptible(wq, condition) \
410 __ret = __wait_event_interruptible(wq, condition); \
414 #define __wait_event_interruptible_timeout(wq, condition, timeout) \
415 ___wait_event(wq, ___wait_cond_timeout(condition), \
421 * @wq: the waitqueue to wait on
427 * The @condition is checked each time the waitqueue @wq is woken up.
439 #define wait_event_interruptible_timeout(wq, condition, timeout) \
444 __ret = __wait_event_interruptible_timeout(wq, \
449 #define __wait_event_hrtimeout(wq, condition, timeout, state) \
462 __ret = ___wait_event(wq, condition, state, 0, 0, \
476 * @wq: the waitqueue to wait on
482 * The @condition is checked each time the waitqueue @wq is woken up.
490 #define wait_event_hrtimeout(wq, condition, timeout) \
495 __ret = __wait_event_hrtimeout(wq, condition, timeout, \
502 * @wq: the waitqueue to wait on
508 * The @condition is checked each time the waitqueue @wq is woken up.
516 #define wait_event_interruptible_hrtimeout(wq, condition, timeout) \
521 __ret = __wait_event_hrtimeout(wq, condition, timeout, \
526 #define __wait_event_interruptible_exclusive(wq, condition) \
527 ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0, \
530 #define wait_event_interruptible_exclusive(wq, condition) \
535 __ret = __wait_event_interruptible_exclusive(wq, condition);\
540 #define __wait_event_freezable_exclusive(wq, condition) \
541 ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0, \
544 #define wait_event_freezable_exclusive(wq, condition) \
549 __ret = __wait_event_freezable_exclusive(wq, condition);\
554 #define __wait_event_interruptible_locked(wq, condition, exclusive, irq) \
562 __add_wait_queue_tail(&(wq), &__wait); \
569 spin_unlock_irq(&(wq).lock); \
571 spin_unlock(&(wq).lock); \
574 spin_lock_irq(&(wq).lock); \
576 spin_lock(&(wq).lock); \
578 __remove_wait_queue(&(wq), &__wait); \
586 * @wq: the waitqueue to wait on
591 * The @condition is checked each time the waitqueue @wq is woken up.
593 * It must be called with wq.lock being held. This spinlock is
607 #define wait_event_interruptible_locked(wq, condition) \
609 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 0))
613 * @wq: the waitqueue to wait on
618 * The @condition is checked each time the waitqueue @wq is woken up.
620 * It must be called with wq.lock being held. This spinlock is
634 #define wait_event_interruptible_locked_irq(wq, condition) \
636 ? 0 : __wait_event_interruptible_locked(wq, condition, 0, 1))
640 * @wq: the waitqueue to wait on
645 * The @condition is checked each time the waitqueue @wq is woken up.
647 * It must be called with wq.lock being held. This spinlock is
665 #define wait_event_interruptible_exclusive_locked(wq, condition) \
667 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 0))
671 * @wq: the waitqueue to wait on
676 * The @condition is checked each time the waitqueue @wq is woken up.
678 * It must be called with wq.lock being held. This spinlock is
696 #define wait_event_interruptible_exclusive_locked_irq(wq, condition) \
698 ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 1))
701 #define __wait_event_killable(wq, condition) \
702 ___wait_event(wq, condition, TASK_KILLABLE, 0, 0, schedule())
706 * @wq: the waitqueue to wait on
711 * The @condition is checked each time the waitqueue @wq is woken up.
719 #define wait_event_killable(wq, condition) \
724 __ret = __wait_event_killable(wq, condition); \
729 #define __wait_event_lock_irq(wq, condition, lock, cmd) \
730 (void)___wait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, 0, \
741 * @wq: the waitqueue to wait on
750 * the waitqueue @wq is woken up.
759 #define wait_event_lock_irq_cmd(wq, condition, lock, cmd) \
763 __wait_event_lock_irq(wq, condition, lock, cmd); \
771 * @wq: the waitqueue to wait on
778 * the waitqueue @wq is woken up.
786 #define wait_event_lock_irq(wq, condition, lock) \
790 __wait_event_lock_irq(wq, condition, lock, ); \
794 #define __wait_event_interruptible_lock_irq(wq, condition, lock, cmd) \
795 ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 0, 0, \
805 * @wq: the waitqueue to wait on
814 * checked each time the waitqueue @wq is woken up.
826 #define wait_event_interruptible_lock_irq_cmd(wq, condition, lock, cmd) \
830 __ret = __wait_event_interruptible_lock_irq(wq, \
839 * @wq: the waitqueue to wait on
846 * checked each time the waitqueue @wq is woken up.
857 #define wait_event_interruptible_lock_irq(wq, condition, lock) \
861 __ret = __wait_event_interruptible_lock_irq(wq, \
866 #define __wait_event_interruptible_lock_irq_timeout(wq, condition, \
868 ___wait_event(wq, ___wait_cond_timeout(condition), \
878 * @wq: the waitqueue to wait on
886 * checked each time the waitqueue @wq is woken up.
898 #define wait_event_interruptible_lock_irq_timeout(wq, condition, lock, \
904 wq, condition, lock, timeout); \
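
The wait.h macros listed above all follow the same pattern: a wait_queue_head_t, a condition that is re-checked every time the queue is woken, and a wake_up() on the other side once the condition has been made true. A minimal sketch of that pattern, assuming an invented flag and queue name (nothing below is taken from the files above):

#include <linux/wait.h>
#include <linux/sched.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_wq);        /* the waitqueue to wait on */
static bool demo_ready;                         /* the condition checked on each wakeup */

static int demo_consumer(void)
{
        /* Sleeps interruptibly until demo_ready is true; returns 0 on success,
         * -ERESTARTSYS if a signal arrived first. */
        if (wait_event_interruptible(demo_wq, demo_ready))
                return -ERESTARTSYS;
        return 0;
}

static void demo_producer(void)
{
        demo_ready = true;      /* make the condition true first ...         */
        wake_up(&demo_wq);      /* ... then wake waiters so they re-check it */
}
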
H A Dworkqueue.h118 struct workqueue_struct *wq; member in struct:delayed_work
316 WQ_MAX_UNBOUND_PER_CPU = 4, /* 4 * #cpus for unbound wq */
320 /* unbound wq's aren't per-cpu, scale max_active according to #cpus */
421 extern void destroy_workqueue(struct workqueue_struct *wq);
425 int apply_workqueue_attrs(struct workqueue_struct *wq,
428 extern bool queue_work_on(int cpu, struct workqueue_struct *wq,
430 extern bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
432 extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
435 extern void flush_workqueue(struct workqueue_struct *wq);
436 extern void drain_workqueue(struct workqueue_struct *wq);
450 extern void workqueue_set_max_active(struct workqueue_struct *wq,
453 extern bool workqueue_congested(int cpu, struct workqueue_struct *wq);
461 * @wq: workqueue to use
469 static inline bool queue_work(struct workqueue_struct *wq, queue_work() argument
472 return queue_work_on(WORK_CPU_UNBOUND, wq, work); queue_work()
477 * @wq: workqueue to use
483 static inline bool queue_delayed_work(struct workqueue_struct *wq, queue_delayed_work() argument
487 return queue_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay); queue_delayed_work()
492 * @wq: workqueue to use
498 static inline bool mod_delayed_work(struct workqueue_struct *wq, mod_delayed_work() argument
502 return mod_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay); mod_delayed_work()
586 int workqueue_sysfs_register(struct workqueue_struct *wq);
588 static inline int workqueue_sysfs_register(struct workqueue_struct *wq) workqueue_sysfs_register() argument
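
The workqueue.h declarations above (queue_work(), flush_workqueue(), destroy_workqueue(), ...) are normally used together as one small lifecycle. A hedged sketch of that lifecycle as a trivial module; the workqueue name, flags and handler are invented for illustration:

#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *demo_wq;

static void demo_work_fn(struct work_struct *work)
{
        pr_info("demo work ran\n");
}

static DECLARE_WORK(demo_work, demo_work_fn);

static int __init demo_init(void)
{
        /* per-CPU workqueue without a rescuer; max_active 0 selects the default */
        demo_wq = alloc_workqueue("demo_wq", 0, 0);
        if (!demo_wq)
                return -ENOMEM;
        queue_work(demo_wq, &demo_work);        /* the queue_work() wrapper listed above */
        return 0;
}

static void __exit demo_exit(void)
{
        flush_workqueue(demo_wq);       /* wait for anything still queued */
        destroy_workqueue(demo_wq);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
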
H A Dpadata.h143 * @wq: The workqueue in use.
155 struct workqueue_struct *wq; member in struct:padata_instance
168 struct workqueue_struct *wq);
169 extern struct padata_instance *padata_alloc(struct workqueue_struct *wq,
H A Dfreezer.h250 #define wait_event_freezekillable_unsafe(wq, condition) \
254 __retval = wait_event_killable(wq, (condition)); \
296 #define wait_event_freezekillable_unsafe(wq, condition) \
297 wait_event_killable(wq, condition)
H A Dnet.h103 * @wq: wait queue for several uses
114 struct socket_wq __rcu *wq; member in struct:socket
/linux-4.1.27/kernel/
H A Dworkqueue.c130 * PW: wq_pool_mutex and wq->mutex protected for writes. Either for reads.
132 * PWR: wq_pool_mutex and wq->mutex protected for writes. Either or
135 * WQ: wq->mutex protected.
137 * WR: wq->mutex protected for writes. Sched-RCU protected for reads.
200 struct workqueue_struct *wq; /* I: the owning workqueue */ member in struct:pool_workqueue
209 struct list_head pwqs_node; /* WR: node on wq->pwqs */
210 struct list_head mayday_node; /* MD: node on wq->maydays */
216 * determined without grabbing wq->mutex.
238 struct list_head pwqs; /* WR: all pwqs of this wq */
241 struct mutex mutex; /* protects this wq */
302 static DEFINE_SPINLOCK(wq_mayday_lock); /* protects wq->maydays list */
340 static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
350 #define assert_rcu_or_wq_mutex(wq) \
352 lockdep_is_held(&wq->mutex), \
353 "sched RCU or wq->mutex should be held")
355 #define assert_rcu_or_wq_mutex_or_pool_mutex(wq) \
357 lockdep_is_held(&wq->mutex) || \
359 "sched RCU, wq->mutex or wq_pool_mutex should be held")
401 * @wq: the target workqueue
403 * This must be called either with wq->mutex held or sched RCU read locked.
410 #define for_each_pwq(pwq, wq) \
411 list_for_each_entry_rcu((pwq), &(wq)->pwqs, pwqs_node) \
412 if (({ assert_rcu_or_wq_mutex(wq); false; })) { } \
562 * @wq: the target workqueue
565 * This must be called with any of wq_pool_mutex, wq->mutex or sched RCU
572 static struct pool_workqueue *unbound_pwq_by_node(struct workqueue_struct *wq, unbound_pwq_by_node() argument
575 assert_rcu_or_wq_mutex_or_pool_mutex(wq); unbound_pwq_by_node()
584 return wq->dfl_pwq; unbound_pwq_by_node()
586 return rcu_dereference_raw(wq->numa_pwq_tbl[node]); unbound_pwq_by_node()
1093 if (WARN_ON_ONCE(!(pwq->wq->flags & WQ_UNBOUND))) put_pwq()
1184 if (atomic_dec_and_test(&pwq->wq->nr_pwqs_to_flush)) pwq_dec_nr_in_flight()
1185 complete(&pwq->wq->first_flusher->done); pwq_dec_nr_in_flight()
1253 * item is queued on pwq->wq, and both updating work->data to point try_to_grab_pending()
1329 static bool is_chained_work(struct workqueue_struct *wq) is_chained_work() argument
1335 * Return %true iff I'm a worker executing a work item on @wq. If is_chained_work()
1338 return worker && worker->current_pwq->wq == wq; is_chained_work()
1341 static void __queue_work(int cpu, struct workqueue_struct *wq, __queue_work() argument
1361 if (unlikely(wq->flags & __WQ_DRAINING) && __queue_work()
1362 WARN_ON_ONCE(!is_chained_work(wq))) __queue_work()
1369 if (!(wq->flags & WQ_UNBOUND)) __queue_work()
1370 pwq = per_cpu_ptr(wq->cpu_pwqs, cpu); __queue_work()
1372 pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu)); __queue_work()
1387 if (worker && worker->current_pwq->wq == wq) { __queue_work()
1407 if (wq->flags & WQ_UNBOUND) { __queue_work()
1414 wq->name, cpu); __queue_work()
1445 * @wq: workqueue to use
1453 bool queue_work_on(int cpu, struct workqueue_struct *wq, queue_work_on() argument
1462 __queue_work(cpu, wq, work); queue_work_on()
1476 __queue_work(dwork->cpu, dwork->wq, &dwork->work); delayed_work_timer_fn()
1480 static void __queue_delayed_work(int cpu, struct workqueue_struct *wq, __queue_delayed_work() argument
1498 __queue_work(cpu, wq, &dwork->work); __queue_delayed_work()
1504 dwork->wq = wq; __queue_delayed_work()
1517 * @wq: workqueue to use
1525 bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq, queue_delayed_work_on() argument
1536 __queue_delayed_work(cpu, wq, dwork, delay); queue_delayed_work_on()
1548 * @wq: workqueue to use
1563 bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq, mod_delayed_work_on() argument
1574 __queue_delayed_work(cpu, wq, dwork, delay); mod_delayed_work_on()
1842 struct workqueue_struct *wq = pwq->wq; send_mayday() local
1846 if (!wq->rescuer) send_mayday()
1852 * If @pwq is for an unbound wq, its base ref may be put at send_mayday()
1857 list_add_tail(&pwq->mayday_node, &wq->maydays); send_mayday()
1858 wake_up_process(wq->rescuer->task); send_mayday()
1868 spin_lock(&wq_mayday_lock); /* for wq->maydays */ pool_mayday_timeout()
2003 bool cpu_intensive = pwq->wq->flags & WQ_CPU_INTENSIVE;
2073 lock_map_acquire_read(&pwq->wq->lockdep_map);
2083 lock_map_release(&pwq->wq->lockdep_map);
2257 struct workqueue_struct *wq = rescuer->rescue_wq; rescuer_thread() local
2273 * shouldn't have any work pending, but @wq->maydays may still have rescuer_thread()
2276 * @wq->maydays processing before acting on should_stop so that the rescuer_thread()
2284 while (!list_empty(&wq->maydays)) { rescuer_thread()
2285 struct pool_workqueue *pwq = list_first_entry(&wq->maydays, rescuer_thread()
2324 list_move_tail(&pwq->mayday_node, &wq->maydays); rescuer_thread()
2441 * @wq: workqueue being flushed
2451 * @flush_color, @wq->nr_pwqs_to_flush is updated accordingly, pwq
2454 * The caller should have initialized @wq->first_flusher prior to
2464 * mutex_lock(wq->mutex).
2470 static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq, flush_workqueue_prep_pwqs() argument
2477 WARN_ON_ONCE(atomic_read(&wq->nr_pwqs_to_flush)); flush_workqueue_prep_pwqs()
2478 atomic_set(&wq->nr_pwqs_to_flush, 1); flush_workqueue_prep_pwqs()
2481 for_each_pwq(pwq, wq) { for_each_pwq()
2491 atomic_inc(&wq->nr_pwqs_to_flush); for_each_pwq()
2504 if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_pwqs_to_flush))
2505 complete(&wq->first_flusher->done);
2512 * @wq: workqueue to flush
2517 void flush_workqueue(struct workqueue_struct *wq) flush_workqueue() argument
2526 lock_map_acquire(&wq->lockdep_map); flush_workqueue()
2527 lock_map_release(&wq->lockdep_map); flush_workqueue()
2529 mutex_lock(&wq->mutex); flush_workqueue()
2534 next_color = work_next_color(wq->work_color); flush_workqueue()
2536 if (next_color != wq->flush_color) { flush_workqueue()
2542 WARN_ON_ONCE(!list_empty(&wq->flusher_overflow)); flush_workqueue()
2543 this_flusher.flush_color = wq->work_color; flush_workqueue()
2544 wq->work_color = next_color; flush_workqueue()
2546 if (!wq->first_flusher) { flush_workqueue()
2548 WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color); flush_workqueue()
2550 wq->first_flusher = &this_flusher; flush_workqueue()
2552 if (!flush_workqueue_prep_pwqs(wq, wq->flush_color, flush_workqueue()
2553 wq->work_color)) { flush_workqueue()
2555 wq->flush_color = next_color; flush_workqueue()
2556 wq->first_flusher = NULL; flush_workqueue()
2561 WARN_ON_ONCE(wq->flush_color == this_flusher.flush_color); flush_workqueue()
2562 list_add_tail(&this_flusher.list, &wq->flusher_queue); flush_workqueue()
2563 flush_workqueue_prep_pwqs(wq, -1, wq->work_color); flush_workqueue()
2571 list_add_tail(&this_flusher.list, &wq->flusher_overflow); flush_workqueue()
2574 mutex_unlock(&wq->mutex); flush_workqueue()
2584 if (wq->first_flusher != &this_flusher) flush_workqueue()
2587 mutex_lock(&wq->mutex); flush_workqueue()
2590 if (wq->first_flusher != &this_flusher) flush_workqueue()
2593 wq->first_flusher = NULL; flush_workqueue()
2596 WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color); flush_workqueue()
2602 list_for_each_entry_safe(next, tmp, &wq->flusher_queue, list) { flush_workqueue()
2603 if (next->flush_color != wq->flush_color) flush_workqueue()
2609 WARN_ON_ONCE(!list_empty(&wq->flusher_overflow) && flush_workqueue()
2610 wq->flush_color != work_next_color(wq->work_color)); flush_workqueue()
2613 wq->flush_color = work_next_color(wq->flush_color); flush_workqueue()
2616 if (!list_empty(&wq->flusher_overflow)) { flush_workqueue()
2623 list_for_each_entry(tmp, &wq->flusher_overflow, list) flush_workqueue()
2624 tmp->flush_color = wq->work_color; flush_workqueue()
2626 wq->work_color = work_next_color(wq->work_color); flush_workqueue()
2628 list_splice_tail_init(&wq->flusher_overflow, flush_workqueue()
2629 &wq->flusher_queue); flush_workqueue()
2630 flush_workqueue_prep_pwqs(wq, -1, wq->work_color); flush_workqueue()
2633 if (list_empty(&wq->flusher_queue)) { flush_workqueue()
2634 WARN_ON_ONCE(wq->flush_color != wq->work_color); flush_workqueue()
2642 WARN_ON_ONCE(wq->flush_color == wq->work_color); flush_workqueue()
2643 WARN_ON_ONCE(wq->flush_color != next->flush_color); flush_workqueue()
2646 wq->first_flusher = next; flush_workqueue()
2648 if (flush_workqueue_prep_pwqs(wq, wq->flush_color, -1)) flush_workqueue()
2655 wq->first_flusher = NULL; flush_workqueue()
2659 mutex_unlock(&wq->mutex); flush_workqueue()
2665 * @wq: workqueue to drain
2669 * work items on @wq can queue further work items on it. @wq is flushed
2674 void drain_workqueue(struct workqueue_struct *wq) drain_workqueue() argument
2681 * hotter than drain_workqueue() and already looks at @wq->flags. drain_workqueue()
2684 mutex_lock(&wq->mutex); drain_workqueue()
2685 if (!wq->nr_drainers++) drain_workqueue()
2686 wq->flags |= __WQ_DRAINING; drain_workqueue()
2687 mutex_unlock(&wq->mutex); drain_workqueue()
2689 flush_workqueue(wq); drain_workqueue()
2691 mutex_lock(&wq->mutex); drain_workqueue()
2693 for_each_pwq(pwq, wq) { for_each_pwq()
2706 wq->name, flush_cnt); for_each_pwq()
2708 mutex_unlock(&wq->mutex); for_each_pwq()
2712 if (!--wq->nr_drainers)
2713 wq->flags &= ~__WQ_DRAINING;
2714 mutex_unlock(&wq->mutex);
2755 if (pwq->wq->saved_max_active == 1 || pwq->wq->rescuer) start_flush_work()
2756 lock_map_acquire(&pwq->wq->lockdep_map); start_flush_work()
2758 lock_map_acquire_read(&pwq->wq->lockdep_map); start_flush_work()
2759 lock_map_release(&pwq->wq->lockdep_map); start_flush_work()
2907 __queue_work(dwork->cpu, dwork->wq, &dwork->work); flush_delayed_work()
3103 * ->no_numa as it is used for both pool and wq attrs. Instead, copy_workqueue_attrs()
3176 struct workqueue_struct *wq = rcu_free_wq() local
3179 if (!(wq->flags & WQ_UNBOUND)) rcu_free_wq()
3180 free_percpu(wq->cpu_pwqs); rcu_free_wq()
3182 free_workqueue_attrs(wq->unbound_attrs); rcu_free_wq()
3184 kfree(wq->rescuer); rcu_free_wq()
3185 kfree(wq); rcu_free_wq()
3345 struct workqueue_struct *wq = pwq->wq; pwq_unbound_release_workfn() local
3349 if (WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND))) pwq_unbound_release_workfn()
3352 mutex_lock(&wq->mutex); pwq_unbound_release_workfn()
3354 is_last = list_empty(&wq->pwqs); pwq_unbound_release_workfn()
3355 mutex_unlock(&wq->mutex); pwq_unbound_release_workfn()
3364 * If we're the last pwq going away, @wq is already dead and no one pwq_unbound_release_workfn()
3368 call_rcu_sched(&wq->rcu, rcu_free_wq); pwq_unbound_release_workfn()
3381 struct workqueue_struct *wq = pwq->wq; pwq_adjust_max_active() local
3382 bool freezable = wq->flags & WQ_FREEZABLE; pwq_adjust_max_active()
3384 /* for @wq->saved_max_active */ pwq_adjust_max_active()
3385 lockdep_assert_held(&wq->mutex); pwq_adjust_max_active()
3388 if (!freezable && pwq->max_active == wq->saved_max_active) pwq_adjust_max_active()
3399 pwq->max_active = wq->saved_max_active; pwq_adjust_max_active()
3406 * Need to kick a worker after thawed or an unbound wq's pwq_adjust_max_active()
3417 /* initialize newly alloced @pwq which is associated with @wq and @pool */ init_pwq()
3418 static void init_pwq(struct pool_workqueue *pwq, struct workqueue_struct *wq, init_pwq() argument
3426 pwq->wq = wq; init_pwq()
3435 /* sync @pwq with the current state of its associated wq and link it */ link_pwq()
3438 struct workqueue_struct *wq = pwq->wq; link_pwq() local
3440 lockdep_assert_held(&wq->mutex); link_pwq()
3447 pwq->work_color = wq->work_color; link_pwq()
3453 list_add_rcu(&pwq->pwqs_node, &wq->pwqs); link_pwq()
3456 /* obtain a pool matching @attr and create a pwq associating the pool and @wq */ alloc_unbound_pwq()
3457 static struct pool_workqueue *alloc_unbound_pwq(struct workqueue_struct *wq, alloc_unbound_pwq() argument
3475 init_pwq(pwq, wq, pool); alloc_unbound_pwq()
3524 /* install @pwq into @wq's numa_pwq_tbl[] for @node and return the old pwq */ numa_pwq_tbl_install()
3525 static struct pool_workqueue *numa_pwq_tbl_install(struct workqueue_struct *wq, numa_pwq_tbl_install() argument
3532 lockdep_assert_held(&wq->mutex); numa_pwq_tbl_install()
3537 old_pwq = rcu_access_pointer(wq->numa_pwq_tbl[node]); numa_pwq_tbl_install()
3538 rcu_assign_pointer(wq->numa_pwq_tbl[node], pwq); numa_pwq_tbl_install()
3544 struct workqueue_struct *wq; /* target workqueue */ member in struct:apply_wqattrs_ctx
3568 apply_wqattrs_prepare(struct workqueue_struct *wq, apply_wqattrs_prepare() argument
3601 ctx->dfl_pwq = alloc_unbound_pwq(wq, new_attrs); apply_wqattrs_prepare()
3607 ctx->pwq_tbl[node] = alloc_unbound_pwq(wq, tmp_attrs); for_each_node()
3617 ctx->wq = wq;
3634 mutex_lock(&ctx->wq->mutex); apply_wqattrs_commit()
3636 copy_workqueue_attrs(ctx->wq->unbound_attrs, ctx->attrs); apply_wqattrs_commit()
3640 ctx->pwq_tbl[node] = numa_pwq_tbl_install(ctx->wq, node, apply_wqattrs_commit()
3645 swap(ctx->wq->dfl_pwq, ctx->dfl_pwq); apply_wqattrs_commit()
3647 mutex_unlock(&ctx->wq->mutex); apply_wqattrs_commit()
3652 * @wq: the target workqueue
3655 * Apply @attrs to an unbound workqueue @wq. Unless disabled, on NUMA
3666 int apply_workqueue_attrs(struct workqueue_struct *wq, apply_workqueue_attrs() argument
3673 if (WARN_ON(!(wq->flags & WQ_UNBOUND))) apply_workqueue_attrs()
3677 if (WARN_ON((wq->flags & __WQ_ORDERED) && !list_empty(&wq->pwqs))) apply_workqueue_attrs()
3688 ctx = apply_wqattrs_prepare(wq, attrs); apply_workqueue_attrs()
3705 * wq_update_unbound_numa - update NUMA affinity of a wq for CPU hot[un]plug
3706 * @wq: the target workqueue
3712 * @wq accordingly.
3715 * falls back to @wq->dfl_pwq which may not be optimal but is always
3726 static void wq_update_unbound_numa(struct workqueue_struct *wq, int cpu, wq_update_unbound_numa() argument
3737 if (!wq_numa_enabled || !(wq->flags & WQ_UNBOUND)) wq_update_unbound_numa()
3741 * We don't wanna alloc/free wq_attrs for each wq for each CPU. wq_update_unbound_numa()
3748 mutex_lock(&wq->mutex); wq_update_unbound_numa()
3749 if (wq->unbound_attrs->no_numa) wq_update_unbound_numa()
3752 copy_workqueue_attrs(target_attrs, wq->unbound_attrs); wq_update_unbound_numa()
3753 pwq = unbound_pwq_by_node(wq, node); wq_update_unbound_numa()
3757 * different from wq's, we need to compare it to @pwq's and create wq_update_unbound_numa()
3759 * wq's, the default pwq should be used. wq_update_unbound_numa()
3761 if (wq_calc_node_cpumask(wq->unbound_attrs, node, cpu_off, cpumask)) { wq_update_unbound_numa()
3768 mutex_unlock(&wq->mutex); wq_update_unbound_numa()
3771 pwq = alloc_unbound_pwq(wq, target_attrs); wq_update_unbound_numa()
3774 wq->name); wq_update_unbound_numa()
3775 mutex_lock(&wq->mutex); wq_update_unbound_numa()
3782 * get/put_online_cpus(), @wq->unbound_attrs couldn't have changed wq_update_unbound_numa()
3785 mutex_lock(&wq->mutex); wq_update_unbound_numa()
3786 old_pwq = numa_pwq_tbl_install(wq, node, pwq); wq_update_unbound_numa()
3790 spin_lock_irq(&wq->dfl_pwq->pool->lock); wq_update_unbound_numa()
3791 get_pwq(wq->dfl_pwq); wq_update_unbound_numa()
3792 spin_unlock_irq(&wq->dfl_pwq->pool->lock); wq_update_unbound_numa()
3793 old_pwq = numa_pwq_tbl_install(wq, node, wq->dfl_pwq); wq_update_unbound_numa()
3795 mutex_unlock(&wq->mutex); wq_update_unbound_numa()
3799 static int alloc_and_link_pwqs(struct workqueue_struct *wq) alloc_and_link_pwqs() argument
3801 bool highpri = wq->flags & WQ_HIGHPRI; alloc_and_link_pwqs()
3804 if (!(wq->flags & WQ_UNBOUND)) { alloc_and_link_pwqs()
3805 wq->cpu_pwqs = alloc_percpu(struct pool_workqueue); alloc_and_link_pwqs()
3806 if (!wq->cpu_pwqs) alloc_and_link_pwqs()
3811 per_cpu_ptr(wq->cpu_pwqs, cpu); for_each_possible_cpu()
3815 init_pwq(pwq, wq, &cpu_pools[highpri]); for_each_possible_cpu()
3817 mutex_lock(&wq->mutex); for_each_possible_cpu()
3819 mutex_unlock(&wq->mutex); for_each_possible_cpu()
3822 } else if (wq->flags & __WQ_ORDERED) {
3823 ret = apply_workqueue_attrs(wq, ordered_wq_attrs[highpri]);
3825 WARN(!ret && (wq->pwqs.next != &wq->dfl_pwq->pwqs_node ||
3826 wq->pwqs.prev != &wq->dfl_pwq->pwqs_node),
3827 "ordering guarantee broken for workqueue %s\n", wq->name);
3830 return apply_workqueue_attrs(wq, unbound_std_wq_attrs[highpri]);
3854 struct workqueue_struct *wq; __alloc_workqueue_key() local
3861 /* allocate wq and format name */ __alloc_workqueue_key()
3863 tbl_size = nr_node_ids * sizeof(wq->numa_pwq_tbl[0]); __alloc_workqueue_key()
3865 wq = kzalloc(sizeof(*wq) + tbl_size, GFP_KERNEL); __alloc_workqueue_key()
3866 if (!wq) __alloc_workqueue_key()
3870 wq->unbound_attrs = alloc_workqueue_attrs(GFP_KERNEL); __alloc_workqueue_key()
3871 if (!wq->unbound_attrs) __alloc_workqueue_key()
3876 vsnprintf(wq->name, sizeof(wq->name), fmt, args); __alloc_workqueue_key()
3880 max_active = wq_clamp_max_active(max_active, flags, wq->name); __alloc_workqueue_key()
3882 /* init wq */ __alloc_workqueue_key()
3883 wq->flags = flags; __alloc_workqueue_key()
3884 wq->saved_max_active = max_active; __alloc_workqueue_key()
3885 mutex_init(&wq->mutex); __alloc_workqueue_key()
3886 atomic_set(&wq->nr_pwqs_to_flush, 0); __alloc_workqueue_key()
3887 INIT_LIST_HEAD(&wq->pwqs); __alloc_workqueue_key()
3888 INIT_LIST_HEAD(&wq->flusher_queue); __alloc_workqueue_key()
3889 INIT_LIST_HEAD(&wq->flusher_overflow); __alloc_workqueue_key()
3890 INIT_LIST_HEAD(&wq->maydays); __alloc_workqueue_key()
3892 lockdep_init_map(&wq->lockdep_map, lock_name, key, 0); __alloc_workqueue_key()
3893 INIT_LIST_HEAD(&wq->list); __alloc_workqueue_key()
3895 if (alloc_and_link_pwqs(wq) < 0) __alloc_workqueue_key()
3909 rescuer->rescue_wq = wq; __alloc_workqueue_key()
3911 wq->name); __alloc_workqueue_key()
3917 wq->rescuer = rescuer; __alloc_workqueue_key()
3922 if ((wq->flags & WQ_SYSFS) && workqueue_sysfs_register(wq)) __alloc_workqueue_key()
3927 * Grab it, adjust max_active and add the new @wq to workqueues __alloc_workqueue_key()
3932 mutex_lock(&wq->mutex); __alloc_workqueue_key()
3933 for_each_pwq(pwq, wq) __alloc_workqueue_key()
3935 mutex_unlock(&wq->mutex); __alloc_workqueue_key()
3937 list_add_tail_rcu(&wq->list, &workqueues); __alloc_workqueue_key()
3941 return wq; __alloc_workqueue_key()
3944 free_workqueue_attrs(wq->unbound_attrs); __alloc_workqueue_key()
3945 kfree(wq); __alloc_workqueue_key()
3948 destroy_workqueue(wq); __alloc_workqueue_key()
3955 * @wq: target workqueue
3959 void destroy_workqueue(struct workqueue_struct *wq) destroy_workqueue() argument
3965 drain_workqueue(wq); destroy_workqueue()
3968 mutex_lock(&wq->mutex); for_each_pwq()
3969 for_each_pwq(pwq, wq) { for_each_pwq()
3974 mutex_unlock(&wq->mutex); for_each_pwq()
3979 if (WARN_ON((pwq != wq->dfl_pwq) && (pwq->refcnt > 1)) || for_each_pwq()
3982 mutex_unlock(&wq->mutex); for_each_pwq()
3986 mutex_unlock(&wq->mutex);
3989 * wq list is used to freeze wq, remove from list after
3993 list_del_rcu(&wq->list);
3996 workqueue_sysfs_unregister(wq);
3998 if (wq->rescuer)
3999 kthread_stop(wq->rescuer->task);
4001 if (!(wq->flags & WQ_UNBOUND)) {
4006 call_rcu_sched(&wq->rcu, rcu_free_wq);
4009 * We're the sole accessor of @wq at this point. Directly
4011 * @wq will be freed when the last pwq is released.
4014 pwq = rcu_access_pointer(wq->numa_pwq_tbl[node]); for_each_node()
4015 RCU_INIT_POINTER(wq->numa_pwq_tbl[node], NULL); for_each_node()
4020 * Put dfl_pwq. @wq may be freed any time after dfl_pwq is
4023 pwq = wq->dfl_pwq;
4024 wq->dfl_pwq = NULL;
4032 * @wq: target workqueue
4035 * Set max_active of @wq to @max_active.
4040 void workqueue_set_max_active(struct workqueue_struct *wq, int max_active) workqueue_set_max_active() argument
4045 if (WARN_ON(wq->flags & __WQ_ORDERED)) workqueue_set_max_active()
4048 max_active = wq_clamp_max_active(max_active, wq->flags, wq->name); workqueue_set_max_active()
4050 mutex_lock(&wq->mutex); workqueue_set_max_active()
4052 wq->saved_max_active = max_active; workqueue_set_max_active()
4054 for_each_pwq(pwq, wq) workqueue_set_max_active()
4057 mutex_unlock(&wq->mutex); workqueue_set_max_active()
4079 * @wq: target workqueue
4081 * Test whether @wq's cpu workqueue for @cpu is congested. There is
4094 bool workqueue_congested(int cpu, struct workqueue_struct *wq) workqueue_congested() argument
4104 if (!(wq->flags & WQ_UNBOUND)) workqueue_congested()
4105 pwq = per_cpu_ptr(wq->cpu_pwqs, cpu); workqueue_congested()
4107 pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu)); workqueue_congested()
4192 struct workqueue_struct *wq = NULL; print_worker_info() local
4211 probe_kernel_read(&wq, &pwq->wq, sizeof(wq)); print_worker_info()
4212 probe_kernel_read(name, wq->name, sizeof(name) - 1); print_worker_info()
4279 worker == pwq->wq->rescuer ? "(RESCUER)" : "", show_pwq()
4328 struct workqueue_struct *wq; show_workqueue_state() local
4337 list_for_each_entry_rcu(wq, &workqueues, list) { show_workqueue_state()
4341 for_each_pwq(pwq, wq) { for_each_pwq()
4350 pr_info("workqueue %s: flags=0x%x\n", wq->name, wq->flags);
4352 for_each_pwq(pwq, wq) { for_each_pwq()
4573 struct workqueue_struct *wq; workqueue_cpu_up_callback() local
4602 list_for_each_entry(wq, &workqueues, list)
4603 wq_update_unbound_numa(wq, cpu, true);
4621 struct workqueue_struct *wq; workqueue_cpu_down_callback() local
4631 list_for_each_entry(wq, &workqueues, list) workqueue_cpu_down_callback()
4632 wq_update_unbound_numa(wq, cpu, false); workqueue_cpu_down_callback()
4693 * Grabs and releases wq_pool_mutex, wq->mutex and pool->lock's.
4697 struct workqueue_struct *wq; freeze_workqueues_begin() local
4705 list_for_each_entry(wq, &workqueues, list) { freeze_workqueues_begin()
4706 mutex_lock(&wq->mutex); freeze_workqueues_begin()
4707 for_each_pwq(pwq, wq) freeze_workqueues_begin()
4709 mutex_unlock(&wq->mutex); freeze_workqueues_begin()
4731 struct workqueue_struct *wq; freeze_workqueues_busy() local
4738 list_for_each_entry(wq, &workqueues, list) { freeze_workqueues_busy()
4739 if (!(wq->flags & WQ_FREEZABLE)) freeze_workqueues_busy()
4746 for_each_pwq(pwq, wq) { for_each_pwq()
4768 * Grabs and releases wq_pool_mutex, wq->mutex and pool->lock's.
4772 struct workqueue_struct *wq; thaw_workqueues() local
4783 list_for_each_entry(wq, &workqueues, list) { thaw_workqueues()
4784 mutex_lock(&wq->mutex); thaw_workqueues()
4785 for_each_pwq(pwq, wq) thaw_workqueues()
4787 mutex_unlock(&wq->mutex); thaw_workqueues()
4811 struct workqueue_struct *wq; member in struct:wq_device
4819 return wq_dev->wq; dev_to_wq()
4825 struct workqueue_struct *wq = dev_to_wq(dev); per_cpu_show() local
4827 return scnprintf(buf, PAGE_SIZE, "%d\n", (bool)!(wq->flags & WQ_UNBOUND)); per_cpu_show()
4834 struct workqueue_struct *wq = dev_to_wq(dev); max_active_show() local
4836 return scnprintf(buf, PAGE_SIZE, "%d\n", wq->saved_max_active); max_active_show()
4843 struct workqueue_struct *wq = dev_to_wq(dev); max_active_store() local
4849 workqueue_set_max_active(wq, val); max_active_store()
4864 struct workqueue_struct *wq = dev_to_wq(dev); wq_pool_ids_show() local
4872 unbound_pwq_by_node(wq, node)->pool->id); for_each_node()
4884 struct workqueue_struct *wq = dev_to_wq(dev); wq_nice_show() local
4887 mutex_lock(&wq->mutex); wq_nice_show()
4888 written = scnprintf(buf, PAGE_SIZE, "%d\n", wq->unbound_attrs->nice); wq_nice_show()
4889 mutex_unlock(&wq->mutex); wq_nice_show()
4895 static struct workqueue_attrs *wq_sysfs_prep_attrs(struct workqueue_struct *wq) wq_sysfs_prep_attrs() argument
4903 mutex_lock(&wq->mutex); wq_sysfs_prep_attrs()
4904 copy_workqueue_attrs(attrs, wq->unbound_attrs); wq_sysfs_prep_attrs()
4905 mutex_unlock(&wq->mutex); wq_sysfs_prep_attrs()
4912 struct workqueue_struct *wq = dev_to_wq(dev); wq_nice_store() local
4916 attrs = wq_sysfs_prep_attrs(wq); wq_nice_store()
4922 ret = apply_workqueue_attrs(wq, attrs); wq_nice_store()
4933 struct workqueue_struct *wq = dev_to_wq(dev); wq_cpumask_show() local
4936 mutex_lock(&wq->mutex); wq_cpumask_show()
4938 cpumask_pr_args(wq->unbound_attrs->cpumask)); wq_cpumask_show()
4939 mutex_unlock(&wq->mutex); wq_cpumask_show()
4947 struct workqueue_struct *wq = dev_to_wq(dev); wq_cpumask_store() local
4951 attrs = wq_sysfs_prep_attrs(wq); wq_cpumask_store()
4957 ret = apply_workqueue_attrs(wq, attrs); wq_cpumask_store()
4966 struct workqueue_struct *wq = dev_to_wq(dev); wq_numa_show() local
4969 mutex_lock(&wq->mutex); wq_numa_show()
4971 !wq->unbound_attrs->no_numa); wq_numa_show()
4972 mutex_unlock(&wq->mutex); wq_numa_show()
4980 struct workqueue_struct *wq = dev_to_wq(dev); wq_numa_store() local
4984 attrs = wq_sysfs_prep_attrs(wq); wq_numa_store()
4991 ret = apply_workqueue_attrs(wq, attrs); wq_numa_store()
5026 * @wq: the workqueue to register
5028 * Expose @wq in sysfs under /sys/bus/workqueue/devices.
5039 int workqueue_sysfs_register(struct workqueue_struct *wq) workqueue_sysfs_register() argument
5049 if (WARN_ON(wq->flags & __WQ_ORDERED)) workqueue_sysfs_register()
5052 wq->wq_dev = wq_dev = kzalloc(sizeof(*wq_dev), GFP_KERNEL); workqueue_sysfs_register()
5056 wq_dev->wq = wq; workqueue_sysfs_register()
5058 wq_dev->dev.init_name = wq->name; workqueue_sysfs_register()
5070 wq->wq_dev = NULL; workqueue_sysfs_register()
5074 if (wq->flags & WQ_UNBOUND) { workqueue_sysfs_register()
5081 wq->wq_dev = NULL; workqueue_sysfs_register()
5094 * @wq: the workqueue to unregister
5096 * If @wq is registered to sysfs by workqueue_sysfs_register(), unregister.
5098 static void workqueue_sysfs_unregister(struct workqueue_struct *wq) workqueue_sysfs_unregister() argument
5100 struct wq_device *wq_dev = wq->wq_dev; workqueue_sysfs_unregister()
5102 if (!wq->wq_dev) workqueue_sysfs_unregister()
5105 wq->wq_dev = NULL; workqueue_sysfs_unregister()
5109 static void workqueue_sysfs_unregister(struct workqueue_struct *wq) { } workqueue_sysfs_unregister() argument
5197 /* create default unbound and ordered wq attrs */
5206 * An ordered wq should have only one pwq as ordering is
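
Many of the workqueue.c hits above are the delayed-work path (__queue_delayed_work(), queue_delayed_work_on(), mod_delayed_work_on(), flush_delayed_work()). A common caller-side shape is a self-rearming poller; this is a hedged sketch with an invented 100 ms period and names:

#include <linux/workqueue.h>
#include <linux/jiffies.h>

static struct workqueue_struct *poll_wq;
static struct delayed_work poll_dwork;

static void poll_fn(struct work_struct *work)
{
        /* ... periodic work goes here ... */

        /* re-arm; this ends up in __queue_delayed_work() seen above */
        queue_delayed_work(poll_wq, &poll_dwork, msecs_to_jiffies(100));
}

static int poll_start(void)
{
        poll_wq = alloc_workqueue("poll_wq", WQ_FREEZABLE, 0);
        if (!poll_wq)
                return -ENOMEM;
        INIT_DELAYED_WORK(&poll_dwork, poll_fn);
        queue_delayed_work(poll_wq, &poll_dwork, msecs_to_jiffies(100));
        return 0;
}

static void poll_stop(void)
{
        /* cancels a pending timer and waits out a running instance,
         * even though poll_fn() re-queues itself */
        cancel_delayed_work_sync(&poll_dwork);
        destroy_workqueue(poll_wq);
}
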
H A Dcpu.c63 wait_queue_head_t wq; member in struct:__anon13965
77 .wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq),
128 if (refcount <= 0 && waitqueue_active(&cpu_hotplug.wq)) put_online_cpus()
129 wake_up(&cpu_hotplug.wq); put_online_cpus()
167 prepare_to_wait(&cpu_hotplug.wq, &wait, TASK_UNINTERRUPTIBLE); cpu_hotplug_begin()
173 finish_wait(&cpu_hotplug.wq, &wait); cpu_hotplug_begin()
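
The cpu.c fragments above are the writer side: cpu_hotplug_begin() sleeps on cpu_hotplug.wq until the last get_online_cpus() reader calls put_online_cpus(). The reader side, as seen from ordinary kernel code, is just a bracketed critical section; a minimal sketch with an invented loop body:

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/printk.h>

static void demo_walk_online_cpus(void)
{
        int cpu;

        get_online_cpus();              /* hold off CPU hotplug; may sleep */
        for_each_online_cpu(cpu) {
                /* per-CPU work that must not race with a CPU going away */
                pr_info("cpu %d is online\n", cpu);
        }
        put_online_cpus();              /* last reader wakes cpu_hotplug.wq */
}
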
H A Dpadata.c143 queue_work_on(target_cpu, pinst->wq, &queue->work); padata_do_parallel()
266 queue_work_on(cb_cpu, pinst->wq, &squeue->work); padata_reorder()
1020 * @wq: workqueue to use for the allocated padata instance
1022 struct padata_instance *padata_alloc_possible(struct workqueue_struct *wq) padata_alloc_possible() argument
1024 return padata_alloc(wq, cpu_possible_mask, cpu_possible_mask); padata_alloc_possible()
1032 * @wq: workqueue to use for the allocated padata instance
1036 struct padata_instance *padata_alloc(struct workqueue_struct *wq, padata_alloc() argument
1064 pinst->wq = wq; padata_alloc()
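
Tying the padata.h and padata.c hits together: a padata instance wraps a caller-supplied workqueue and spreads struct padata_priv jobs over the parallel CPUs while completing them in submission order. A rough caller-side sketch, loosely modeled on how pcrypt drives this API; the callbacks and cb_cpu choice are illustrative and error handling is trimmed:

#include <linux/padata.h>
#include <linux/workqueue.h>

static struct workqueue_struct *demo_wq;
static struct padata_instance *demo_pinst;

static void demo_parallel(struct padata_priv *padata)
{
        /* heavy per-item work, run on some CPU of the parallel mask */
        padata_do_serial(padata);       /* pass the item on for ordered completion */
}

static void demo_serial(struct padata_priv *padata)
{
        /* items arrive here in the order they were submitted */
}

static int demo_setup(void)
{
        demo_wq = alloc_workqueue("demo_padata", WQ_UNBOUND, 0);
        demo_pinst = padata_alloc_possible(demo_wq);    /* see padata_alloc_possible() above */
        return padata_start(demo_pinst);
}

static int demo_submit(struct padata_priv *padata)
{
        padata->parallel = demo_parallel;
        padata->serial = demo_serial;
        return padata_do_parallel(demo_pinst, padata, 0 /* cb_cpu */);
}
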
/linux-4.1.27/drivers/infiniband/hw/ipath/
H A Dipath_srq.c52 struct ipath_rwq *wq; ipath_post_srq_receive() local
68 wq = srq->rq.wq; ipath_post_srq_receive()
69 next = wq->head + 1; ipath_post_srq_receive()
72 if (next == wq->tail) { ipath_post_srq_receive()
79 wqe = get_rwqe_ptr(&srq->rq, wq->head); ipath_post_srq_receive()
86 wq->head = next; ipath_post_srq_receive()
139 srq->rq.wq = vmalloc_user(sizeof(struct ipath_rwq) + srq->rq.size * sz); ipath_create_srq()
140 if (!srq->rq.wq) { ipath_create_srq()
156 srq->rq.wq); ipath_create_srq()
175 srq->rq.wq->head = 0; ipath_create_srq()
176 srq->rq.wq->tail = 0; ipath_create_srq()
201 vfree(srq->rq.wq); ipath_create_srq()
220 struct ipath_rwq *wq; ipath_modify_srq() local
239 wq = vmalloc_user(sizeof(struct ipath_rwq) + size * sz); ipath_modify_srq()
240 if (!wq) { ipath_modify_srq()
267 owq = srq->rq.wq; ipath_modify_srq()
284 p = wq->wq; ipath_modify_srq()
299 srq->rq.wq = wq; ipath_modify_srq()
301 wq->head = n; ipath_modify_srq()
302 wq->tail = 0; ipath_modify_srq()
314 ipath_update_mmap_info(dev, ip, s, wq); ipath_modify_srq()
346 vfree(wq); ipath_modify_srq()
376 vfree(srq->rq.wq); ipath_destroy_srq()
H A Dipath_qp.c360 if (qp->r_rq.wq) { ipath_reset_qp()
361 qp->r_rq.wq->head = 0; ipath_reset_qp()
362 qp->r_rq.wq->tail = 0; ipath_reset_qp()
410 if (qp->r_rq.wq) { ipath_error_qp()
411 struct ipath_rwq *wq; ipath_error_qp() local
418 wq = qp->r_rq.wq; ipath_error_qp()
419 head = wq->head; ipath_error_qp()
422 tail = wq->tail; ipath_error_qp()
431 wq->tail = tail; ipath_error_qp()
690 struct ipath_rwq *wq = qp->r_rq.wq; ipath_compute_aeth() local
695 head = wq->head; ipath_compute_aeth()
698 tail = wq->tail; ipath_compute_aeth()
824 qp->r_rq.wq = NULL; ipath_create_qp()
832 qp->r_rq.wq = vmalloc_user(sizeof(struct ipath_rwq) + ipath_create_qp()
834 if (!qp->r_rq.wq) { ipath_create_qp()
865 vfree(qp->r_rq.wq); ipath_create_qp()
886 if (!qp->r_rq.wq) { ipath_create_qp()
902 qp->r_rq.wq); ipath_create_qp()
940 vfree(qp->r_rq.wq); ipath_create_qp()
1008 vfree(qp->r_rq.wq); ipath_destroy_qp()
H A Dipath_ruc.c170 struct ipath_rwq *wq; ipath_get_rwqe() local
193 wq = rq->wq; ipath_get_rwqe()
194 tail = wq->tail; ipath_get_rwqe()
199 if (unlikely(tail == wq->head)) { ipath_get_rwqe()
213 wq->tail = tail; ipath_get_rwqe()
224 n = wq->head; ipath_get_rwqe()
H A Dipath_ud.c60 struct ipath_rwq *wq; ipath_ud_loopback() local
123 wq = rq->wq; ipath_ud_loopback()
124 tail = wq->tail; ipath_ud_loopback()
128 if (unlikely(tail == wq->head)) { ipath_ud_loopback()
148 wq->tail = tail; ipath_ud_loopback()
157 n = wq->head; ipath_ud_loopback()
H A Dipath_verbs.h303 * Note that the wq array elements are variable size so you can't
310 struct ipath_rwqe wq[0]; member in struct:ipath_rwq
314 struct ipath_rwq *wq; member in struct:ipath_rq
491 * struct ipath_rwq.wq. This function does the array index computation.
497 ((char *) rq->wq->wq + get_rwqe_ptr()
/linux-4.1.27/drivers/infiniband/hw/qib/
H A Dqib_srq.c52 struct qib_rwq *wq; qib_post_srq_receive() local
68 wq = srq->rq.wq; qib_post_srq_receive()
69 next = wq->head + 1; qib_post_srq_receive()
72 if (next == wq->tail) { qib_post_srq_receive()
79 wqe = get_rwqe_ptr(&srq->rq, wq->head); qib_post_srq_receive()
86 wq->head = next; qib_post_srq_receive()
136 srq->rq.wq = vmalloc_user(sizeof(struct qib_rwq) + srq->rq.size * sz); qib_create_srq()
137 if (!srq->rq.wq) { qib_create_srq()
152 srq->rq.wq); qib_create_srq()
171 srq->rq.wq->head = 0; qib_create_srq()
172 srq->rq.wq->tail = 0; qib_create_srq()
197 vfree(srq->rq.wq); qib_create_srq()
216 struct qib_rwq *wq; qib_modify_srq() local
235 wq = vmalloc_user(sizeof(struct qib_rwq) + size * sz); qib_modify_srq()
236 if (!wq) { qib_modify_srq()
263 owq = srq->rq.wq; qib_modify_srq()
280 p = wq->wq; qib_modify_srq()
295 srq->rq.wq = wq; qib_modify_srq()
297 wq->head = n; qib_modify_srq()
298 wq->tail = 0; qib_modify_srq()
310 qib_update_mmap_info(dev, ip, s, wq); qib_modify_srq()
346 vfree(wq); qib_modify_srq()
376 vfree(srq->rq.wq); qib_destroy_srq()
H A Dqib_qp.c411 if (qp->r_rq.wq) { qib_reset_qp()
412 qp->r_rq.wq->head = 0; qib_reset_qp()
413 qp->r_rq.wq->tail = 0; qib_reset_qp()
529 if (qp->r_rq.wq) { qib_error_qp()
530 struct qib_rwq *wq; qib_error_qp() local
537 wq = qp->r_rq.wq; qib_error_qp()
538 head = wq->head; qib_error_qp()
541 tail = wq->tail; qib_error_qp()
550 wq->tail = tail; qib_error_qp()
924 struct qib_rwq *wq = qp->r_rq.wq; qib_compute_aeth() local
929 head = wq->head; qib_compute_aeth()
932 tail = wq->tail; qib_compute_aeth()
1072 qp->r_rq.wq = vmalloc_user( qib_create_qp()
1076 qp->r_rq.wq = __vmalloc( qib_create_qp()
1081 if (!qp->r_rq.wq) { qib_create_qp()
1114 vfree(qp->r_rq.wq); qib_create_qp()
1135 if (!qp->r_rq.wq) { qib_create_qp()
1149 qp->r_rq.wq); qib_create_qp()
1187 vfree(qp->r_rq.wq); qib_create_qp()
1244 vfree(qp->r_rq.wq); qib_destroy_qp()
H A Dqib_ruc.c142 struct qib_rwq *wq; qib_get_rwqe() local
165 wq = rq->wq; qib_get_rwqe()
166 tail = wq->tail; qib_get_rwqe()
170 if (unlikely(tail == wq->head)) { qib_get_rwqe()
184 wq->tail = tail; qib_get_rwqe()
200 n = wq->head; qib_get_rwqe()
H A Dqib_verbs.h364 * Note that the wq array elements are variable size so you can't
371 struct qib_rwqe wq[0]; member in struct:qib_rwq
375 struct qib_rwq *wq; member in struct:qib_rq
620 * struct qib_rwq.wq. This function does the array index computation.
625 ((char *) rq->wq->wq + get_rwqe_ptr()
/linux-4.1.27/drivers/md/bcache/
H A Drequest.h8 struct workqueue_struct *wq; member in struct:data_insert_op
H A Dclosure.h151 struct workqueue_struct *wq; member in struct:closure::__anon5371::__anon5372
239 struct workqueue_struct *wq) set_closure_fn()
244 cl->wq = wq; set_closure_fn()
251 struct workqueue_struct *wq = cl->wq; closure_queue() local
252 if (wq) { closure_queue()
254 BUG_ON(!queue_work(wq, &cl->work)); closure_queue()
311 * of @wq (or, if @wq is NULL, @fn will be called by closure_put() directly).
339 * Causes @fn to be executed out of @cl, in @wq context (or called directly if
340 * @wq is NULL).
379 struct workqueue_struct *wq, closure_call()
383 continue_at_nobarrier(cl, fn, wq); closure_call()
238 set_closure_fn(struct closure *cl, closure_fn *fn, struct workqueue_struct *wq) set_closure_fn() argument
378 closure_call(struct closure *cl, closure_fn fn, struct workqueue_struct *wq, struct closure *parent) closure_call() argument
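
set_closure_fn()/closure_queue() above capture a common shape: remember a callback plus an optional workqueue, then either queue_work() the callback or invoke it directly when no workqueue was given. The sketch below shows only that decision in generic form; it is not the bcache closure API, and the demo_* names are invented.

#include <linux/workqueue.h>

struct demo_cb {
        struct work_struct work;
        struct workqueue_struct *wq;    /* NULL means "call synchronously" */
};

static void demo_cb_fn(struct work_struct *work)
{
        struct demo_cb *cb = container_of(work, struct demo_cb, work);

        /* ... the actual completion handling runs here ... */
        (void)cb;
}

static void demo_cb_init(struct demo_cb *cb, struct workqueue_struct *wq)
{
        INIT_WORK(&cb->work, demo_cb_fn);
        cb->wq = wq;
}

static void demo_cb_fire(struct demo_cb *cb)
{
        if (cb->wq)
                queue_work(cb->wq, &cb->work);  /* deferred, in wq context */
        else
                demo_cb_fn(&cb->work);          /* immediate, in the caller's context */
}
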
H A Dmovinggc.c116 continue_at(cl, write_moving_finish, op->wq); write_moving()
126 continue_at(cl, write_moving, io->op.wq); read_moving_submit()
161 io->op.wq = c->moving_gc_wq; read_moving()
H A Drequest.c91 continue_at(cl, bch_data_insert_start, op->wq); bch_data_insert_keys()
140 continue_at(cl, bch_data_insert_keys, op->wq); bch_data_invalidate()
183 set_closure_fn(cl, bch_data_insert_error, op->wq); bch_data_insert_endio()
219 continue_at(cl, bch_data_insert_keys, op->wq); bch_data_insert_start()
256 continue_at(cl, bch_data_insert_keys, op->wq); bch_data_insert_start()
285 continue_at(cl, bch_data_insert_keys, op->wq); bch_data_insert_start()
663 s->iop.wq = bcache_wq; search_alloc()
/linux-4.1.27/fs/jfs/
H A Djfs_lock.h35 #define __SLEEP_COND(wq, cond, lock_cmd, unlock_cmd) \
39 add_wait_queue(&wq, &__wait); \
49 remove_wait_queue(&wq, &__wait); \
/linux-4.1.27/drivers/usb/chipidea/
H A Dotg.c121 ci->wq = create_freezable_workqueue("ci_otg"); ci_hdrc_otg_init()
122 if (!ci->wq) { ci_hdrc_otg_init()
139 if (ci->wq) { ci_hdrc_otg_destroy()
140 flush_workqueue(ci->wq); ci_hdrc_otg_destroy()
141 destroy_workqueue(ci->wq); ci_hdrc_otg_destroy()
H A Dotg.h23 queue_work(ci->wq, &ci->work); ci_otg_queue_work()
H A Dci.h170 * @wq: workqueue thread
216 struct workqueue_struct *wq; member in struct:ci_hdrc
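
The chipidea OTG code above is a compact example of the private-workqueue lifecycle: create the queue at init, queue work from the event path, and flush then destroy it at teardown. A hedged sketch of that lifecycle, with invented demo_* names:

#include <linux/workqueue.h>
#include <linux/errno.h>

static struct workqueue_struct *demo_wq;
static struct work_struct demo_work;

static void demo_work_fn(struct work_struct *work)
{
        /* ... handle the event outside interrupt context ... */
}

static int demo_init(void)
{
        demo_wq = create_freezable_workqueue("demo_wq");
        if (!demo_wq)
                return -ENOMEM;
        INIT_WORK(&demo_work, demo_work_fn);
        return 0;
}

static void demo_event(void)            /* e.g. called from an IRQ handler */
{
        queue_work(demo_wq, &demo_work);
}

static void demo_exit(void)
{
        flush_workqueue(demo_wq);        /* let queued work finish */
        destroy_workqueue(demo_wq);
}
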
/linux-4.1.27/net/core/
H A Dstream.c31 struct socket_wq *wq; sk_stream_write_space() local
37 wq = rcu_dereference(sk->sk_wq); sk_stream_write_space()
38 if (wq_has_sleeper(wq)) sk_stream_write_space()
39 wake_up_interruptible_poll(&wq->wait, POLLOUT | sk_stream_write_space()
41 if (wq && wq->fasync_list && !(sk->sk_shutdown & SEND_SHUTDOWN)) sk_stream_write_space()
H A Dsock.c1917 prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait,
1925 finish_wait(&sk->sk_lock.wq, &wait);
2213 struct socket_wq *wq; sock_def_wakeup() local
2216 wq = rcu_dereference(sk->sk_wq); sock_def_wakeup()
2217 if (wq_has_sleeper(wq)) sock_def_wakeup()
2218 wake_up_interruptible_all(&wq->wait); sock_def_wakeup()
2224 struct socket_wq *wq; sock_def_error_report() local
2227 wq = rcu_dereference(sk->sk_wq); sock_def_error_report()
2228 if (wq_has_sleeper(wq)) sock_def_error_report()
2229 wake_up_interruptible_poll(&wq->wait, POLLERR); sock_def_error_report()
2236 struct socket_wq *wq; sock_def_readable() local
2239 wq = rcu_dereference(sk->sk_wq); sock_def_readable()
2240 if (wq_has_sleeper(wq)) sock_def_readable()
2241 wake_up_interruptible_sync_poll(&wq->wait, POLLIN | POLLPRI | sock_def_readable()
2249 struct socket_wq *wq; sock_def_write_space() local
2257 wq = rcu_dereference(sk->sk_wq); sock_def_write_space()
2258 if (wq_has_sleeper(wq)) sock_def_write_space()
2259 wake_up_interruptible_sync_poll(&wq->wait, POLLOUT | sock_def_write_space()
2318 sk->sk_wq = sock->wq; sock_init_data()
2399 if (waitqueue_active(&sk->sk_lock.wq)) release_sock()
2400 wake_up(&sk->sk_lock.wq); release_sock()
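
Every wakeup helper in stream.c/sock.c above follows one RCU pattern: fetch sk->sk_wq under rcu_read_lock() with rcu_dereference(), and only issue the wakeup when wq_has_sleeper() confirms a waiter exists (the required memory barrier lives inside wq_has_sleeper()). A condensed sketch of that pattern; demo_sock_readable() is an invented name and assumes a valid struct sock *sk as in the excerpts:

#include <net/sock.h>
#include <linux/poll.h>

static void demo_sock_readable(struct sock *sk)
{
        struct socket_wq *wq;

        rcu_read_lock();
        wq = rcu_dereference(sk->sk_wq);
        if (wq_has_sleeper(wq))         /* skip the wakeup if nobody is waiting */
                wake_up_interruptible_poll(&wq->wait, POLLIN | POLLRDNORM);
        rcu_read_unlock();
}
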
/linux-4.1.27/drivers/gpu/drm/radeon/
H A Dradeon_sa.c56 init_waitqueue_head(&sa_manager->wq); radeon_sa_bo_manager_init()
333 spin_lock(&sa_manager->wq.lock); radeon_sa_bo_new()
345 spin_unlock(&sa_manager->wq.lock); radeon_sa_bo_new()
355 spin_unlock(&sa_manager->wq.lock); radeon_sa_bo_new()
359 spin_lock(&sa_manager->wq.lock); radeon_sa_bo_new()
363 sa_manager->wq, radeon_sa_bo_new()
370 spin_unlock(&sa_manager->wq.lock); radeon_sa_bo_new()
386 spin_lock(&sa_manager->wq.lock); radeon_sa_bo_free()
394 wake_up_all_locked(&sa_manager->wq); radeon_sa_bo_free()
395 spin_unlock(&sa_manager->wq.lock); radeon_sa_bo_free()
405 spin_lock(&sa_manager->wq.lock); radeon_sa_bo_dump_debug_info()
422 spin_unlock(&sa_manager->wq.lock); radeon_sa_bo_dump_debug_info()
/linux-4.1.27/drivers/power/
H A Dipaq_micro_battery.c43 struct workqueue_struct *wq; member in struct:micro_battery
91 queue_delayed_work(mb->wq, &mb->update, msecs_to_jiffies(BATT_PERIOD)); micro_battery_work()
238 mb->wq = create_singlethread_workqueue("ipaq-battery-wq"); micro_batt_probe()
239 if (!mb->wq) micro_batt_probe()
244 queue_delayed_work(mb->wq, &mb->update, 1); micro_batt_probe()
267 destroy_workqueue(mb->wq); micro_batt_probe()
279 destroy_workqueue(mb->wq); micro_batt_remove()
296 queue_delayed_work(mb->wq, &mb->update, msecs_to_jiffies(BATT_PERIOD)); micro_batt_resume()
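
The ipaq battery driver above polls by letting the work item re-arm itself with queue_delayed_work() each time it runs. A minimal self-rearming poll loop in the same spirit; the interval, the demo_* names and the explicit cancel step in the stop path are illustrative assumptions, not taken from the driver:

#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

#define DEMO_POLL_MS 5000

static struct workqueue_struct *demo_poll_wq;
static struct delayed_work demo_poll_work;

static void demo_poll_fn(struct work_struct *work)
{
        /* ... read the hardware ... */
        queue_delayed_work(demo_poll_wq, &demo_poll_work,
                           msecs_to_jiffies(DEMO_POLL_MS));
}

static int demo_poll_start(void)
{
        demo_poll_wq = create_singlethread_workqueue("demo-poll");
        if (!demo_poll_wq)
                return -ENOMEM;
        INIT_DELAYED_WORK(&demo_poll_work, demo_poll_fn);
        queue_delayed_work(demo_poll_wq, &demo_poll_work, 1);   /* first run soon */
        return 0;
}

static void demo_poll_stop(void)
{
        cancel_delayed_work_sync(&demo_poll_work);
        destroy_workqueue(demo_poll_wq);
}
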
/linux-4.1.27/kernel/sched/
H A Dwait.c309 * add_wait_queue(&wq, &wait);
323 * remove_wait_queue(&wq, &wait);
387 __wait_on_bit(wait_queue_head_t *wq, struct wait_bit_queue *q, __wait_on_bit() argument
393 prepare_to_wait(wq, &q->wait, mode); __wait_on_bit()
397 finish_wait(wq, &q->wait); __wait_on_bit()
405 wait_queue_head_t *wq = bit_waitqueue(word, bit); out_of_line_wait_on_bit() local
408 return __wait_on_bit(wq, &wait, action, mode); out_of_line_wait_on_bit()
416 wait_queue_head_t *wq = bit_waitqueue(word, bit); out_of_line_wait_on_bit_timeout() local
420 return __wait_on_bit(wq, &wait, action, mode); out_of_line_wait_on_bit_timeout()
425 __wait_on_bit_lock(wait_queue_head_t *wq, struct wait_bit_queue *q, __wait_on_bit_lock() argument
431 prepare_to_wait_exclusive(wq, &q->wait, mode); __wait_on_bit_lock()
437 abort_exclusive_wait(wq, &q->wait, mode, &q->key); __wait_on_bit_lock()
440 finish_wait(wq, &q->wait); __wait_on_bit_lock()
448 wait_queue_head_t *wq = bit_waitqueue(word, bit); out_of_line_wait_on_bit_lock() local
451 return __wait_on_bit_lock(wq, &wait, action, mode); out_of_line_wait_on_bit_lock()
455 void __wake_up_bit(wait_queue_head_t *wq, void *word, int bit) __wake_up_bit() argument
458 if (waitqueue_active(wq)) __wake_up_bit()
459 __wake_up(wq, TASK_NORMAL, 1, &key); __wake_up_bit()
531 int __wait_on_atomic_t(wait_queue_head_t *wq, struct wait_bit_queue *q, __wait_on_atomic_t() argument
538 prepare_to_wait(wq, &q->wait, mode); __wait_on_atomic_t()
544 finish_wait(wq, &q->wait); __wait_on_atomic_t()
562 wait_queue_head_t *wq = atomic_t_waitqueue(p); out_of_line_wait_on_atomic_t() local
565 return __wait_on_atomic_t(wq, &wait, action, mode); out_of_line_wait_on_atomic_t()
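
__wait_on_bit() and __wait_on_atomic_t() above are both instances of the canonical prepare_to_wait()/schedule()/finish_wait() loop, which re-checks the condition only after the task is on the wait queue so a wakeup cannot be lost. A bare-bones version of that loop for an ordinary flag (demo_* names invented):

#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/errno.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_waitq);
static int demo_ready;

static int demo_wait(void)
{
        DEFINE_WAIT(wait);
        int ret = 0;

        for (;;) {
                prepare_to_wait(&demo_waitq, &wait, TASK_INTERRUPTIBLE);
                if (demo_ready)                 /* check only after queueing */
                        break;
                if (signal_pending(current)) {
                        ret = -ERESTARTSYS;
                        break;
                }
                schedule();
        }
        finish_wait(&demo_waitq, &wait);
        return ret;
}

static void demo_wake(void)
{
        demo_ready = 1;
        wake_up(&demo_waitq);
}
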
/linux-4.1.27/drivers/infiniband/ulp/ipoib/
H A Dipoib_verbs.c164 priv->wq = create_singlethread_workqueue("ipoib_wq"); ipoib_transport_dev_init()
165 if (!priv->wq) { ipoib_transport_dev_init()
250 destroy_workqueue(priv->wq); ipoib_transport_dev_init()
251 priv->wq = NULL; ipoib_transport_dev_init()
282 if (priv->wq) { ipoib_transport_dev_cleanup()
283 flush_workqueue(priv->wq); ipoib_transport_dev_cleanup()
284 destroy_workqueue(priv->wq); ipoib_transport_dev_cleanup()
285 priv->wq = NULL; ipoib_transport_dev_cleanup()
H A Dipoib_multicast.c97 queue_delayed_work(priv->wq, &priv->mcast_task, 0); __ipoib_mcast_schedule_join_thread()
104 queue_delayed_work(priv->wq, &priv->mcast_task, HZ); __ipoib_mcast_schedule_join_thread()
106 queue_delayed_work(priv->wq, &priv->mcast_task, 0); __ipoib_mcast_schedule_join_thread()
383 * Defer carrier on work to priv->wq to avoid a ipoib_mcast_join_complete()
391 queue_work(priv->wq, &priv->carrier_on_task); ipoib_mcast_join_complete()
612 queue_delayed_work(priv->wq, &priv->mcast_task, ipoib_mcast_join_task()
649 flush_workqueue(priv->wq); ipoib_mcast_stop_thread()
H A Dipoib_cm.c477 queue_delayed_work(priv->wq, ipoib_cm_req_handler()
579 queue_work(priv->wq, &priv->cm.rx_reap_task); ipoib_cm_handle_rx_wc()
606 queue_work(priv->wq, &priv->cm.rx_reap_task); ipoib_cm_handle_rx_wc()
830 queue_work(priv->wq, &priv->cm.reap_task); ipoib_cm_handle_tx_wc()
1258 queue_work(priv->wq, &priv->cm.reap_task); ipoib_cm_tx_handler()
1287 queue_work(priv->wq, &priv->cm.start_task); ipoib_cm_create_tx()
1298 queue_work(priv->wq, &priv->cm.reap_task); ipoib_cm_destroy_tx()
1420 queue_work(priv->wq, &priv->cm.skb_task); ipoib_cm_skb_too_long()
1453 queue_delayed_work(priv->wq, ipoib_cm_stale_task()
/linux-4.1.27/drivers/hid/
H A Dhid-elo.c35 static struct workqueue_struct *wq; variable in typeref:struct:workqueue_struct
175 queue_delayed_work(wq, &priv->work, ELO_PERIODIC_READ_INTERVAL); elo_work()
248 queue_delayed_work(wq, &priv->work, ELO_PERIODIC_READ_INTERVAL); elo_probe()
262 flush_workqueue(wq); elo_remove()
286 wq = create_singlethread_workqueue("elousb"); elo_driver_init()
287 if (!wq) elo_driver_init()
292 destroy_workqueue(wq); elo_driver_init()
301 destroy_workqueue(wq); elo_driver_exit()
/linux-4.1.27/drivers/i2c/busses/
H A Di2c-taos-evm.c38 static DECLARE_WAIT_QUEUE_HEAD(wq);
112 wait_event_interruptible_timeout(wq, taos->state == TAOS_STATE_IDLE, taos_smbus_xfer()
163 wake_up_interruptible(&wq); taos_interrupt()
168 wake_up_interruptible(&wq); taos_interrupt()
175 wake_up_interruptible(&wq); taos_interrupt()
228 wait_event_interruptible_timeout(wq, taos->state == TAOS_STATE_IDLE, taos_connect()
250 wait_event_interruptible_timeout(wq, taos->state == TAOS_STATE_IDLE, taos_connect()
H A Di2c-ibm_iic.h48 wait_queue_head_t wq; member in struct:ibm_iic_private
H A Di2c-ibm_iic.c336 wake_up_interruptible(&dev->wq); iic_handler()
417 ret = wait_event_interruptible_timeout(dev->wq, iic_wait_for_tc()
717 init_waitqueue_head(&dev->wq); iic_probe()
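
Both I2C drivers above use the same handshake: the transfer path sleeps in wait_event_interruptible_timeout() and the interrupt handler completes it with wake_up_interruptible(). A reduced sketch of that handshake; the flag, timeout and names are illustrative:

#include <linux/wait.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_xfer_wq);
static int demo_xfer_done;

static int demo_start_transfer_and_wait(void)
{
        long left;

        demo_xfer_done = 0;
        /* ... kick off the hardware transfer ... */
        left = wait_event_interruptible_timeout(demo_xfer_wq, demo_xfer_done,
                                                msecs_to_jiffies(100));
        if (left == 0)
                return -ETIMEDOUT;
        if (left < 0)
                return left;                    /* -ERESTARTSYS */
        return 0;
}

static irqreturn_t demo_transfer_irq(int irq, void *dev_id)
{
        demo_xfer_done = 1;
        wake_up_interruptible(&demo_xfer_wq);
        return IRQ_HANDLED;
}
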
/linux-4.1.27/drivers/infiniband/hw/mlx5/
H A Dcq.c102 static enum ib_wc_opcode get_umr_comp(struct mlx5_ib_wq *wq, int idx) get_umr_comp() argument
104 switch (wq->wr_data[idx]) { get_umr_comp()
121 struct mlx5_ib_wq *wq, int idx) handle_good_req()
160 wc->opcode = get_umr_comp(wq, idx); handle_good_req()
175 struct mlx5_ib_wq *wq; handle_responder() local
197 wq = &qp->rq; handle_responder()
198 wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)]; handle_responder()
199 ++wq->tail; handle_responder()
410 struct mlx5_ib_wq *wq; mlx5_poll_one() local
466 wq = &(*cur_qp)->sq; mlx5_poll_one()
468 idx = wqe_ctr & (wq->wqe_cnt - 1); mlx5_poll_one()
469 handle_good_req(wc, cqe64, wq, idx); mlx5_poll_one()
470 handle_atomics(*cur_qp, cqe64, wq->last_poll, idx); mlx5_poll_one()
471 wc->wr_id = wq->wrid[idx]; mlx5_poll_one()
472 wq->tail = wq->wqe_head[idx] + 1; mlx5_poll_one()
494 wq = &(*cur_qp)->sq; mlx5_poll_one()
496 idx = wqe_ctr & (wq->wqe_cnt - 1); mlx5_poll_one()
497 wc->wr_id = wq->wrid[idx]; mlx5_poll_one()
498 wq->tail = wq->wqe_head[idx] + 1; mlx5_poll_one()
508 wq = &(*cur_qp)->rq; mlx5_poll_one()
509 wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)]; mlx5_poll_one()
510 ++wq->tail; mlx5_poll_one()
120 handle_good_req(struct ib_wc *wc, struct mlx5_cqe64 *cqe, struct mlx5_ib_wq *wq, int idx) handle_good_req() argument
H A Dmr.c372 queue_delayed_work(cache->wq, &ent->dwork, __cache_work_func()
377 queue_delayed_work(cache->wq, &ent->dwork, __cache_work_func()
380 queue_work(cache->wq, &ent->work); __cache_work_func()
388 queue_work(cache->wq, &ent->work); __cache_work_func()
390 queue_delayed_work(cache->wq, &ent->dwork, 300 * HZ); __cache_work_func()
438 queue_work(cache->wq, &ent->work); alloc_cached_mr()
443 queue_work(cache->wq, &ent->work); alloc_cached_mr()
476 queue_work(cache->wq, &ent->work); free_cached_mr()
573 cache->wq = create_singlethread_workqueue("mkey_cache"); mlx5_mr_cache_init()
574 if (!cache->wq) { mlx5_mr_cache_init()
598 queue_work(cache->wq, &ent->work); mlx5_mr_cache_init()
613 flush_workqueue(dev->cache.wq); mlx5_mr_cache_cleanup()
620 destroy_workqueue(dev->cache.wq); mlx5_mr_cache_cleanup()
H A Dqp.c126 struct mlx5_ib_wq *wq = send ? &qp->sq : &qp->rq; mlx5_ib_read_user_wqe() local
134 if (wq->wqe_cnt == 0) { mlx5_ib_read_user_wqe()
140 offset = wq->offset + ((wqe_index % wq->wqe_cnt) << wq->wqe_shift); mlx5_ib_read_user_wqe()
141 wq_end = wq->offset + (wq->wqe_cnt << wq->wqe_shift); mlx5_ib_read_user_wqe()
161 wqe_length = 1 << wq->wqe_shift; mlx5_ib_read_user_wqe()
167 ret = ib_umem_copy_from(buffer + first_copy_length, umem, wq->offset, mlx5_ib_read_user_wqe()
1825 static int mlx5_wq_overflow(struct mlx5_ib_wq *wq, int nreq, struct ib_cq *ib_cq) mlx5_wq_overflow() argument
1830 cur = wq->head - wq->tail; mlx5_wq_overflow()
1831 if (likely(cur + nreq < wq->max_post)) mlx5_wq_overflow()
1836 cur = wq->head - wq->tail; mlx5_wq_overflow()
1839 return cur + nreq >= wq->max_post; mlx5_wq_overflow()
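
mlx5_wq_overflow() above is the standard ring-occupancy test: head and tail are free-running unsigned counters, so head - tail yields the number of outstanding entries even across wrap, and posting nreq more entries must keep that below max_post. A standalone version of just that check, with a simplified struct that reuses the same field names:

#include <linux/types.h>

struct demo_wq {
        unsigned int head;      /* bumped by the producer, never reset */
        unsigned int tail;      /* bumped as completions are reaped */
        unsigned int max_post;  /* capacity of the ring */
};

/* True if posting nreq more entries would overflow the ring. */
static bool demo_wq_overflow(const struct demo_wq *wq, unsigned int nreq)
{
        unsigned int cur = wq->head - wq->tail;         /* wrap-safe occupancy */

        return cur + nreq >= wq->max_post;
}
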
/linux-4.1.27/drivers/nfc/
H A Dnfcsim.c63 static struct workqueue_struct *wq; variable in typeref:struct:workqueue_struct
211 queue_delayed_work(wq, &dev->poll_work, 0); nfcsim_start_poll()
326 queue_delayed_work(wq, &peer->recv_work, msecs_to_jiffies(5)); nfcsim_tx()
427 * Because the wq is ordered and only 1 work item is executed at a time, nfcsim_wq_poll()
431 queue_delayed_work(wq, &dev->poll_work, msecs_to_jiffies(200)); nfcsim_wq_poll()
488 /* We need an ordered wq to ensure that poll_work items are executed nfcsim_init()
491 wq = alloc_ordered_workqueue("nfcsim", 0); nfcsim_init()
492 if (!wq) { nfcsim_init()
533 destroy_workqueue(wq); nfcsim_exit()
H A Dpn533.c357 struct workqueue_struct *wq; member in struct:pn533
749 queue_work(dev->wq, &dev->cmd_complete_work); pn533_recv_response()
803 queue_work(dev->wq, &dev->cmd_complete_work); pn533_recv_ack()
1066 queue_work(dev->wq, &dev->cmd_work); pn533_wq_cmd_complete()
1651 queue_work(dev->wq, &dev->mi_tm_rx_work); pn533_tm_get_data_complete()
1733 queue_work(dev->wq, &dev->cmd_work); pn533_wq_tm_mi_send()
1794 queue_work(dev->wq, &dev->tg_work); pn533_init_target_complete()
1809 queue_delayed_work(dev->wq, &dev->poll_work, pn533_listen_mode_timer()
1828 queue_delayed_work(dev->wq, &dev->poll_work, pn533_rf_complete()
1876 queue_work(dev->wq, &dev->rf_work); pn533_poll_dep_complete()
1927 queue_work(dev->wq, &dev->rf_work); pn533_poll_dep()
2017 queue_work(dev->wq, &dev->rf_work); pn533_poll_complete()
2544 queue_work(dev->wq, &dev->mi_rx_work); pn533_data_exchange_complete()
2551 queue_work(dev->wq, &dev->mi_tx_work); pn533_data_exchange_complete()
2706 queue_work(dev->wq, &dev->mi_tm_tx_work); pn533_tm_send_complete()
2719 queue_work(dev->wq, &dev->tg_work); pn533_tm_send_complete()
2807 queue_work(dev->wq, &dev->cmd_work); pn533_wq_mi_recv()
2862 queue_work(dev->wq, &dev->cmd_work); pn533_wq_mi_send()
3170 dev->wq = alloc_ordered_workqueue("pn533", 0); pn533_probe()
3171 if (dev->wq == NULL) pn533_probe()
3257 destroy_workqueue(dev->wq); pn533_probe()
3281 destroy_workqueue(dev->wq); pn533_disconnect()
/linux-4.1.27/drivers/staging/lustre/lustre/include/
H A Dlustre_lib.h449 * l_wait_event(&obj->wq, ....); (1)
451 * wake_up(&obj->wq): (2)
531 #define __l_wait_event(wq, condition, info, ret, l_add_wait) \
543 l_add_wait(&wq, &__wait); \
609 remove_wait_queue(&wq, &__wait); \
614 #define l_wait_event(wq, condition, info) \
619 __l_wait_event(wq, condition, __info, \
624 #define l_wait_event_exclusive(wq, condition, info) \
629 __l_wait_event(wq, condition, __info, \
634 #define l_wait_event_exclusive_head(wq, condition, info) \
639 __l_wait_event(wq, condition, __info, \
644 #define l_wait_condition(wq, condition) \
647 l_wait_event(wq, condition, &lwi); \
650 #define l_wait_condition_exclusive(wq, condition) \
653 l_wait_event_exclusive(wq, condition, &lwi); \
656 #define l_wait_condition_exclusive_head(wq, condition) \
659 l_wait_event_exclusive_head(wq, condition, &lwi); \
/linux-4.1.27/drivers/thunderbolt/
H A Dtb.c214 * Executes on tb->wq.
296 queue_work(tb->wq, &ev->work); tb_schedule_hotplug_handler()
332 if (tb->wq) { thunderbolt_shutdown_and_free()
333 flush_workqueue(tb->wq); thunderbolt_shutdown_and_free()
334 destroy_workqueue(tb->wq); thunderbolt_shutdown_and_free()
335 tb->wq = NULL; thunderbolt_shutdown_and_free()
366 tb->wq = alloc_ordered_workqueue("thunderbolt", 0); thunderbolt_alloc_and_start()
367 if (!tb->wq) thunderbolt_alloc_and_start()
H A Dtb.h106 struct workqueue_struct *wq; /* ordered workqueue for plug events */ member in struct:tb
113 * wq after cfg has been paused.
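
tb.h above documents why an ordered workqueue is used: plug events must be handled strictly one at a time and in arrival order. A hedged sketch of that arrangement, with each event carried in its own heap-allocated work item that frees itself (demo_* names invented):

#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/errno.h>

struct demo_plug_event {
        struct work_struct work;
        int port;                               /* illustrative payload */
};

static struct workqueue_struct *demo_plug_wq;   /* ordered => one item at a time */

static void demo_plug_event_fn(struct work_struct *work)
{
        struct demo_plug_event *ev = container_of(work, struct demo_plug_event, work);

        /* ... handle hotplug for ev->port; events run serialized, in order ... */
        kfree(ev);
}

static int demo_plug_init(void)
{
        demo_plug_wq = alloc_ordered_workqueue("demo-plug", 0);
        return demo_plug_wq ? 0 : -ENOMEM;
}

static int demo_queue_plug_event(int port)
{
        struct demo_plug_event *ev = kzalloc(sizeof(*ev), GFP_KERNEL);

        if (!ev)
                return -ENOMEM;
        ev->port = port;
        INIT_WORK(&ev->work, demo_plug_event_fn);
        queue_work(demo_plug_wq, &ev->work);
        return 0;
}
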
/linux-4.1.27/include/trace/events/
H A Dbtrfs.h997 __field( void *, wq )
1006 __entry->wq = work->wq;
1013 TP_printk("work=%p (normal_work=%p), wq=%p, func=%pf, ordered_func=%p,"
1015 __entry->work, __entry->normal_work, __entry->wq,
1067 TP_PROTO(struct __btrfs_workqueue *wq, const char *name, int high),
1069 TP_ARGS(wq, name, high),
1072 __field( void *, wq )
1078 __entry->wq = wq;
1083 TP_printk("name=%s%s, wq=%p", __get_str(name),
1086 __entry->wq)
1091 TP_PROTO(struct __btrfs_workqueue *wq, const char *name, int high),
1093 TP_ARGS(wq, name, high)
1098 TP_PROTO(struct __btrfs_workqueue *wq),
1100 TP_ARGS(wq),
1103 __field( void *, wq )
1107 __entry->wq = wq;
1110 TP_printk("wq=%p", __entry->wq)
1115 TP_PROTO(struct __btrfs_workqueue *wq),
1117 TP_ARGS(wq)
H A Dworkqueue.h55 __entry->workqueue = pwq->wq;
/linux-4.1.27/drivers/infiniband/hw/mlx4/
H A Dcq.c601 struct mlx4_ib_wq *wq; mlx4_ib_qp_sw_comp() local
605 wq = is_send ? &qp->sq : &qp->rq; mlx4_ib_qp_sw_comp()
606 cur = wq->head - wq->tail; mlx4_ib_qp_sw_comp()
612 wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)]; mlx4_ib_qp_sw_comp()
615 wq->tail++; mlx4_ib_qp_sw_comp()
653 struct mlx4_ib_wq *wq; mlx4_ib_poll_one() local
740 wq = &(*cur_qp)->sq; mlx4_ib_poll_one()
743 wq->tail += (u16) (wqe_ctr - (u16) wq->tail); mlx4_ib_poll_one()
745 wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)]; mlx4_ib_poll_one()
746 ++wq->tail; mlx4_ib_poll_one()
758 wq = &(*cur_qp)->rq; mlx4_ib_poll_one()
759 tail = wq->tail & (wq->wqe_cnt - 1); mlx4_ib_poll_one()
760 wc->wr_id = wq->wrid[tail]; mlx4_ib_poll_one()
761 ++wq->tail; mlx4_ib_poll_one()
H A Dalias_GUID.c433 queue_delayed_work(dev->sriov.alias_guid.ports_guid[port_index].wq, aliasguid_query_handler()
565 queue_delayed_work(dev->sriov.alias_guid.ports_guid[port - 1].wq, set_guid_rec()
627 queue_delayed_work(dev->sriov.alias_guid.ports_guid[port - 1].wq, mlx4_ib_invalidate_all_guid_record()
786 queue_delayed_work(dev->sriov.alias_guid.ports_guid[port].wq, mlx4_ib_init_alias_guid_work()
822 flush_workqueue(dev->sriov.alias_guid.ports_guid[i].wq); mlx4_ib_destroy_alias_guid_service()
823 destroy_workqueue(dev->sriov.alias_guid.ports_guid[i].wq); mlx4_ib_destroy_alias_guid_service()
878 dev->sriov.alias_guid.ports_guid[i].wq = mlx4_ib_init_alias_guid_service()
880 if (!dev->sriov.alias_guid.ports_guid[i].wq) { mlx4_ib_init_alias_guid_service()
891 destroy_workqueue(dev->sriov.alias_guid.ports_guid[i].wq); mlx4_ib_init_alias_guid_service()
892 dev->sriov.alias_guid.ports_guid[i].wq = NULL; mlx4_ib_init_alias_guid_service()
H A Dmad.c1113 queue_work(ctx->wq, &ctx->work); mlx4_ib_tunnel_comp_handler()
1847 ctx->wq = to_mdev(ibdev)->sriov.demux[port - 1].wq; create_pv_resources()
1858 ctx->wq = NULL; create_pv_resources()
1899 flush_workqueue(ctx->wq); destroy_pv_resources()
1998 ctx->wq = create_singlethread_workqueue(name); mlx4_ib_alloc_demux_ctx()
1999 if (!ctx->wq) { mlx4_ib_alloc_demux_ctx()
2016 destroy_workqueue(ctx->wq); mlx4_ib_alloc_demux_ctx()
2017 ctx->wq = NULL; mlx4_ib_alloc_demux_ctx()
2033 flush_workqueue(sqp_ctx->wq); mlx4_ib_free_sqp_ctx()
2064 flush_workqueue(ctx->wq); mlx4_ib_free_demux_ctx()
2071 destroy_workqueue(ctx->wq); mlx4_ib_free_demux_ctx()
/linux-4.1.27/drivers/mtd/chips/
H A Dcfi_cmdset_0020.c159 init_waitqueue_head(&(cfi->chips[i].wq)); cfi_cmdset_0020()
297 wake_up(&chip->wq); do_read_onechip()
352 add_wait_queue(&chip->wq, &wait); do_read_onechip()
355 remove_wait_queue(&chip->wq, &wait); do_read_onechip()
377 wake_up(&chip->wq); do_read_onechip()
485 add_wait_queue(&chip->wq, &wait); do_write_buffer()
488 remove_wait_queue(&chip->wq, &wait); do_write_buffer()
542 add_wait_queue(&chip->wq, &wait); do_write_buffer()
545 remove_wait_queue(&chip->wq, &wait); do_write_buffer()
595 wake_up(&chip->wq); do_write_buffer()
599 wake_up(&chip->wq); do_write_buffer()
778 add_wait_queue(&chip->wq, &wait); do_erase_oneblock()
781 remove_wait_queue(&chip->wq, &wait); do_erase_oneblock()
807 add_wait_queue(&chip->wq, &wait); do_erase_oneblock()
810 remove_wait_queue(&chip->wq, &wait); do_erase_oneblock()
884 wake_up(&chip->wq); do_erase_oneblock()
1007 add_wait_queue(&chip->wq, &wait); cfi_staa_sync()
1011 remove_wait_queue(&chip->wq, &wait); cfi_staa_sync()
1026 wake_up(&chip->wq); cfi_staa_sync()
1077 add_wait_queue(&chip->wq, &wait); do_lock_oneblock()
1080 remove_wait_queue(&chip->wq, &wait); do_lock_oneblock()
1123 wake_up(&chip->wq); do_lock_oneblock()
1223 add_wait_queue(&chip->wq, &wait); do_unlock_oneblock()
1226 remove_wait_queue(&chip->wq, &wait); do_unlock_oneblock()
1269 wake_up(&chip->wq); do_unlock_oneblock()
1359 wake_up(&chip->wq); cfi_staa_suspend()
1385 wake_up(&chip->wq); cfi_staa_resume()
H A Dcfi_cmdset_0002.c666 init_waitqueue_head(&(cfi->chips[i].wq)); cfi_cmdset_0002()
872 add_wait_queue(&chip->wq, &wait); get_chip()
875 remove_wait_queue(&chip->wq, &wait); get_chip()
907 wake_up(&chip->wq); put_chip()
1022 add_wait_queue(&chip->wq, &wait); xip_udelay()
1025 remove_wait_queue(&chip->wq, &wait); xip_udelay()
1222 add_wait_queue(&chip->wq, &wait); do_read_secsi_onechip()
1227 remove_wait_queue(&chip->wq, &wait); do_read_secsi_onechip()
1241 wake_up(&chip->wq); do_read_secsi_onechip()
1619 add_wait_queue(&chip->wq, &wait); do_write_oneword()
1622 remove_wait_queue(&chip->wq, &wait); do_write_oneword()
1691 add_wait_queue(&cfi->chips[chipnum].wq, &wait); cfi_amdstd_write_words()
1696 remove_wait_queue(&cfi->chips[chipnum].wq, &wait); cfi_amdstd_write_words()
1762 add_wait_queue(&cfi->chips[chipnum].wq, &wait); cfi_amdstd_write_words()
1767 remove_wait_queue(&cfi->chips[chipnum].wq, &wait); cfi_amdstd_write_words()
1868 add_wait_queue(&chip->wq, &wait); do_write_buffer()
1871 remove_wait_queue(&chip->wq, &wait); do_write_buffer()
2279 add_wait_queue(&chip->wq, &wait); do_erase_chip()
2282 remove_wait_queue(&chip->wq, &wait); do_erase_chip()
2368 add_wait_queue(&chip->wq, &wait); do_erase_oneblock()
2371 remove_wait_queue(&chip->wq, &wait); do_erase_oneblock()
2760 add_wait_queue(&chip->wq, &wait); cfi_amdstd_sync()
2766 remove_wait_queue(&chip->wq, &wait); cfi_amdstd_sync()
2781 wake_up(&chip->wq); cfi_amdstd_sync()
2832 wake_up(&chip->wq); cfi_amdstd_suspend()
2858 wake_up(&chip->wq); cfi_amdstd_resume()
H A Dcfi_cmdset_0001.c573 init_waitqueue_head(&(cfi->chips[i].wq)); cfi_cmdset_0001()
762 init_waitqueue_head(&chip->wq); cfi_intelext_partition_fixup()
883 add_wait_queue(&chip->wq, &wait); chip_ready()
886 remove_wait_queue(&chip->wq, &wait); chip_ready()
967 add_wait_queue(&chip->wq, &wait); get_chip()
970 remove_wait_queue(&chip->wq, &wait); get_chip()
1007 wake_up(&chip->wq); put_chip()
1021 wake_up(&chip->wq); put_chip()
1056 wake_up(&chip->wq); put_chip()
1187 add_wait_queue(&chip->wq, &wait); xip_wait_for_operation()
1190 remove_wait_queue(&chip->wq, &wait); xip_wait_for_operation()
1263 add_wait_queue(&chip->wq, &wait); inval_cache_and_wait_for_operation()
1266 remove_wait_queue(&chip->wq, &wait); inval_cache_and_wait_for_operation()
2041 wake_up(&chip->wq); cfi_intelext_sync()
2548 wake_up(&chip->wq); cfi_intelext_suspend()
2597 wake_up(&chip->wq); cfi_intelext_resume()
/linux-4.1.27/drivers/staging/android/
H A Dsync.c169 init_waitqueue_head(&fence->wq); sync_fence_alloc()
187 wake_up_all(&fence->wq); fence_check_cb_func()
342 spin_lock_irqsave(&fence->wq.lock, flags); sync_fence_wait_async()
345 __add_wait_queue_tail(&fence->wq, &waiter->work); sync_fence_wait_async()
346 spin_unlock_irqrestore(&fence->wq.lock, flags); sync_fence_wait_async()
361 spin_lock_irqsave(&fence->wq.lock, flags); sync_fence_cancel_async()
366 spin_unlock_irqrestore(&fence->wq.lock, flags); sync_fence_cancel_async()
384 ret = wait_event_interruptible_timeout(fence->wq, sync_fence_wait()
549 poll_wait(file, &fence->wq, wait); sync_fence_poll()
H A Dsync_debug.c163 spin_lock_irqsave(&fence->wq.lock, flags); sync_print_fence()
164 list_for_each_entry(pos, &fence->wq.task_list, task_list) { sync_print_fence()
174 spin_unlock_irqrestore(&fence->wq.lock, flags); sync_print_fence()
H A Dsync.h151 * @wq: wait queue for fence signaling
163 wait_queue_head_t wq; member in struct:sync_fence
/linux-4.1.27/net/sunrpc/
H A Dsvcsock.c408 wait_queue_head_t *wq = sk_sleep(sk); svc_udp_data_ready() local
417 if (wq && waitqueue_active(wq)) svc_udp_data_ready()
418 wake_up_interruptible(wq); svc_udp_data_ready()
427 wait_queue_head_t *wq = sk_sleep(sk); svc_write_space() local
435 if (wq && waitqueue_active(wq)) { svc_write_space()
438 wake_up_interruptible(wq); svc_write_space()
766 wait_queue_head_t *wq; svc_tcp_listen_data_ready() local
789 wq = sk_sleep(sk); svc_tcp_listen_data_ready()
790 if (wq && waitqueue_active(wq)) svc_tcp_listen_data_ready()
791 wake_up_interruptible_all(wq); svc_tcp_listen_data_ready()
800 wait_queue_head_t *wq = sk_sleep(sk); svc_tcp_state_change() local
811 if (wq && waitqueue_active(wq)) svc_tcp_state_change()
812 wake_up_interruptible_all(wq); svc_tcp_state_change()
818 wait_queue_head_t *wq = sk_sleep(sk); svc_tcp_data_ready() local
826 if (wq && waitqueue_active(wq)) svc_tcp_data_ready()
827 wake_up_interruptible(wq); svc_tcp_data_ready()
1587 wait_queue_head_t *wq; svc_sock_detach() local
1596 wq = sk_sleep(sk); svc_sock_detach()
1597 if (wq && waitqueue_active(wq)) svc_sock_detach()
1598 wake_up_interruptible(wq); svc_sock_detach()
H A Dsched.c289 wait_queue_head_t *wq = bit_waitqueue(m, RPC_TASK_ACTIVE); rpc_complete_task() local
296 spin_lock_irqsave(&wq->lock, flags); rpc_complete_task()
299 if (waitqueue_active(wq)) rpc_complete_task()
300 __wake_up_locked_key(wq, TASK_NORMAL, &k); rpc_complete_task()
301 spin_unlock_irqrestore(&wq->lock, flags); rpc_complete_task()
309 * to enforce taking of the wq->lock and hence avoid races with
1066 struct workqueue_struct *wq; rpciod_start() local
1073 wq = alloc_workqueue("rpciod", WQ_MEM_RECLAIM | WQ_HIGHPRI, 0); rpciod_start()
1074 rpciod_workqueue = wq; rpciod_start()
1080 struct workqueue_struct *wq = NULL; rpciod_stop() local
1086 wq = rpciod_workqueue; rpciod_stop()
1088 destroy_workqueue(wq); rpciod_stop()
/linux-4.1.27/drivers/media/pci/ddbridge/
H A Dddbridge.h85 wait_queue_head_t wq; member in struct:ddb_input
113 wait_queue_head_t wq; member in struct:ddb_output
132 wait_queue_head_t wq; member in struct:ddb_i2c
H A Dddbridge-core.c89 stat = wait_event_timeout(i2c->wq, i2c->done == 1, HZ); ddb_i2c_cmd()
186 init_waitqueue_head(&i2c->wq); ddb_i2c_init()
911 output->wq, ddb_output_free(output) >= 188) < 0) ts_write()
938 input->wq, ddb_input_avail(input) >= 188) < 0) ts_read()
1019 wake_up(&input->wq); input_tasklet()
1034 wake_up(&output->wq); output_tasklet()
1216 init_waitqueue_head(&input->wq); ddb_input_init()
1232 init_waitqueue_head(&output->wq); ddb_output_init()
1280 wake_up(&i2c->wq); irq_handle_i2c()
/linux-4.1.27/drivers/gpu/drm/
H A Ddrm_flip_work.c91 * @wq: the work-queue to run the queued work on
99 struct workqueue_struct *wq) drm_flip_work_commit()
107 queue_work(wq, &work->worker); drm_flip_work_commit()
98 drm_flip_work_commit(struct drm_flip_work *work, struct workqueue_struct *wq) drm_flip_work_commit() argument
/linux-4.1.27/fs/jffs2/
H A Dos-linux.h40 #define sleep_on_spinunlock(wq, s) \
43 add_wait_queue((wq), &__wait); \
47 remove_wait_queue((wq), &__wait); \
/linux-4.1.27/drivers/usb/misc/
H A Dappledisplay.c88 static struct workqueue_struct *wq; variable in typeref:struct:workqueue_struct
125 queue_delayed_work(wq, &pdata->work, 0); appledisplay_complete()
368 wq = create_singlethread_workqueue("appledisplay"); appledisplay_init()
369 if (!wq) { appledisplay_init()
379 flush_workqueue(wq); appledisplay_exit()
380 destroy_workqueue(wq); appledisplay_exit()
/linux-4.1.27/crypto/
H A Dalgif_aead.c100 struct socket_wq *wq; aead_wmem_wakeup() local
106 wq = rcu_dereference(sk->sk_wq); aead_wmem_wakeup()
107 if (wq_has_sleeper(wq)) aead_wmem_wakeup()
108 wake_up_interruptible_sync_poll(&wq->wait, POLLIN | aead_wmem_wakeup()
149 struct socket_wq *wq; aead_data_wakeup() local
157 wq = rcu_dereference(sk->sk_wq); aead_data_wakeup()
158 if (wq_has_sleeper(wq)) aead_data_wakeup()
159 wake_up_interruptible_sync_poll(&wq->wait, POLLOUT | aead_data_wakeup()
H A Dpcrypt.c34 struct workqueue_struct *wq; member in struct:padata_pcrypt
458 pcrypt->wq = alloc_workqueue("%s", WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, pcrypt_init_padata()
460 if (!pcrypt->wq) pcrypt_init_padata()
463 pcrypt->pinst = padata_alloc_possible(pcrypt->wq); pcrypt_init_padata()
499 destroy_workqueue(pcrypt->wq); pcrypt_init_padata()
513 destroy_workqueue(pcrypt->wq); pcrypt_fini_padata()
H A Dalgif_skcipher.c239 struct socket_wq *wq; skcipher_wmem_wakeup() local
245 wq = rcu_dereference(sk->sk_wq); skcipher_wmem_wakeup()
246 if (wq_has_sleeper(wq)) skcipher_wmem_wakeup()
247 wake_up_interruptible_sync_poll(&wq->wait, POLLIN | skcipher_wmem_wakeup()
289 struct socket_wq *wq; skcipher_data_wakeup() local
295 wq = rcu_dereference(sk->sk_wq); skcipher_data_wakeup()
296 if (wq_has_sleeper(wq)) skcipher_data_wakeup()
297 wake_up_interruptible_sync_poll(&wq->wait, POLLOUT | skcipher_data_wakeup()
/linux-4.1.27/drivers/gpu/host1x/
H A Dcdma.h53 struct delayed_work wq; /* work queue */ member in struct:buffer_timeout
78 struct buffer_timeout timeout; /* channel's timeout state/wq */
H A Dintr.c124 wait_queue_head_t *wq = waiter->data; action_wakeup() local
125 wake_up(wq); action_wakeup()
130 wait_queue_head_t *wq = waiter->data; action_wakeup_interruptible() local
131 wake_up_interruptible(wq); action_wakeup_interruptible()
H A Dsyncpt.c191 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); host1x_syncpt_wait()
230 &wq, waiter, &ref); host1x_syncpt_wait()
242 int remain = wait_event_interruptible_timeout(wq, host1x_syncpt_wait()
/linux-4.1.27/drivers/sbus/char/
H A Dbbc_i2c.h61 wait_queue_head_t wq; member in struct:bbc_i2c_bus
H A Dbbc_i2c.c129 add_wait_queue(&bp->wq, &wait); wait_for_pin()
134 bp->wq, wait_for_pin()
143 remove_wait_queue(&bp->wq, &wait); wait_for_pin()
279 wake_up_interruptible(&bp->wq); bbc_i2c_interrupt()
317 init_waitqueue_head(&bp->wq); attach_one_i2c()
/linux-4.1.27/drivers/staging/rtl8192e/rtl8192e/
H A Drtl_ps.c69 queue_delayed_work_rsl(priv->rtllib->wq, rtl8192_hw_wakeup()
115 queue_delayed_work_rsl(priv->rtllib->wq, rtl8192_hw_to_sleep()
117 queue_delayed_work_rsl(priv->rtllib->wq, rtl8192_hw_to_sleep()
208 queue_work_rsl(priv->rtllib->wq, rtllib_ips_leave_wq()
/linux-4.1.27/drivers/block/
H A Dloop.h57 struct workqueue_struct *wq; member in struct:loop_device
/linux-4.1.27/drivers/char/tpm/
H A Dtpm_ibmvtpm.h45 wait_queue_head_t wq; member in struct:ibmvtpm_dev
H A Dtpm_ibmvtpm.c93 sig = wait_event_interruptible(ibmvtpm->wq, ibmvtpm->res_len != 0); tpm_ibmvtpm_recv()
518 wake_up_interruptible(&ibmvtpm->wq); ibmvtpm_crq_process()
624 init_waitqueue_head(&ibmvtpm->wq); tpm_ibmvtpm_probe()
/linux-4.1.27/fs/logfs/
H A Ddev_bdev.c54 static DECLARE_WAIT_QUEUE_HEAD(wq);
73 wake_up(&wq);
167 wake_up(&wq); erase_end_io()
246 wait_event(wq, atomic_read(&super->s_pending_writes) == 0); bdev_sync()
/linux-4.1.27/fs/nfs/blocklayout/
H A Drpc_pipefs.c62 DECLARE_WAITQUEUE(wq, current); bl_resolve_deviceid()
87 add_wait_queue(&nn->bl_wq, &wq); bl_resolve_deviceid()
90 remove_wait_queue(&nn->bl_wq, &wq); bl_resolve_deviceid()
96 remove_wait_queue(&nn->bl_wq, &wq); bl_resolve_deviceid()
/linux-4.1.27/drivers/pci/hotplug/
H A Dpciehp_ctrl.c52 queue_work(p_slot->wq, &info->work); queue_interrupt_event()
361 queue_work(p_slot->wq, &info->work); pciehp_queue_pushbutton_work()
389 queue_delayed_work(p_slot->wq, &p_slot->work, 5*HZ); handle_button_press_event()
450 queue_work(p_slot->wq, &info->work); handle_surprise_event()
479 queue_work(p_slot->wq, &info->work); handle_link_event()
492 queue_work(p_slot->wq, &info->work); handle_link_event()
501 queue_work(p_slot->wq, &info->work); handle_link_event()
H A Dshpchp_core.c131 slot->wq = alloc_workqueue("shpchp-%d", 0, 0, slot->number); init_slots()
132 if (!slot->wq) { init_slots()
168 destroy_workqueue(slot->wq); init_slots()
189 destroy_workqueue(slot->wq); cleanup_slots()
H A Dpciehp_hpc.c726 slot->wq = alloc_workqueue("pciehp-%u", 0, 0, PSN(ctrl)); pcie_init_slot()
727 if (!slot->wq) pcie_init_slot()
745 destroy_workqueue(slot->wq); pcie_cleanup_slot()
H A Dshpchp_ctrl.c54 queue_work(p_slot->wq, &info->work); queue_interrupt_event()
458 queue_work(p_slot->wq, &info->work); shpchp_queue_pushbutton_work()
506 queue_delayed_work(p_slot->wq, &p_slot->work, 5*HZ); handle_button_press_event()
/linux-4.1.27/arch/x86/kernel/
H A Dkvm.c94 wait_queue_head_t wq; member in struct:kvm_task_sleep_node
144 init_waitqueue_head(&n.wq); kvm_async_pf_task_wait()
150 prepare_to_wait(&n.wq, &wait, TASK_UNINTERRUPTIBLE); kvm_async_pf_task_wait()
169 finish_wait(&n.wq, &wait); kvm_async_pf_task_wait()
181 else if (waitqueue_active(&n->wq)) apf_task_wake_one()
182 wake_up(&n->wq); apf_task_wake_one()
234 init_waitqueue_head(&n->wq); kvm_async_pf_task_wake()
/linux-4.1.27/drivers/mtd/lpddr/
H A Dlpddr_cmds.c101 init_waitqueue_head(&chip->wq); lpddr_cmdset()
159 add_wait_queue(&chip->wq, &wait); wait_for_ready()
162 remove_wait_queue(&chip->wq, &wait); wait_for_ready()
258 add_wait_queue(&chip->wq, &wait); get_chip()
261 remove_wait_queue(&chip->wq, &wait); get_chip()
325 add_wait_queue(&chip->wq, &wait); chip_ready()
328 remove_wait_queue(&chip->wq, &wait); chip_ready()
351 wake_up(&chip->wq); put_chip()
365 wake_up(&chip->wq); put_chip()
386 wake_up(&chip->wq); put_chip()
/linux-4.1.27/drivers/net/wireless/cw1200/
H A Dcw1200_spi.c43 wait_queue_head_t wq; member in struct:hwbus_priv
205 add_wait_queue(&self->wq, &wait); cw1200_spi_lock()
218 remove_wait_queue(&self->wq, &wait); cw1200_spi_lock()
230 wake_up(&self->wq); cw1200_spi_unlock()
413 init_waitqueue_head(&self->wq); cw1200_spi_probe()
/linux-4.1.27/drivers/gpu/drm/atmel-hlcdc/
H A Datmel_hlcdc_dc.c300 dc->wq = alloc_ordered_workqueue("atmel-hlcdc-dc", 0); atmel_hlcdc_dc_load()
301 if (!dc->wq) atmel_hlcdc_dc_load()
352 destroy_workqueue(dc->wq); atmel_hlcdc_dc_load()
363 flush_workqueue(dc->wq); atmel_hlcdc_dc_unload()
376 destroy_workqueue(dc->wq); atmel_hlcdc_dc_unload()
H A Datmel_hlcdc_dc.h130 * @wq: display controller workqueue
139 struct workqueue_struct *wq; member in struct:atmel_hlcdc_dc
H A Datmel_hlcdc_layer.c62 drm_flip_work_commit(&layer->gc, layer->wq); atmel_hlcdc_layer_fb_flip_release_queue()
611 layer->wq = dc->wq; atmel_hlcdc_layer_init()
/linux-4.1.27/drivers/infiniband/hw/mthca/
H A Dmthca_cq.c489 struct mthca_wq *wq; mthca_poll_one() local
539 wq = &(*cur_qp)->sq; mthca_poll_one()
541 >> wq->wqe_shift); mthca_poll_one()
547 wq = NULL; mthca_poll_one()
553 wq = &(*cur_qp)->rq; mthca_poll_one()
555 wqe_index = wqe >> wq->wqe_shift; mthca_poll_one()
562 wqe_index = wq->max - 1; mthca_poll_one()
566 if (wq) { mthca_poll_one()
567 if (wq->last_comp < wqe_index) mthca_poll_one()
568 wq->tail += wqe_index - wq->last_comp; mthca_poll_one()
570 wq->tail += wqe_index + wq->max - wq->last_comp; mthca_poll_one()
572 wq->last_comp = wqe_index; mthca_poll_one()
H A Dmthca_qp.c229 static void mthca_wq_reset(struct mthca_wq *wq) mthca_wq_reset() argument
231 wq->next_ind = 0; mthca_wq_reset()
232 wq->last_comp = wq->max - 1; mthca_wq_reset()
233 wq->head = 0; mthca_wq_reset()
234 wq->tail = 0; mthca_wq_reset()
1545 static inline int mthca_wq_overflow(struct mthca_wq *wq, int nreq, mthca_wq_overflow() argument
1551 cur = wq->head - wq->tail; mthca_wq_overflow()
1552 if (likely(cur + nreq < wq->max)) mthca_wq_overflow()
1557 cur = wq->head - wq->tail; mthca_wq_overflow()
1560 return cur + nreq >= wq->max; mthca_wq_overflow()
/linux-4.1.27/drivers/net/ethernet/qlogic/qlcnic/
H A Dqlcnic_dcb.c289 if (dcb->wq) { __qlcnic_dcb_free()
290 destroy_workqueue(dcb->wq); __qlcnic_dcb_free()
291 dcb->wq = NULL; __qlcnic_dcb_free()
314 dcb->wq = create_singlethread_workqueue("qlcnic-dcb"); __qlcnic_dcb_attach()
315 if (!dcb->wq) { __qlcnic_dcb_attach()
339 destroy_workqueue(dcb->wq); __qlcnic_dcb_attach()
340 dcb->wq = NULL; __qlcnic_dcb_attach()
539 queue_delayed_work(dcb->wq, &dcb->aen_work, 0); qlcnic_82xx_dcb_aen_handler()
653 queue_delayed_work(dcb->wq, &dcb->aen_work, 0); qlcnic_83xx_dcb_aen_handler()
H A Dqlcnic_dcb.h39 struct workqueue_struct *wq; member in struct:qlcnic_dcb
H A Dqlcnic_sriov_common.c139 struct workqueue_struct *wq; qlcnic_sriov_init() local
161 wq = create_singlethread_workqueue("bc-trans"); qlcnic_sriov_init()
162 if (wq == NULL) { qlcnic_sriov_init()
169 bc->bc_trans_wq = wq; qlcnic_sriov_init()
171 wq = create_singlethread_workqueue("async"); qlcnic_sriov_init()
172 if (wq == NULL) { qlcnic_sriov_init()
178 bc->bc_async_wq = wq; qlcnic_sriov_init()
/linux-4.1.27/drivers/media/i2c/
H A Dsaa7110.c63 wait_queue_head_t wq; member in struct:saa7110
199 prepare_to_wait(&decoder->wq, &wait, TASK_UNINTERRUPTIBLE); determine_norm()
201 finish_wait(&decoder->wq, &wait); determine_norm()
234 prepare_to_wait(&decoder->wq, &wait, TASK_UNINTERRUPTIBLE); determine_norm()
236 finish_wait(&decoder->wq, &wait); determine_norm()
426 init_waitqueue_head(&decoder->wq); saa7110_probe()
H A Dmsp3400-driver.h102 wait_queue_head_t wq; member in struct:msp_state
H A Dmsp3400-driver.c323 wake_up_interruptible(&state->wq); msp_wake_thread()
330 add_wait_queue(&state->wq, &wait); msp_sleep()
341 remove_wait_queue(&state->wq, &wait); msp_sleep()
712 init_waitqueue_head(&state->wq); msp_probe()
/linux-4.1.27/drivers/gpu/drm/tilcdc/
H A Dtilcdc_drv.c144 flush_workqueue(priv->wq); tilcdc_unload()
145 destroy_workqueue(priv->wq); tilcdc_unload()
174 priv->wq = alloc_ordered_workqueue("tilcdc", 0); tilcdc_load()
175 if (!priv->wq) { tilcdc_load()
326 flush_workqueue(priv->wq); tilcdc_load()
327 destroy_workqueue(priv->wq); tilcdc_load()
H A Dtilcdc_drv.h77 struct workqueue_struct *wq; member in struct:tilcdc_drm_private
/linux-4.1.27/drivers/iommu/
H A Damd_iommu_v2.c59 wait_queue_head_t wq; /* To wait for count == 0 */ member in struct:pasid_state
74 wait_queue_head_t wq; member in struct:device_state
151 wake_up(&dev_state->wq); put_device_state()
264 wake_up(&pasid_state->wq); put_pasid_state()
270 wait_event(pasid_state->wq, !atomic_read(&pasid_state->count)); put_pasid_state_wait()
636 init_waitqueue_head(&pasid_state->wq); amd_iommu_bind_pasid()
760 init_waitqueue_head(&dev_state->wq); amd_iommu_init_device()
848 wait_event(dev_state->wq, !atomic_read(&dev_state->count)); amd_iommu_free_device()
/linux-4.1.27/drivers/scsi/libsas/
H A Dsas_event.c59 struct workqueue_struct *wq = ha->core.shost->work_q; __sas_drain_work() local
67 drain_workqueue(wq); __sas_drain_work()
/linux-4.1.27/drivers/staging/i2o/
H A Dexec-osm.c49 wait_queue_head_t *wq; /* Pointer to Wait queue */ member in struct:i2o_exec_wait
125 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); i2o_msg_post_wait_mem()
152 wait->wq = &wq; i2o_msg_post_wait_mem()
165 wait_event_interruptible_timeout(wq, wait->complete, timeout * HZ); i2o_msg_post_wait_mem()
169 wait->wq = NULL; i2o_msg_post_wait_mem()
240 if (wait->wq) i2o_msg_post_wait_complete()
257 wake_up_interruptible(wait->wq); i2o_msg_post_wait_complete()
/linux-4.1.27/net/atm/
H A Dcommon.c95 struct socket_wq *wq; vcc_def_wakeup() local
98 wq = rcu_dereference(sk->sk_wq); vcc_def_wakeup()
99 if (wq_has_sleeper(wq)) vcc_def_wakeup()
100 wake_up(&wq->wait); vcc_def_wakeup()
114 struct socket_wq *wq; vcc_write_space() local
119 wq = rcu_dereference(sk->sk_wq); vcc_write_space()
120 if (wq_has_sleeper(wq)) vcc_write_space()
121 wake_up_interruptible(&wq->wait); vcc_write_space()
/linux-4.1.27/virt/kvm/
H A Dasync_pf.c97 if (waitqueue_active(&vcpu->wq)) async_pf_execute()
98 wake_up_interruptible(&vcpu->wq); async_pf_execute()
/linux-4.1.27/fs/nfs/
H A Dcallback.c123 DEFINE_WAIT(wq); nfs41_callback_svc()
131 prepare_to_wait(&serv->sv_cb_waitq, &wq, TASK_INTERRUPTIBLE); nfs41_callback_svc()
138 finish_wait(&serv->sv_cb_waitq, &wq); nfs41_callback_svc()
146 finish_wait(&serv->sv_cb_waitq, &wq); nfs41_callback_svc()
/linux-4.1.27/drivers/gpu/host1x/hw/
H A Dcdma_hw.c243 timeout.wq); cdma_timeout_handler()
296 INIT_DELAYED_WORK(&cdma->timeout.wq, cdma_timeout_handler); cdma_timeout_init()
308 cancel_delayed_work(&cdma->timeout.wq); cdma_timeout_destroy()
/linux-4.1.27/arch/arm/kvm/
H A Dpsci.c71 wait_queue_head_t *wq; kvm_psci_vcpu_on() local
119 wq = kvm_arch_vcpu_wq(vcpu); kvm_psci_vcpu_on()
120 wake_up_interruptible(wq); kvm_psci_vcpu_on()
/linux-4.1.27/drivers/target/tcm_fc/
H A Dtfc_conf.c303 struct workqueue_struct *wq; ft_add_tpg() local
334 wq = alloc_workqueue("tcm_fc", 0, 1); ft_add_tpg()
335 if (!wq) { ft_add_tpg()
343 destroy_workqueue(wq); ft_add_tpg()
347 tpg->workqueue = wq; ft_add_tpg()
/linux-4.1.27/drivers/scsi/bfa/
H A Dbfad_im.c161 wait_queue_head_t *wq; bfa_cb_tskim_done() local
165 wq = (wait_queue_head_t *) cmnd->SCp.ptr; bfa_cb_tskim_done()
168 if (wq) bfa_cb_tskim_done()
169 wake_up(wq); bfa_cb_tskim_done()
298 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); bfad_im_reset_lun_handler()
326 cmnd->SCp.ptr = (char *)&wq; bfad_im_reset_lun_handler()
334 wait_event(wq, test_bit(IO_DONE_BIT, bfad_im_reset_lun_handler()
361 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); bfad_im_reset_bus_handler()
368 cmnd->SCp.ptr = (char *)&wq; bfad_im_reset_bus_handler()
377 wait_event(wq, test_bit(IO_DONE_BIT, bfad_im_reset_bus_handler()
/linux-4.1.27/net/
H A Dsocket.c248 struct socket_wq *wq; sock_alloc_inode() local
253 wq = kmalloc(sizeof(*wq), GFP_KERNEL); sock_alloc_inode()
254 if (!wq) { sock_alloc_inode()
258 init_waitqueue_head(&wq->wait); sock_alloc_inode()
259 wq->fasync_list = NULL; sock_alloc_inode()
260 RCU_INIT_POINTER(ei->socket.wq, wq); sock_alloc_inode()
274 struct socket_wq *wq; sock_destroy_inode() local
277 wq = rcu_dereference_protected(ei->socket.wq, 1); sock_destroy_inode()
278 kfree_rcu(wq, rcu); sock_destroy_inode()
576 if (rcu_dereference_protected(sock->wq, 1)->fasync_list) sock_release()
1044 struct socket_wq *wq; sock_fasync() local
1050 wq = rcu_dereference_protected(sock->wq, sock_owned_by_user(sk)); sock_fasync()
1051 fasync_helper(fd, filp, on, &wq->fasync_list); sock_fasync()
1053 if (!wq->fasync_list) sock_fasync()
1066 struct socket_wq *wq; sock_wake_async() local
1071 wq = rcu_dereference(sock->wq); sock_wake_async()
1072 if (!wq || !wq->fasync_list) { sock_wake_async()
1087 kill_fasync(&wq->fasync_list, SIGIO, band); sock_wake_async()
1090 kill_fasync(&wq->fasync_list, SIGURG, band); sock_wake_async()
/linux-4.1.27/drivers/ps3/
H A Dps3av.c47 struct workqueue_struct *wq; member in struct:ps3av
488 queue_work(ps3av->wq, &ps3av->work); ps3av_set_videomode()
959 ps3av->wq = create_singlethread_workqueue("ps3avd"); ps3av_probe()
960 if (!ps3av->wq) { ps3av_probe()
1021 if (ps3av->wq) ps3av_remove()
1022 destroy_workqueue(ps3av->wq); ps3av_remove()
/linux-4.1.27/arch/powerpc/platforms/cell/spufs/
H A Dspufs.h321 #define spufs_wait(wq, condition) \
326 prepare_to_wait(&(wq), &__wait, TASK_INTERRUPTIBLE); \
339 finish_wait(&(wq), &__wait); \
/linux-4.1.27/drivers/mtd/
H A Dmtd_blkdevs.c188 queue_work(dev->wq, &dev->work); mtd_blktrans_request()
432 new->wq = alloc_workqueue("%s%d", 0, 0, add_mtd_blktrans_dev()
434 if (!new->wq) add_mtd_blktrans_dev()
478 destroy_workqueue(old->wq); del_mtd_blktrans_dev()
/linux-4.1.27/drivers/net/wireless/ath/ar5523/
H A Dar5523.h93 struct workqueue_struct *wq; member in struct:ar5523
H A Dar5523.c612 queue_work(ar->wq, &ar->rx_refill_work); ar5523_data_rx_cb()
1041 queue_work(ar->wq, &ar->rx_refill_work); ar5523_start()
1625 ar->wq = create_singlethread_workqueue("ar5523"); ar5523_probe()
1626 if (!ar->wq) { ar5523_probe()
1627 ar5523_err(ar, "Could not create wq\n"); ar5523_probe()
1718 destroy_workqueue(ar->wq); ar5523_probe()
1740 destroy_workqueue(ar->wq); ar5523_disconnect()
/linux-4.1.27/drivers/media/platform/vsp1/
H A Dvsp1_video.h69 wait_queue_head_t wq; member in struct:vsp1_pipeline
/linux-4.1.27/drivers/misc/cxl/
H A Dfile.c256 poll_wait(file, &ctx->wq, poll); afu_poll()
296 prepare_to_wait(&ctx->wq, &wait, TASK_INTERRUPTIBLE); afu_read()
317 finish_wait(&ctx->wq, &wait); afu_read()
357 finish_wait(&ctx->wq, &wait); afu_read()
H A Dcontext.c62 init_waitqueue_head(&ctx->wq); cxl_context_init()
191 wake_up_all(&ctx->wq); __detach_context()
/linux-4.1.27/drivers/net/wireless/ath/wcn36xx/
H A Dtxrx.h27 /* broadcast wq ID */
/linux-4.1.27/drivers/gpu/drm/msm/
H A Dmsm_gpu.c242 queue_work(priv->wq, &gpu->inactive_work); inactive_handler()
311 queue_work(priv->wq, &gpu->recover_work); hangcheck_handler()
319 queue_work(priv->wq, &gpu->retire_work); hangcheck_handler()
458 queue_work(priv->wq, &gpu->retire_work); msm_gpu_retire()
H A Dmsm_drv.h103 struct workqueue_struct *wq; member in struct:msm_drm_private
146 /* callback from wq once fence has passed: */
H A Dmsm_drv.c137 flush_workqueue(priv->wq); msm_unload()
138 destroy_workqueue(priv->wq); msm_unload()
279 priv->wq = alloc_ordered_workqueue("msm", 0); msm_load()
697 queue_work(priv->wq, &cb->work); msm_queue_fence_cb()
722 queue_work(priv->wq, &cb->work); msm_update_fence()
/linux-4.1.27/net/nfc/hci/
H A Dhci.h39 wait_queue_head_t *wq; member in struct:hcp_exec_waiter
H A Dcommand.c64 wake_up(hcp_ew->wq); nfc_hci_execute_cb()
73 hcp_ew.wq = &ew_wq; nfc_hci_execute_cmd()
/linux-4.1.27/include/drm/
H A Ddrm_flip_work.h87 struct workqueue_struct *wq);
/linux-4.1.27/include/linux/mtd/
H A Dblktrans.h47 struct workqueue_struct *wq; member in struct:mtd_blktrans_dev
H A Dflashchip.h90 wait_queue_head_t wq; /* Wait on here when we're waiting for the chip member in struct:flchip
H A Donenand.h77 * @wq: [INTERN] wait queue to sleep on if a OneNAND
128 wait_queue_head_t wq; member in struct:onenand_chip
/linux-4.1.27/drivers/net/caif/
H A Dcaif_hsi.c78 queue_work(cfhsi->wq, &cfhsi->wake_down_work); cfhsi_inactivity_tout()
988 queue_work(cfhsi->wq, &cfhsi->wake_up_work); cfhsi_wake_up_cb()
1107 queue_work(cfhsi->wq, &cfhsi->wake_up_work); cfhsi_xmit()
1204 cfhsi->wq = create_singlethread_workqueue(cfhsi->ndev->name); cfhsi_open()
1205 if (!cfhsi->wq) { cfhsi_open()
1251 destroy_workqueue(cfhsi->wq); cfhsi_open()
1271 flush_workqueue(cfhsi->wq); cfhsi_close()
1282 destroy_workqueue(cfhsi->wq); cfhsi_close()
H A Dcaif_spi.c641 cfspi->wq = create_singlethread_workqueue(dev->name); cfspi_init()
642 if (!cfspi->wq) { cfspi_init()
665 queue_work(cfspi->wq, &cfspi->work); cfspi_init()
692 destroy_workqueue(cfspi->wq); cfspi_uninit()
/linux-4.1.27/drivers/staging/nvec/
H A Dnvec.h122 * @wq: The work queue in which @rx_work and @tx_work are executed
149 struct workqueue_struct *wq; member in struct:nvec_chip
/linux-4.1.27/include/linux/power/
H A Dcharger-manager.h53 * @wq: the workqueue to control charger according to the state of
70 struct work_struct wq; member in struct:charger_cable
/linux-4.1.27/net/9p/
H A Dtrans_fd.c121 * @wq: current write work
144 struct work_struct wq; member in struct:p9_conn
451 m = container_of(work, struct p9_conn, wq); p9_write_work()
510 schedule_work(&m->wq); p9_write_work()
592 INIT_WORK(&m->wq, p9_write_work); p9_conn_create()
644 schedule_work(&m->wq); p9_poll_mux()
682 schedule_work(&m->wq); p9_fd_request()
853 cancel_work_sync(&m->wq); p9_conn_destroy()
H A Dclient.c271 if (!req->wq) { p9_tag_alloc()
272 req->wq = kmalloc(sizeof(wait_queue_head_t), GFP_NOFS); p9_tag_alloc()
273 if (!req->wq) p9_tag_alloc()
275 init_waitqueue_head(req->wq); p9_tag_alloc()
297 kfree(req->wq); p9_tag_alloc()
299 req->wq = NULL; p9_tag_alloc()
387 kfree(c->reqs[row][col].wq); p9_tag_cleanup()
430 wake_up(req->wq); p9_client_cb()
752 err = wait_event_interruptible(*req->wq, p9_client_rpc()
/linux-4.1.27/drivers/mtd/ubi/
H A Dblock.c92 struct workqueue_struct *wq; member in struct:ubiblock
332 queue_work(dev->wq, &pdu->work); ubiblock_queue_rq()
428 dev->wq = alloc_workqueue("%s", 0, 0, gd->disk_name); ubiblock_create()
429 if (!dev->wq) { ubiblock_create()
461 destroy_workqueue(dev->wq); ubiblock_cleanup()
/linux-4.1.27/drivers/bluetooth/
H A Dbluecard_cs.c283 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); bluecard_write_wakeup()
306 prepare_to_wait(&wq, &wait, TASK_INTERRUPTIBLE); bluecard_write_wakeup()
308 finish_wait(&wq, &wait); bluecard_write_wakeup()
320 prepare_to_wait(&wq, &wait, TASK_INTERRUPTIBLE); bluecard_write_wakeup()
322 finish_wait(&wq, &wait); bluecard_write_wakeup()
/linux-4.1.27/drivers/md/
H A Ddm-era-target.c1152 struct workqueue_struct *wq; member in struct:era
1207 queue_work(era->wq, &era->worker); wake_worker()
1374 flush_workqueue(era->wq); stop_worker()
1397 if (era->wq) era_destroy()
1398 destroy_workqueue(era->wq); era_destroy()
1500 era->wq = alloc_ordered_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM); era_ctr()
1501 if (!era->wq) { era_ctr()
H A Ddm.c181 struct workqueue_struct *wq; member in struct:mapped_device
697 queue_work(md->wq, &md->work); queue_io()
2324 md->wq = alloc_workqueue("kdmflush", WQ_MEM_RECLAIM, 0); alloc_dev()
2325 if (!md->wq) alloc_dev()
2348 destroy_workqueue(md->wq); alloc_dev()
2372 destroy_workqueue(md->wq); free_dev()
3047 queue_work(md->wq, &md->work); dm_queue_flush()
3176 * flush_workqueue(md->wq). __dm_suspend()
3183 * Stop md->queue before flushing md->wq in case request-based __dm_suspend()
3184 * dm defers requests to md->wq from md->queue. __dm_suspend()
3192 flush_workqueue(md->wq); __dm_suspend()
3418 flush_workqueue(md->wq); dm_internal_suspend_fast()
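
The dm.c comments above spell out the suspend-time ordering: first stop new work from being submitted or re-armed, and only then flush_workqueue(), so nothing requeues itself behind the flush. A minimal sketch of that ordering using an explicit flag; the demo_* names are invented and the real code gates on queue state rather than a bare flag:

#include <linux/workqueue.h>
#include <linux/compiler.h>

static struct workqueue_struct *demo_io_wq;
static struct work_struct demo_io_work;
static bool demo_suspended;

static void demo_io_work_fn(struct work_struct *work)
{
        if (READ_ONCE(demo_suspended))
                return;                         /* don't re-arm once suspend began */
        /* ... process queued I/O, possibly queue_work(demo_io_wq, ...) again ... */
}

static void demo_suspend(void)
{
        WRITE_ONCE(demo_suspended, true);       /* 1: block new/re-armed work */
        flush_workqueue(demo_io_wq);            /* 2: drain what is already queued */
}
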
/linux-4.1.27/drivers/net/can/spi/
H A Dmcp251x.c258 struct workqueue_struct *wq; member in struct:mcp251x_priv
530 queue_work(priv->wq, &priv->tx_work); mcp251x_hard_start_xmit()
547 queue_work(priv->wq, &priv->restart_work); mcp251x_do_set_mode()
701 destroy_workqueue(priv->wq); mcp251x_stop()
702 priv->wq = NULL; mcp251x_stop()
963 priv->wq = create_freezable_workqueue("mcp251x_wq"); mcp251x_open()
1229 queue_work(priv->wq, &priv->restart_work); mcp251x_can_resume()
/linux-4.1.27/drivers/net/ethernet/mellanox/mlx5/core/
H A Dcmd.c627 } else if (!queue_work(cmd->wq, &ent->work)) { mlx5_cmd_invoke()
1054 flush_workqueue(cmd->wq); mlx5_cmd_use_events()
1073 flush_workqueue(cmd->wq); mlx5_cmd_use_polling()
1453 cmd->wq = create_singlethread_workqueue(cmd->wq_name); mlx5_cmd_init()
1454 if (!cmd->wq) { mlx5_cmd_init()
1469 destroy_workqueue(cmd->wq); mlx5_cmd_init()
1489 destroy_workqueue(cmd->wq); mlx5_cmd_cleanup()
/linux-4.1.27/drivers/mtd/nand/
H A Dtmio_nand.c175 if (unlikely(!waitqueue_active(&nand_chip->controller->wq))) tmio_irq()
178 wake_up(&nand_chip->controller->wq); tmio_irq()
198 timeout = wait_event_timeout(nand_chip->controller->wq, tmio_nand_wait()
/linux-4.1.27/drivers/net/ethernet/sfc/
H A Dmcdi.h46 * @state: Request handling state. Waited for by @wq.
48 * @wq: Wait queue for threads waiting for @state != %MCDI_STATE_RUNNING
66 wait_queue_head_t wq; member in struct:efx_mcdi_iface
/linux-4.1.27/fs/ocfs2/dlm/
H A Ddlmconvert.c94 wake_up(&res->wq); dlmconvert_master()
352 wake_up(&res->wq); dlmconvert_remote()
539 wake_up(&res->wq); dlm_convert_lock_handler()
/linux-4.1.27/fs/ext4/
H A Dpage-io.c216 struct workqueue_struct *wq; ext4_add_complete_io() local
223 wq = sbi->rsv_conversion_wq; ext4_add_complete_io()
225 queue_work(wq, &ei->i_rsv_conversion_work); ext4_add_complete_io()
/linux-4.1.27/sound/soc/intel/atom/sst/
H A Dsst.h362 * @ipc_post_msg_wq : wq to post IPC messages context
364 * @mad_wq : MAD driver wq
365 * @post_msg_wq : wq to post IPC messages
/linux-4.1.27/drivers/infiniband/core/
H A Dmad_priv.h208 struct workqueue_struct *wq; member in struct:ib_mad_port_private
H A Dmad_rmpp.c104 flush_workqueue(agent->qp_info->port_priv->wq); ib_cancel_rmpp_recvs()
457 queue_delayed_work(rmpp_recv->agent->qp_info->port_priv->wq, complete_rmpp()
550 queue_delayed_work(agent->qp_info->port_priv->wq, start_rmpp()
/linux-4.1.27/drivers/mfd/
H A Ddln2.c87 wait_queue_head_t wq; member in struct:dln2_mod_rx_slots
388 ret = wait_event_interruptible(dln2->mod_rx_slots[handle].wq, alloc_rx_slot()
426 wake_up_interruptible(&rxs->wq); free_rx_slot()
753 init_waitqueue_head(&dln2->mod_rx_slots[i].wq); dln2_probe()
/linux-4.1.27/drivers/staging/lustre/lustre/ptlrpc/
H A Dniobuf.c246 wait_queue_head_t *wq; ptlrpc_unregister_bulk() local
279 wq = &req->rq_set->set_waitq; ptlrpc_unregister_bulk()
281 wq = &req->rq_reply_waitq; ptlrpc_unregister_bulk()
288 rc = l_wait_event(*wq, !ptlrpc_client_bulk_active(req), &lwi); ptlrpc_unregister_bulk()
/linux-4.1.27/drivers/gpu/drm/i2c/
H A Dadv7511.c38 wait_queue_head_t wq; member in struct:adv7511
448 wake_up_all(&adv7511->wq); adv7511_irq_process()
472 ret = wait_event_interruptible_timeout(adv7511->wq, adv7511_wait_for_edid()
917 init_waitqueue_head(&adv7511->wq); adv7511_probe()
/linux-4.1.27/drivers/gpu/drm/omapdrm/
H A Domap_crtc.c292 queue_work(priv->wq, &omap_crtc->apply_work); omap_crtc_apply_irq()
349 queue_work(priv->wq, &omap_crtc->apply_work); apply_worker()
379 queue_work(priv->wq, &omap_crtc->apply_work); omap_crtc_apply()
602 queue_work(priv->wq, &omap_crtc->page_flip_work); page_flip_cb()
H A Domap_plane.c75 * omap_framebuffer_pin/unpin are always called from priv->wq, omap_plane_unpin_worker()
176 drm_flip_work_commit(&omap_plane->unpin_work, priv->wq); omap_plane_post_apply()
/linux-4.1.27/drivers/char/
H A Dtlclk.c200 static DECLARE_WAIT_QUEUE_HEAD(wq);
253 wait_event_interruptible(wq, got_event); tlclk_read()
873 wake_up(&wq); switchover_timeout()
929 wake_up(&wq); tlclk_interrupt()
/linux-4.1.27/drivers/gpu/drm/via/
H A Dvia_dmablit.h77 struct work_struct wq; member in struct:_drm_via_blitq
/linux-4.1.27/drivers/vfio/
H A Dvirqfd.c213 * Even if we don't queue the job, flush the wq to be sure it's vfio_virqfd_disable()
/linux-4.1.27/fs/ncpfs/
H A Dsock.c60 wait_queue_head_t wq; member in struct:ncp_request_reply
82 init_waitqueue_head(&req->wq); ncp_alloc_req()
140 wake_up_all(&req->wq); ncp_finish_request()
722 if (wait_event_interruptible(req->wq, req->status == RQ_DONE)) { do_ncp_rpc_call()
/linux-4.1.27/include/net/caif/
H A Dcaif_hsi.h165 struct workqueue_struct *wq; member in struct:cfhsi
H A Dcaif_spi.h115 struct workqueue_struct *wq; member in struct:cfspi
/linux-4.1.27/arch/arm/common/
H A DbL_switcher.c263 wait_queue_head_t wq; member in struct:bL_thread
286 wait_event_interruptible(t->wq, bL_switcher_thread()
372 wake_up(&t->wq); bL_switch_request_cb()
584 init_waitqueue_head(&t->wq); for_each_online_cpu()
/linux-4.1.27/net/dccp/
H A Doutput.c200 struct socket_wq *wq; dccp_write_space() local
203 wq = rcu_dereference(sk->sk_wq); dccp_write_space()
204 if (wq_has_sleeper(wq)) dccp_write_space()
205 wake_up_interruptible(&wq->wait); dccp_write_space()
/linux-4.1.27/drivers/uwb/
H A Duwbd.c276 rc->uwbd.wq, uwbd()
341 wake_up_all(&rc->uwbd.wq); uwbd_event_queue()
/linux-4.1.27/include/net/9p/
H A Dclient.h95 * @wq: wait_queue for the client to block on for this request
115 wait_queue_head_t *wq; member in struct:p9_req_t
/linux-4.1.27/arch/cris/mm/
H A Dfault.c223 DECLARE_WAIT_QUEUE_HEAD(wq); do_page_fault()
235 wait_event_interruptible(wq, 0 == 1); do_page_fault()
/linux-4.1.27/drivers/staging/rtl8192e/
H A Drtllib_softmac.c637 queue_delayed_work_rsl(ieee->wq, &ieee->softmac_scan_wq, rtllib_softmac_scan_wq()
767 queue_delayed_work_rsl(ieee->wq, rtllib_start_scan()
1435 * ASSOC response. Just wait for the retry wq to be scheduled. rtllib_associate_abort()
1449 queue_delayed_work_rsl(ieee->wq, &ieee->associate_retry_wq, rtllib_associate_abort()
1601 queue_work_rsl(ieee->wq, &ieee->associate_complete_wq); rtllib_associate_complete()
1623 "=============>%s():Rf state is eRfOff, schedule ipsleave wq again,return\n", rtllib_associate_procedure_wq()
1751 queue_delayed_work_rsl(ieee->wq, rtllib_softmac_new_net()
2302 queue_delayed_work_rsl(ieee->wq, rtllib_rx_assoc_resp()
2412 queue_delayed_work_rsl(ieee->wq, rtllib_rx_deauth()
2685 * (abort) this wq (when syncro scanning) before sleeping rtllib_start_ibss_wq()
2809 queue_delayed_work_rsl(ieee->wq, &ieee->start_ibss_wq, rtllib_start_ibss()
2864 queue_delayed_work_rsl(ieee->wq, &ieee->link_change_wq, 0); rtllib_disassociate()
3128 ieee->wq = create_workqueue(DRV_NAME); rtllib_softmac_init()
3168 destroy_workqueue(ieee->wq); rtllib_softmac_free()
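The rtllib hits trace the whole lifetime of a driver-private workqueue: created in *_init(), fed delayed work items while the interface runs, destroyed in *_free(); queue_delayed_work_rsl() in the listing is only the driver's wrapper around queue_delayed_work(). A generic sketch with hypothetical names:

#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>

struct demo_ieee {
	struct workqueue_struct *wq;
	struct delayed_work scan_wq;
};

static void demo_scan_worker(struct work_struct *work)
{
	struct demo_ieee *ieee =
		container_of(to_delayed_work(work), struct demo_ieee, scan_wq);

	/* ... scan one channel, then re-arm ourselves ... */
	queue_delayed_work(ieee->wq, &ieee->scan_wq, msecs_to_jiffies(500));
}

static int demo_softmac_init(struct demo_ieee *ieee)
{
	ieee->wq = create_workqueue("demo");	/* per-driver workqueue */
	if (!ieee->wq)
		return -ENOMEM;
	INIT_DELAYED_WORK(&ieee->scan_wq, demo_scan_worker);
	queue_delayed_work(ieee->wq, &ieee->scan_wq, 0);
	return 0;
}

static void demo_softmac_free(struct demo_ieee *ieee)
{
	cancel_delayed_work_sync(&ieee->scan_wq);	/* also stops the re-arm */
	destroy_workqueue(ieee->wq);
}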
/linux-4.1.27/fs/
H A Deventpoll.c193 wait_queue_head_t wq; member in struct:eventpoll
506 static void ep_poll_safewake(wait_queue_head_t *wq) ep_poll_safewake() argument
511 ep_poll_wakeup_proc, NULL, wq, (void *) (long) this_cpu); ep_poll_safewake()
665 if (waitqueue_active(&ep->wq)) ep_scan_ready_list()
666 wake_up_locked(&ep->wq); ep_scan_ready_list()
949 init_waitqueue_head(&ep->wq); ep_alloc()
1069 if (waitqueue_active(&ep->wq)) ep_poll_callback()
1070 wake_up_locked(&ep->wq); ep_poll_callback()
1343 if (waitqueue_active(&ep->wq)) ep_insert()
1344 wake_up_locked(&ep->wq); ep_insert()
1451 if (waitqueue_active(&ep->wq)) ep_modify()
1452 wake_up_locked(&ep->wq); ep_modify()
1620 __add_wait_queue_exclusive(&ep->wq, &wait); ep_poll()
1643 __remove_wait_queue(&ep->wq, &wait); ep_poll()
/linux-4.1.27/drivers/gpu/drm/ttm/
H A Dttm_bo.c466 schedule_delayed_work(&bdev->wq, ttm_bo_cleanup_refs_or_queue()
613 container_of(work, struct ttm_bo_device, wq.work); ttm_bo_delayed_workqueue()
616 schedule_delayed_work(&bdev->wq, ttm_bo_delayed_workqueue()
647 return cancel_delayed_work_sync(&bdev->wq); ttm_bo_lock_delayed_workqueue()
654 schedule_delayed_work(&bdev->wq, ttm_bo_unlock_delayed_workqueue()
1434 cancel_delayed_work_sync(&bdev->wq); ttm_bo_device_release()
1476 INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue); ttm_bo_device_init()
/linux-4.1.27/drivers/media/pci/dm1105/
H A Ddm1105.c360 struct workqueue_struct *wq; member in struct:dm1105_dev
730 queue_work(dev->wq, &dev->work); dm1105_irq()
1140 dev->wq = create_singlethread_workqueue(dev->wqn); dm1105_probe()
1141 if (!dev->wq) { dm1105_probe()
1154 destroy_workqueue(dev->wq); dm1105_probe()
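dm1105 splits interrupt handling: the hard IRQ handler only queues a work item on a single-threaded workqueue created in probe(), and the heavy processing runs later in process context. A sketch of that split with hypothetical names; teardown mirrors the destroy_workqueue() call in the listing.

#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>

struct demo_card {
	struct workqueue_struct *wq;
	struct work_struct work;
};

static void demo_work_fn(struct work_struct *work)
{
	struct demo_card *card = container_of(work, struct demo_card, work);

	/* drain the hardware ring, feed the demux, etc. - process context */
	(void)card;
}

static irqreturn_t demo_irq(int irq, void *dev_id)
{
	struct demo_card *card = dev_id;

	queue_work(card->wq, &card->work);	/* legal from hard-IRQ context */
	return IRQ_HANDLED;
}

static int demo_probe_wq(struct demo_card *card)
{
	INIT_WORK(&card->work, demo_work_fn);
	card->wq = create_singlethread_workqueue("demo_wq");
	return card->wq ? 0 : -ENOMEM;
}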
/linux-4.1.27/drivers/net/
H A Dmacvtap.c38 struct socket_wq wq; member in struct:macvtap_queue
483 RCU_INIT_POINTER(q->sock.wq, &q->wq); macvtap_open()
484 init_waitqueue_head(&q->wq.wait); macvtap_open()
533 poll_wait(file, &q->wq.wait, wait); macvtap_poll()
/linux-4.1.27/drivers/gpu/drm/i915/
H A Di915_dma.c892 dev_priv->wq = alloc_ordered_workqueue("i915", 0); i915_driver_load()
893 if (dev_priv->wq == NULL) { i915_driver_load()
997 destroy_workqueue(dev_priv->wq); i915_driver_load()
1071 flush_workqueue(dev_priv->wq); i915_driver_unload()
1084 destroy_workqueue(dev_priv->wq); i915_driver_unload()
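i915 allocates an ordered workqueue (at most one item executing, in queueing order), flushes it on unload and destroys it afterwards. A minimal sketch with hypothetical names:

#include <linux/errno.h>
#include <linux/workqueue.h>

static struct workqueue_struct *demo_wq;

static int demo_driver_load(void)
{
	demo_wq = alloc_ordered_workqueue("demo", 0);
	return demo_wq ? 0 : -ENOMEM;
}

static void demo_driver_unload(void)
{
	flush_workqueue(demo_wq);	/* wait for queued items to finish */
	destroy_workqueue(demo_wq);	/* then release the workqueue      */
}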
/linux-4.1.27/drivers/staging/rtl8192u/ieee80211/
H A Dieee80211_softmac.c523 queue_delayed_work(ieee->wq, &ieee->softmac_scan_wq, IEEE80211_SOFTMAC_SCAN_TIME); ieee80211_softmac_scan_wq()
622 queue_delayed_work(ieee->wq, &ieee->softmac_scan_wq, 0); ieee80211_start_scan()
1232 * ASSOC response. Just wait for the retry wq to be scheduled. ieee80211_associate_abort()
1246 queue_delayed_work(ieee->wq, &ieee->associate_retry_wq, \ ieee80211_associate_abort()
1390 queue_work(ieee->wq, &ieee->associate_complete_wq); ieee80211_associate_complete()
1493 queue_work(ieee->wq, &ieee->associate_procedure_wq); ieee80211_softmac_new_net()
2054 queue_work(ieee->wq, &ieee->associate_procedure_wq); ieee80211_rx_frame_softmac()
2110 queue_work(ieee->wq, &ieee->associate_procedure_wq); ieee80211_rx_frame_softmac()
2349 * (abort) this wq (when syncro scanning) before sleeping ieee80211_start_ibss_wq()
2452 queue_delayed_work(ieee->wq, &ieee->start_ibss_wq, 150); ieee80211_start_ibss()
2735 ieee->wq = create_workqueue(DRV_NAME); ieee80211_softmac_init()
2765 destroy_workqueue(ieee->wq); ieee80211_softmac_free()
/linux-4.1.27/drivers/net/wireless/ath/wil6210/
H A Dmain.c274 if (wait_event_interruptible(wil->wq, wil->recovery_state != wil_wait_for_recovery()
293 wake_up_interruptible(&wil->wq); wil_set_recovery_state()
458 init_waitqueue_head(&wil->wq); wil_priv_init()

Completed in 6566 milliseconds
