Searched refs:queue (Results 1 - 200 of 3311) sorted by relevance


/linux-4.1.27/drivers/scsi/arm/
queue.h
2 * linux/drivers/acorn/scsi/queue.h: queue handling
21 * Function: int queue_initialise (Queue_t *queue)
22 * Purpose : initialise a queue
23 * Params : queue - queue to initialise
25 extern int queue_initialise (Queue_t *queue);
28 * Function: void queue_free (Queue_t *queue)
29 * Purpose : free a queue
30 * Params : queue - queue to free
32 extern void queue_free (Queue_t *queue);
35 * Function: struct scsi_cmnd *queue_remove (queue)
36 * Purpose : removes first SCSI command from a queue
37 * Params : queue - queue to remove command from
40 extern struct scsi_cmnd *queue_remove (Queue_t *queue);
43 * Function: struct scsi_cmnd *queue_remove_exclude (queue, exclude)
44 * Purpose : remove a SCSI command from a queue
45 * Params : queue - queue to remove command from
49 extern struct scsi_cmnd *queue_remove_exclude(Queue_t *queue,
52 #define queue_add_cmd_ordered(queue,SCpnt) \
53 __queue_add(queue,SCpnt,(SCpnt)->cmnd[0] == REQUEST_SENSE)
54 #define queue_add_cmd_tail(queue,SCpnt) \
55 __queue_add(queue,SCpnt,0)
57 * Function: int __queue_add(Queue_t *queue, struct scsi_cmnd *SCpnt, int head)
58 * Purpose : Add a new command onto a queue
59 * Params : queue - destination queue
61 * head - add command to head of queue
64 extern int __queue_add(Queue_t *queue, struct scsi_cmnd *SCpnt, int head);
67 * Function: struct scsi_cmnd *queue_remove_tgtluntag (queue, target, lun, tag)
68 * Purpose : remove a SCSI command from the queue for a specified target/lun/tag
69 * Params : queue - queue to remove command from
75 extern struct scsi_cmnd *queue_remove_tgtluntag(Queue_t *queue, int target,
79 * Function: queue_remove_all_target(queue, target)
80 * Purpose : remove all SCSI commands from the queue for a specified target
81 * Params : queue - queue to remove command from
85 extern void queue_remove_all_target(Queue_t *queue, int target);
88 * Function: int queue_probetgtlun (queue, target, lun)
89 * Purpose : check to see if we have a command in the queue for the specified
91 * Params : queue - queue to look in
96 extern int queue_probetgtlun (Queue_t *queue, int target, int lun);
99 * Function: int queue_remove_cmd (Queue_t *queue, struct scsi_cmnd *SCpnt)
101 * Params : queue - queue to look in
105 int queue_remove_cmd(Queue_t *queue, struct scsi_cmnd *SCpnt);
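Taken together, the prototypes above describe a small FIFO of struct scsi_cmnd pointers used by the Acorn SCSI drivers. A minimal usage sketch, assuming only the declarations shown in queue.h (the real callers are fas216/acornscsi; error handling is trimmed and the example_* names are made up):

#include <scsi/scsi_cmnd.h>
#include "queue.h"

static Queue_t issue_queue;

/* One-time setup; per queue.c below, returns non-zero on success. */
static int example_setup(void)
{
        return queue_initialise(&issue_queue);
}

/* Queue a command; the _ordered macro pushes REQUEST_SENSE to the head. */
static void example_add(struct scsi_cmnd *SCpnt)
{
        queue_add_cmd_ordered(&issue_queue, SCpnt);
}

/* Fetch the next command to start, or NULL if the queue is empty. */
static struct scsi_cmnd *example_next(void)
{
        return queue_remove(&issue_queue);
}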
queue.c
2 * linux/drivers/acorn/scsi/queue.c: queue handling primitives
49 #include "queue.h"
54 * Function: int queue_initialise (Queue_t *queue)
55 * Purpose : initialise a queue
56 * Params : queue - queue to initialise
58 int queue_initialise (Queue_t *queue) queue_initialise() argument
63 spin_lock_init(&queue->queue_lock); queue_initialise()
64 INIT_LIST_HEAD(&queue->head); queue_initialise()
65 INIT_LIST_HEAD(&queue->free); queue_initialise()
73 queue->alloc = q = kmalloc(sizeof(QE_t) * nqueues, GFP_KERNEL); queue_initialise()
78 list_add(&q->list, &queue->free); queue_initialise()
82 return queue->alloc != NULL; queue_initialise()
86 * Function: void queue_free (Queue_t *queue)
87 * Purpose : free a queue
88 * Params : queue - queue to free
90 void queue_free (Queue_t *queue) queue_free() argument
92 if (!list_empty(&queue->head)) queue_free()
93 printk(KERN_WARNING "freeing non-empty queue %p\n", queue); queue_free()
94 kfree(queue->alloc); queue_free()
99 * Function: int __queue_add(Queue_t *queue, struct scsi_cmnd *SCpnt, int head)
100 * Purpose : Add a new command onto a queue, adding REQUEST_SENSE to head.
101 * Params : queue - destination queue
103 * head - add command to head of queue
106 int __queue_add(Queue_t *queue, struct scsi_cmnd *SCpnt, int head) __queue_add() argument
113 spin_lock_irqsave(&queue->queue_lock, flags); __queue_add()
114 if (list_empty(&queue->free)) __queue_add()
117 l = queue->free.next; __queue_add()
127 list_add(l, &queue->head); __queue_add()
129 list_add_tail(l, &queue->head); __queue_add()
133 spin_unlock_irqrestore(&queue->queue_lock, flags); __queue_add()
137 static struct scsi_cmnd *__queue_remove(Queue_t *queue, struct list_head *ent) __queue_remove() argument
149 list_add(ent, &queue->free); __queue_remove()
155 * Function: struct scsi_cmnd *queue_remove_exclude (queue, exclude)
156 * Purpose : remove a SCSI command from a queue
157 * Params : queue - queue to remove command from
161 struct scsi_cmnd *queue_remove_exclude(Queue_t *queue, unsigned long *exclude) queue_remove_exclude() argument
167 spin_lock_irqsave(&queue->queue_lock, flags); queue_remove_exclude()
168 list_for_each(l, &queue->head) { queue_remove_exclude()
172 SCpnt = __queue_remove(queue, l); queue_remove_exclude()
176 spin_unlock_irqrestore(&queue->queue_lock, flags); queue_remove_exclude()
182 * Function: struct scsi_cmnd *queue_remove (queue)
183 * Purpose : removes first SCSI command from a queue
184 * Params : queue - queue to remove command from
187 struct scsi_cmnd *queue_remove(Queue_t *queue) queue_remove() argument
192 spin_lock_irqsave(&queue->queue_lock, flags); queue_remove()
193 if (!list_empty(&queue->head)) queue_remove()
194 SCpnt = __queue_remove(queue, queue->head.next); queue_remove()
195 spin_unlock_irqrestore(&queue->queue_lock, flags); queue_remove()
201 * Function: struct scsi_cmnd *queue_remove_tgtluntag (queue, target, lun, tag)
202 * Purpose : remove a SCSI command from the queue for a specified target/lun/tag
203 * Params : queue - queue to remove command from
209 struct scsi_cmnd *queue_remove_tgtluntag(Queue_t *queue, int target, int lun, queue_remove_tgtluntag() argument
216 spin_lock_irqsave(&queue->queue_lock, flags); queue_remove_tgtluntag()
217 list_for_each(l, &queue->head) { queue_remove_tgtluntag()
221 SCpnt = __queue_remove(queue, l); queue_remove_tgtluntag()
225 spin_unlock_irqrestore(&queue->queue_lock, flags); queue_remove_tgtluntag()
231 * Function: queue_remove_all_target(queue, target)
232 * Purpose : remove all SCSI commands from the queue for a specified target
233 * Params : queue - queue to remove command from
237 void queue_remove_all_target(Queue_t *queue, int target) queue_remove_all_target() argument
242 spin_lock_irqsave(&queue->queue_lock, flags); queue_remove_all_target()
243 list_for_each(l, &queue->head) { queue_remove_all_target()
246 __queue_remove(queue, l); queue_remove_all_target()
248 spin_unlock_irqrestore(&queue->queue_lock, flags); queue_remove_all_target()
252 * Function: int queue_probetgtlun (queue, target, lun)
253 * Purpose : check to see if we have a command in the queue for the specified
255 * Params : queue - queue to look in
260 int queue_probetgtlun (Queue_t *queue, int target, int lun) queue_probetgtlun() argument
266 spin_lock_irqsave(&queue->queue_lock, flags); queue_probetgtlun()
267 list_for_each(l, &queue->head) { queue_probetgtlun()
274 spin_unlock_irqrestore(&queue->queue_lock, flags); queue_probetgtlun()
280 * Function: int queue_remove_cmd(Queue_t *queue, struct scsi_cmnd *SCpnt)
282 * Params : queue - queue to look in
286 int queue_remove_cmd(Queue_t *queue, struct scsi_cmnd *SCpnt) queue_remove_cmd() argument
292 spin_lock_irqsave(&queue->queue_lock, flags); queue_remove_cmd()
293 list_for_each(l, &queue->head) { queue_remove_cmd()
296 __queue_remove(queue, l); queue_remove_cmd()
301 spin_unlock_irqrestore(&queue->queue_lock, flags); queue_remove_cmd()
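The implementation above rests on one pattern: queue_initialise() kmallocs a fixed pool of QE_t entries onto queue->free, and __queue_add()/__queue_remove() move entries between the free list and the active list under queue_lock with interrupts disabled. A stripped-down sketch of that pattern with generic names (not the driver's own types):

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct pool_entry {
        struct list_head list;
        void *payload;                  /* queue.c stores a struct scsi_cmnd * here */
};

struct pooled_queue {
        struct list_head head;          /* entries in use, FIFO order */
        struct list_head free;          /* preallocated spare entries */
        spinlock_t lock;
};

/* Mirror of __queue_add(): returns 1 if a free entry was available. */
static int pooled_queue_add(struct pooled_queue *q, void *payload, bool to_head)
{
        struct pool_entry *e;
        unsigned long flags;
        int ret = 0;

        spin_lock_irqsave(&q->lock, flags);
        if (!list_empty(&q->free)) {
                e = list_first_entry(&q->free, struct pool_entry, list);
                e->payload = payload;
                if (to_head)                    /* REQUEST_SENSE case in __queue_add() */
                        list_move(&e->list, &q->head);
                else
                        list_move_tail(&e->list, &q->head);
                ret = 1;
        }
        spin_unlock_irqrestore(&q->lock, flags);
        return ret;
}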
Makefile
7 obj-$(CONFIG_SCSI_ACORNSCSI_3) += acornscsi_mod.o queue.o msgqueue.o
8 obj-$(CONFIG_SCSI_ARXESCSI) += arxescsi.o fas216.o queue.o msgqueue.o
10 obj-$(CONFIG_SCSI_CUMANA_2) += cumana_2.o fas216.o queue.o msgqueue.o
12 obj-$(CONFIG_SCSI_POWERTECSCSI) += powertec.o fas216.o queue.o msgqueue.o
13 obj-$(CONFIG_SCSI_EESOXSCSI) += eesox.o fas216.o queue.o msgqueue.o
msgqueue.h
10 * message queue handling
36 * Purpose : initialise a message queue
37 * Params : msgq - queue to initialise
43 * Purpose : free a queue
44 * Params : msgq - queue to free
50 * Purpose : calculate the total length of all messages on the message queue
51 * Params : msgq - queue to examine
52 * Returns : number of bytes of messages in queue
59 * Params : msgq - queue to obtain message from
67 * Purpose : add a message onto a message queue
68 * Params : msgq - queue to add message on
77 * Purpose : flush all messages from message queue
78 * Params : msgq - queue to flush
msgqueue.c
10 * message queue handling
21 * Purpose : Allocate a message queue entry
22 * Params : msgq - message queue to claim entry for
23 * Returns : message queue entry or NULL.
37 * Purpose : free a message queue entry
38 * Params : msgq - message queue to free entry from
39 * mq - message queue entry to free
51 * Purpose : initialise a message queue
52 * Params : msgq - queue to initialise
70 * Purpose : free a queue
71 * Params : msgq - queue to free
79 * Purpose : calculate the total length of all messages on the message queue
80 * Params : msgq - queue to examine
81 * Returns : number of bytes of messages in queue
97 * Params : msgq - queue to obtain message from
112 * Purpose : add a message onto a message queue
113 * Params : msgq - queue to add message on
148 * Purpose : flush all messages from message queue
149 * Params : msgq - queue to flush
170 MODULE_DESCRIPTION("SCSI message queue handling");
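msgqueue.c/.h above implement the per-command SCSI message byte stream for the same drivers. Only the comments are visible in the listing, so the sketch below infers the function names and the MsgQueue_t type from them; treat every identifier here as an assumption rather than the header's confirmed API:

#include <scsi/scsi.h>          /* MESSAGE_REJECT */
#include "msgqueue.h"

/* Hypothetical helper: build a one-byte message and count what is queued. */
static void example_build_messages(MsgQueue_t *msgq)
{
        msgqueue_initialise(msgq);                 /* "initialise a message queue" */
        msgqueue_addmsg(msgq, 1, MESSAGE_REJECT);  /* assumed: (msgq, length, bytes...) */
        if (msgqueue_msglength(msgq) > 0) {
                /* ... feed the queued bytes to the SCSI chip ... */
        }
        msgqueue_flush(msgq);                      /* "flush all messages" */
}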
/linux-4.1.27/arch/arm/mach-ixp4xx/include/mach/
qmgr.h
21 #define QUEUE_STAT1_EMPTY 1 /* queue status bits */
37 /* queue interrupt request conditions */
51 u32 statne_h; /* 0x418 - queue nearly empty */
52 u32 statf_h; /* 0x41C - queue full */
60 void qmgr_set_irq(unsigned int queue, int src,
62 void qmgr_enable_irq(unsigned int queue);
63 void qmgr_disable_irq(unsigned int queue);
70 int qmgr_request_queue(unsigned int queue, unsigned int len /* dwords */,
75 int __qmgr_request_queue(unsigned int queue, unsigned int len /* dwords */,
78 #define qmgr_request_queue(queue, len, nearly_empty_watermark, \
80 __qmgr_request_queue(queue, len, nearly_empty_watermark, \
84 void qmgr_release_queue(unsigned int queue);
87 static inline void qmgr_put_entry(unsigned int queue, u32 val) qmgr_put_entry() argument
91 BUG_ON(!qmgr_queue_descs[queue]); /* not yet requested */ qmgr_put_entry()
94 qmgr_queue_descs[queue], queue, val); qmgr_put_entry()
96 __raw_writel(val, &qmgr_regs->acc[queue][0]); qmgr_put_entry()
99 static inline u32 qmgr_get_entry(unsigned int queue) qmgr_get_entry() argument
103 val = __raw_readl(&qmgr_regs->acc[queue][0]); qmgr_get_entry()
105 BUG_ON(!qmgr_queue_descs[queue]); /* not yet requested */ qmgr_get_entry()
108 qmgr_queue_descs[queue], queue, val); qmgr_get_entry()
113 static inline int __qmgr_get_stat1(unsigned int queue) __qmgr_get_stat1() argument
116 return (__raw_readl(&qmgr_regs->stat1[queue >> 3]) __qmgr_get_stat1()
117 >> ((queue & 7) << 2)) & 0xF; __qmgr_get_stat1()
120 static inline int __qmgr_get_stat2(unsigned int queue) __qmgr_get_stat2() argument
123 BUG_ON(queue >= HALF_QUEUES); __qmgr_get_stat2()
124 return (__raw_readl(&qmgr_regs->stat2[queue >> 4]) __qmgr_get_stat2()
125 >> ((queue & 0xF) << 1)) & 0x3; __qmgr_get_stat2()
129 * qmgr_stat_empty() - checks if a hardware queue is empty
130 * @queue: queue number
132 * Returns non-zero value if the queue is empty.
134 static inline int qmgr_stat_empty(unsigned int queue) qmgr_stat_empty() argument
136 BUG_ON(queue >= HALF_QUEUES); qmgr_stat_empty()
137 return __qmgr_get_stat1(queue) & QUEUE_STAT1_EMPTY; qmgr_stat_empty()
141 * qmgr_stat_below_low_watermark() - checks if a queue is below low watermark
142 * @queue: queue number
144 * Returns non-zero value if the queue is below low watermark.
146 static inline int qmgr_stat_below_low_watermark(unsigned int queue) qmgr_stat_below_low_watermark() argument
149 if (queue >= HALF_QUEUES) qmgr_stat_below_low_watermark()
151 (queue - HALF_QUEUES)) & 0x01; qmgr_stat_below_low_watermark()
152 return __qmgr_get_stat1(queue) & QUEUE_STAT1_NEARLY_EMPTY; qmgr_stat_below_low_watermark()
156 * qmgr_stat_above_high_watermark() - checks if a queue is above high watermark
157 * @queue: queue number
159 * Returns non-zero value if the queue is above high watermark
161 static inline int qmgr_stat_above_high_watermark(unsigned int queue) qmgr_stat_above_high_watermark() argument
163 BUG_ON(queue >= HALF_QUEUES); qmgr_stat_above_high_watermark()
164 return __qmgr_get_stat1(queue) & QUEUE_STAT1_NEARLY_FULL; qmgr_stat_above_high_watermark()
168 * qmgr_stat_full() - checks if a hardware queue is full
169 * @queue: queue number
171 * Returns non-zero value if the queue is full.
173 static inline int qmgr_stat_full(unsigned int queue) qmgr_stat_full() argument
176 if (queue >= HALF_QUEUES) qmgr_stat_full()
178 (queue - HALF_QUEUES)) & 0x01; qmgr_stat_full()
179 return __qmgr_get_stat1(queue) & QUEUE_STAT1_FULL; qmgr_stat_full()
183 * qmgr_stat_underflow() - checks if a hardware queue experienced underflow
184 * @queue: queue number
186 * Returns non-zero value if the queue experienced underflow.
188 static inline int qmgr_stat_underflow(unsigned int queue) qmgr_stat_underflow() argument
190 return __qmgr_get_stat2(queue) & QUEUE_STAT2_UNDERFLOW; qmgr_stat_underflow()
194 * qmgr_stat_overflow() - checks if a hardware queue experienced overflow
195 * @queue: queue number
197 * Returns non-zero value if the queue experienced overflow.
199 static inline int qmgr_stat_overflow(unsigned int queue) qmgr_stat_overflow() argument
201 return __qmgr_get_stat2(queue) & QUEUE_STAT2_OVERFLOW; qmgr_stat_overflow()
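qmgr.h above is the IXP4xx hardware queue manager interface: a queue is requested once by number, filled and drained with 32-bit entries, and the stat helpers report empty/full and watermark conditions. A hedged polling sketch using only the accessors whose signatures are fully visible above (the queue number is illustrative, and qmgr_request_queue() is left out because its full parameter list is truncated in the listing):

#include <linux/errno.h>
#include <mach/qmgr.h>

#define EXAMPLE_QUEUE   3       /* hypothetical queue number */

/* Drain every entry currently sitting in the hardware queue. */
static void example_drain(void)
{
        while (!qmgr_stat_empty(EXAMPLE_QUEUE)) {
                u32 entry = qmgr_get_entry(EXAMPLE_QUEUE);
                /* 'entry' is typically a descriptor address; hand it to the driver. */
                (void)entry;
        }
}

/* Push one entry, refusing if the queue is already full. */
static int example_push(u32 val)
{
        if (qmgr_stat_full(EXAMPLE_QUEUE))
                return -ENOSPC;
        qmgr_put_entry(EXAMPLE_QUEUE, val);
        return 0;
}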
/linux-4.1.27/drivers/media/usb/uvc/
uvc_queue.c
28 * Video buffers queue management.
34 * the videobuf2 queue operations by serializing calls to videobuf2 and a
35 * spinlock to protect the IRQ queue that holds the buffers to be processed by
40 uvc_queue_to_stream(struct uvc_video_queue *queue) uvc_queue_to_stream() argument
42 return container_of(queue, struct uvc_streaming, queue); uvc_queue_to_stream()
48 * This function must be called with the queue spinlock held.
50 static void uvc_queue_return_buffers(struct uvc_video_queue *queue, uvc_queue_return_buffers() argument
57 while (!list_empty(&queue->irqqueue)) { uvc_queue_return_buffers()
58 struct uvc_buffer *buf = list_first_entry(&queue->irqqueue, uvc_queue_return_buffers()
60 queue); uvc_queue_return_buffers()
61 list_del(&buf->queue); uvc_queue_return_buffers()
68 * videobuf2 queue operations
75 struct uvc_video_queue *queue = vb2_get_drv_priv(vq); uvc_queue_setup() local
76 struct uvc_streaming *stream = uvc_queue_to_stream(queue); uvc_queue_setup()
92 struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue); uvc_buffer_prepare() local
101 if (unlikely(queue->flags & UVC_QUEUE_DISCONNECTED)) uvc_buffer_prepare()
118 struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue); uvc_buffer_queue() local
122 spin_lock_irqsave(&queue->irqlock, flags); uvc_buffer_queue()
123 if (likely(!(queue->flags & UVC_QUEUE_DISCONNECTED))) { uvc_buffer_queue()
124 list_add_tail(&buf->queue, &queue->irqqueue); uvc_buffer_queue()
133 spin_unlock_irqrestore(&queue->irqlock, flags); uvc_buffer_queue()
138 struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue); uvc_buffer_finish() local
139 struct uvc_streaming *stream = uvc_queue_to_stream(queue); uvc_buffer_finish()
148 struct uvc_video_queue *queue = vb2_get_drv_priv(vq); uvc_start_streaming() local
149 struct uvc_streaming *stream = uvc_queue_to_stream(queue); uvc_start_streaming()
153 queue->buf_used = 0; uvc_start_streaming()
159 spin_lock_irqsave(&queue->irqlock, flags); uvc_start_streaming()
160 uvc_queue_return_buffers(queue, UVC_BUF_STATE_QUEUED); uvc_start_streaming()
161 spin_unlock_irqrestore(&queue->irqlock, flags); uvc_start_streaming()
168 struct uvc_video_queue *queue = vb2_get_drv_priv(vq); uvc_stop_streaming() local
169 struct uvc_streaming *stream = uvc_queue_to_stream(queue); uvc_stop_streaming()
174 spin_lock_irqsave(&queue->irqlock, flags); uvc_stop_streaming()
175 uvc_queue_return_buffers(queue, UVC_BUF_STATE_ERROR); uvc_stop_streaming()
176 spin_unlock_irqrestore(&queue->irqlock, flags); uvc_stop_streaming()
190 int uvc_queue_init(struct uvc_video_queue *queue, enum v4l2_buf_type type, uvc_queue_init() argument
195 queue->queue.type = type; uvc_queue_init()
196 queue->queue.io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF; uvc_queue_init()
197 queue->queue.drv_priv = queue; uvc_queue_init()
198 queue->queue.buf_struct_size = sizeof(struct uvc_buffer); uvc_queue_init()
199 queue->queue.ops = &uvc_queue_qops; uvc_queue_init()
200 queue->queue.mem_ops = &vb2_vmalloc_memops; uvc_queue_init()
201 queue->queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC uvc_queue_init()
203 queue->queue.lock = &queue->mutex; uvc_queue_init()
204 ret = vb2_queue_init(&queue->queue); uvc_queue_init()
208 mutex_init(&queue->mutex); uvc_queue_init()
209 spin_lock_init(&queue->irqlock); uvc_queue_init()
210 INIT_LIST_HEAD(&queue->irqqueue); uvc_queue_init()
211 queue->flags = drop_corrupted ? UVC_QUEUE_DROP_CORRUPTED : 0; uvc_queue_init()
216 void uvc_queue_release(struct uvc_video_queue *queue) uvc_queue_release() argument
218 mutex_lock(&queue->mutex); uvc_queue_release()
219 vb2_queue_release(&queue->queue); uvc_queue_release()
220 mutex_unlock(&queue->mutex); uvc_queue_release()
224 * V4L2 queue operations
227 int uvc_request_buffers(struct uvc_video_queue *queue, uvc_request_buffers() argument
232 mutex_lock(&queue->mutex); uvc_request_buffers()
233 ret = vb2_reqbufs(&queue->queue, rb); uvc_request_buffers()
234 mutex_unlock(&queue->mutex); uvc_request_buffers()
239 int uvc_query_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf) uvc_query_buffer() argument
243 mutex_lock(&queue->mutex); uvc_query_buffer()
244 ret = vb2_querybuf(&queue->queue, buf); uvc_query_buffer()
245 mutex_unlock(&queue->mutex); uvc_query_buffer()
250 int uvc_create_buffers(struct uvc_video_queue *queue, uvc_create_buffers() argument
255 mutex_lock(&queue->mutex); uvc_create_buffers()
256 ret = vb2_create_bufs(&queue->queue, cb); uvc_create_buffers()
257 mutex_unlock(&queue->mutex); uvc_create_buffers()
262 int uvc_queue_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf) uvc_queue_buffer() argument
266 mutex_lock(&queue->mutex); uvc_queue_buffer()
267 ret = vb2_qbuf(&queue->queue, buf); uvc_queue_buffer()
268 mutex_unlock(&queue->mutex); uvc_queue_buffer()
273 int uvc_dequeue_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf, uvc_dequeue_buffer() argument
278 mutex_lock(&queue->mutex); uvc_dequeue_buffer()
279 ret = vb2_dqbuf(&queue->queue, buf, nonblocking); uvc_dequeue_buffer()
280 mutex_unlock(&queue->mutex); uvc_dequeue_buffer()
285 int uvc_queue_streamon(struct uvc_video_queue *queue, enum v4l2_buf_type type) uvc_queue_streamon() argument
289 mutex_lock(&queue->mutex); uvc_queue_streamon()
290 ret = vb2_streamon(&queue->queue, type); uvc_queue_streamon()
291 mutex_unlock(&queue->mutex); uvc_queue_streamon()
296 int uvc_queue_streamoff(struct uvc_video_queue *queue, enum v4l2_buf_type type) uvc_queue_streamoff() argument
300 mutex_lock(&queue->mutex); uvc_queue_streamoff()
301 ret = vb2_streamoff(&queue->queue, type); uvc_queue_streamoff()
302 mutex_unlock(&queue->mutex); uvc_queue_streamoff()
307 int uvc_queue_mmap(struct uvc_video_queue *queue, struct vm_area_struct *vma) uvc_queue_mmap() argument
309 return vb2_mmap(&queue->queue, vma); uvc_queue_mmap()
313 unsigned long uvc_queue_get_unmapped_area(struct uvc_video_queue *queue, uvc_queue_get_unmapped_area() argument
316 return vb2_get_unmapped_area(&queue->queue, 0, 0, pgoff, 0); uvc_queue_get_unmapped_area()
320 unsigned int uvc_queue_poll(struct uvc_video_queue *queue, struct file *file, uvc_queue_poll() argument
325 mutex_lock(&queue->mutex); uvc_queue_poll()
326 ret = vb2_poll(&queue->queue, file, wait); uvc_queue_poll()
327 mutex_unlock(&queue->mutex); uvc_queue_poll()
339 int uvc_queue_allocated(struct uvc_video_queue *queue) uvc_queue_allocated() argument
343 mutex_lock(&queue->mutex); uvc_queue_allocated()
344 allocated = vb2_is_busy(&queue->queue); uvc_queue_allocated()
345 mutex_unlock(&queue->mutex); uvc_queue_allocated()
351 * Cancel the video buffers queue.
353 * Cancelling the queue marks all buffers on the irq queue as erroneous,
354 * wakes them up and removes them from the queue.
362 void uvc_queue_cancel(struct uvc_video_queue *queue, int disconnect) uvc_queue_cancel() argument
366 spin_lock_irqsave(&queue->irqlock, flags); uvc_queue_cancel()
367 uvc_queue_return_buffers(queue, UVC_BUF_STATE_ERROR); uvc_queue_cancel()
372 * state outside the queue code. uvc_queue_cancel()
375 queue->flags |= UVC_QUEUE_DISCONNECTED; uvc_queue_cancel()
376 spin_unlock_irqrestore(&queue->irqlock, flags); uvc_queue_cancel()
379 struct uvc_buffer *uvc_queue_next_buffer(struct uvc_video_queue *queue, uvc_queue_next_buffer() argument
385 if ((queue->flags & UVC_QUEUE_DROP_CORRUPTED) && buf->error) { uvc_queue_next_buffer()
393 spin_lock_irqsave(&queue->irqlock, flags); uvc_queue_next_buffer()
394 list_del(&buf->queue); uvc_queue_next_buffer()
395 if (!list_empty(&queue->irqqueue)) uvc_queue_next_buffer()
396 nextbuf = list_first_entry(&queue->irqqueue, struct uvc_buffer, uvc_queue_next_buffer()
397 queue); uvc_queue_next_buffer()
400 spin_unlock_irqrestore(&queue->irqlock, flags); uvc_queue_next_buffer()
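uvc_queue.c above is a thin wrapper that serializes videobuf2 calls with queue->mutex and keeps a separate spinlock-protected irqqueue for the URB completion handler. The intended call order from the V4L2 side looks roughly like the sketch below; it is a simplified illustration, not the driver's actual ioctl code, and the "uvcvideo.h" include and buffer count are assumptions:

#include <linux/videodev2.h>
#include "uvcvideo.h"           /* struct uvc_video_queue and the prototypes above */

static int example_start_capture(struct uvc_video_queue *queue)
{
        struct v4l2_requestbuffers rb = {
                .count  = 4,
                .type   = V4L2_BUF_TYPE_VIDEO_CAPTURE,
                .memory = V4L2_MEMORY_MMAP,
        };
        int ret;

        ret = uvc_queue_init(queue, V4L2_BUF_TYPE_VIDEO_CAPTURE, 1 /* drop corrupted */);
        if (ret < 0)
                return ret;

        ret = uvc_request_buffers(queue, &rb);  /* vb2_reqbufs() under queue->mutex */
        if (ret < 0)
                return ret;

        /* Buffers are then queued with uvc_queue_buffer() (VIDIOC_QBUF)... */

        return uvc_queue_streamon(queue, V4L2_BUF_TYPE_VIDEO_CAPTURE);
}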
/linux-4.1.27/drivers/net/wireless/cw1200/
queue.c
2 * O(1) TX queue with built-in allocator for ST-Ericsson CW1200 drivers
14 #include "queue.h"
29 static inline void __cw1200_queue_lock(struct cw1200_queue *queue) __cw1200_queue_lock() argument
31 struct cw1200_queue_stats *stats = queue->stats; __cw1200_queue_lock()
32 if (queue->tx_locked_cnt++ == 0) { __cw1200_queue_lock()
34 queue->queue_id); __cw1200_queue_lock()
35 ieee80211_stop_queue(stats->priv->hw, queue->queue_id); __cw1200_queue_lock()
39 static inline void __cw1200_queue_unlock(struct cw1200_queue *queue) __cw1200_queue_unlock() argument
41 struct cw1200_queue_stats *stats = queue->stats; __cw1200_queue_unlock()
42 BUG_ON(!queue->tx_locked_cnt); __cw1200_queue_unlock()
43 if (--queue->tx_locked_cnt == 0) { __cw1200_queue_unlock()
45 queue->queue_id); __cw1200_queue_unlock()
46 ieee80211_wake_queue(stats->priv->hw, queue->queue_id); __cw1200_queue_unlock()
92 static void __cw1200_queue_gc(struct cw1200_queue *queue, __cw1200_queue_gc() argument
96 struct cw1200_queue_stats *stats = queue->stats; __cw1200_queue_gc()
100 list_for_each_entry_safe(item, tmp, &queue->queue, head) { __cw1200_queue_gc()
101 if (jiffies - item->queue_timestamp < queue->ttl) __cw1200_queue_gc()
103 --queue->num_queued; __cw1200_queue_gc()
104 --queue->link_map_cache[item->txpriv.link_id]; __cw1200_queue_gc()
113 list_move_tail(&item->head, &queue->free_pool); __cw1200_queue_gc()
119 if (queue->overfull) { __cw1200_queue_gc()
120 if (queue->num_queued <= (queue->capacity >> 1)) { __cw1200_queue_gc()
121 queue->overfull = false; __cw1200_queue_gc()
123 __cw1200_queue_unlock(queue); __cw1200_queue_gc()
125 unsigned long tmo = item->queue_timestamp + queue->ttl; __cw1200_queue_gc()
126 mod_timer(&queue->gc, tmo); __cw1200_queue_gc()
136 struct cw1200_queue *queue = cw1200_queue_gc() local
139 spin_lock_bh(&queue->lock); cw1200_queue_gc()
140 __cw1200_queue_gc(queue, &list, true); cw1200_queue_gc()
141 spin_unlock_bh(&queue->lock); cw1200_queue_gc()
142 cw1200_queue_post_gc(queue->stats, &list); cw1200_queue_gc()
165 int cw1200_queue_init(struct cw1200_queue *queue, cw1200_queue_init() argument
173 memset(queue, 0, sizeof(*queue)); cw1200_queue_init()
174 queue->stats = stats; cw1200_queue_init()
175 queue->capacity = capacity; cw1200_queue_init()
176 queue->queue_id = queue_id; cw1200_queue_init()
177 queue->ttl = ttl; cw1200_queue_init()
178 INIT_LIST_HEAD(&queue->queue); cw1200_queue_init()
179 INIT_LIST_HEAD(&queue->pending); cw1200_queue_init()
180 INIT_LIST_HEAD(&queue->free_pool); cw1200_queue_init()
181 spin_lock_init(&queue->lock); cw1200_queue_init()
182 setup_timer(&queue->gc, cw1200_queue_gc, (unsigned long)queue); cw1200_queue_init()
184 queue->pool = kzalloc(sizeof(struct cw1200_queue_item) * capacity, cw1200_queue_init()
186 if (!queue->pool) cw1200_queue_init()
189 queue->link_map_cache = kzalloc(sizeof(int) * stats->map_capacity, cw1200_queue_init()
191 if (!queue->link_map_cache) { cw1200_queue_init()
192 kfree(queue->pool); cw1200_queue_init()
193 queue->pool = NULL; cw1200_queue_init()
198 list_add_tail(&queue->pool[i].head, &queue->free_pool); cw1200_queue_init()
203 int cw1200_queue_clear(struct cw1200_queue *queue) cw1200_queue_clear() argument
207 struct cw1200_queue_stats *stats = queue->stats; cw1200_queue_clear()
210 spin_lock_bh(&queue->lock); cw1200_queue_clear()
211 queue->generation++; cw1200_queue_clear()
212 list_splice_tail_init(&queue->queue, &queue->pending); cw1200_queue_clear()
213 list_for_each_entry_safe(item, tmp, &queue->pending, head) { cw1200_queue_clear()
217 list_move_tail(&item->head, &queue->free_pool); cw1200_queue_clear()
219 queue->num_queued = 0; cw1200_queue_clear()
220 queue->num_pending = 0; cw1200_queue_clear()
224 stats->num_queued -= queue->link_map_cache[i]; cw1200_queue_clear()
225 stats->link_map_cache[i] -= queue->link_map_cache[i]; cw1200_queue_clear()
226 queue->link_map_cache[i] = 0; cw1200_queue_clear()
229 if (queue->overfull) { cw1200_queue_clear()
230 queue->overfull = false; cw1200_queue_clear()
231 __cw1200_queue_unlock(queue); cw1200_queue_clear()
233 spin_unlock_bh(&queue->lock); cw1200_queue_clear()
245 void cw1200_queue_deinit(struct cw1200_queue *queue) cw1200_queue_deinit() argument
247 cw1200_queue_clear(queue); cw1200_queue_deinit()
248 del_timer_sync(&queue->gc); cw1200_queue_deinit()
249 INIT_LIST_HEAD(&queue->free_pool); cw1200_queue_deinit()
250 kfree(queue->pool); cw1200_queue_deinit()
251 kfree(queue->link_map_cache); cw1200_queue_deinit()
252 queue->pool = NULL; cw1200_queue_deinit()
253 queue->link_map_cache = NULL; cw1200_queue_deinit()
254 queue->capacity = 0; cw1200_queue_deinit()
257 size_t cw1200_queue_get_num_queued(struct cw1200_queue *queue, cw1200_queue_get_num_queued() argument
262 size_t map_capacity = queue->stats->map_capacity; cw1200_queue_get_num_queued()
267 spin_lock_bh(&queue->lock); cw1200_queue_get_num_queued()
269 ret = queue->num_queued - queue->num_pending; cw1200_queue_get_num_queued()
274 ret += queue->link_map_cache[i]; cw1200_queue_get_num_queued()
277 spin_unlock_bh(&queue->lock); cw1200_queue_get_num_queued()
281 int cw1200_queue_put(struct cw1200_queue *queue, cw1200_queue_put() argument
287 struct cw1200_queue_stats *stats = queue->stats; cw1200_queue_put()
289 if (txpriv->link_id >= queue->stats->map_capacity) cw1200_queue_put()
292 spin_lock_bh(&queue->lock); cw1200_queue_put()
293 if (!WARN_ON(list_empty(&queue->free_pool))) { cw1200_queue_put()
295 &queue->free_pool, struct cw1200_queue_item, head); cw1200_queue_put()
298 list_move_tail(&item->head, &queue->queue); cw1200_queue_put()
302 item->packet_id = cw1200_queue_mk_packet_id(queue->generation, cw1200_queue_put()
303 queue->queue_id, cw1200_queue_put()
305 item - queue->pool); cw1200_queue_put()
308 ++queue->num_queued; cw1200_queue_put()
309 ++queue->link_map_cache[txpriv->link_id]; cw1200_queue_put()
317 * Leave extra queue slots so we don't overflow. cw1200_queue_put()
319 if (queue->overfull == false && cw1200_queue_put()
320 queue->num_queued >= cw1200_queue_put()
321 (queue->capacity - (num_present_cpus() - 1))) { cw1200_queue_put()
322 queue->overfull = true; cw1200_queue_put()
323 __cw1200_queue_lock(queue); cw1200_queue_put()
324 mod_timer(&queue->gc, jiffies); cw1200_queue_put()
329 spin_unlock_bh(&queue->lock); cw1200_queue_put()
333 int cw1200_queue_get(struct cw1200_queue *queue, cw1200_queue_get() argument
341 struct cw1200_queue_stats *stats = queue->stats; cw1200_queue_get()
344 spin_lock_bh(&queue->lock); cw1200_queue_get()
345 list_for_each_entry(item, &queue->queue, head) { cw1200_queue_get()
357 list_move_tail(&item->head, &queue->pending); cw1200_queue_get()
358 ++queue->num_pending; cw1200_queue_get()
359 --queue->link_map_cache[item->txpriv.link_id]; cw1200_queue_get()
368 spin_unlock_bh(&queue->lock); cw1200_queue_get()
374 int cw1200_queue_requeue(struct cw1200_queue *queue, u32 packet_id) cw1200_queue_requeue() argument
379 struct cw1200_queue_stats *stats = queue->stats; cw1200_queue_requeue()
384 item = &queue->pool[item_id]; cw1200_queue_requeue()
386 spin_lock_bh(&queue->lock); cw1200_queue_requeue()
387 BUG_ON(queue_id != queue->queue_id); cw1200_queue_requeue()
388 if (queue_generation != queue->generation) { cw1200_queue_requeue()
390 } else if (item_id >= (unsigned) queue->capacity) { cw1200_queue_requeue()
397 --queue->num_pending; cw1200_queue_requeue()
398 ++queue->link_map_cache[item->txpriv.link_id]; cw1200_queue_requeue()
410 list_move(&item->head, &queue->queue); cw1200_queue_requeue()
412 spin_unlock_bh(&queue->lock); cw1200_queue_requeue()
416 int cw1200_queue_requeue_all(struct cw1200_queue *queue) cw1200_queue_requeue_all() argument
419 struct cw1200_queue_stats *stats = queue->stats; cw1200_queue_requeue_all()
420 spin_lock_bh(&queue->lock); cw1200_queue_requeue_all()
422 list_for_each_entry_safe_reverse(item, tmp, &queue->pending, head) { cw1200_queue_requeue_all()
423 --queue->num_pending; cw1200_queue_requeue_all()
424 ++queue->link_map_cache[item->txpriv.link_id]; cw1200_queue_requeue_all()
432 item->packet_id = cw1200_queue_mk_packet_id(queue->generation, cw1200_queue_requeue_all()
433 queue->queue_id, cw1200_queue_requeue_all()
435 item - queue->pool); cw1200_queue_requeue_all()
436 list_move(&item->head, &queue->queue); cw1200_queue_requeue_all()
438 spin_unlock_bh(&queue->lock); cw1200_queue_requeue_all()
443 int cw1200_queue_remove(struct cw1200_queue *queue, u32 packet_id) cw1200_queue_remove() argument
448 struct cw1200_queue_stats *stats = queue->stats; cw1200_queue_remove()
455 item = &queue->pool[item_id]; cw1200_queue_remove()
457 spin_lock_bh(&queue->lock); cw1200_queue_remove()
458 BUG_ON(queue_id != queue->queue_id); cw1200_queue_remove()
459 if (queue_generation != queue->generation) { cw1200_queue_remove()
461 } else if (item_id >= (unsigned) queue->capacity) { cw1200_queue_remove()
471 --queue->num_pending; cw1200_queue_remove()
472 --queue->num_queued; cw1200_queue_remove()
473 ++queue->num_sent; cw1200_queue_remove()
478 list_move(&item->head, &queue->free_pool); cw1200_queue_remove()
480 if (queue->overfull && cw1200_queue_remove()
481 (queue->num_queued <= (queue->capacity >> 1))) { cw1200_queue_remove()
482 queue->overfull = false; cw1200_queue_remove()
483 __cw1200_queue_unlock(queue); cw1200_queue_remove()
486 spin_unlock_bh(&queue->lock); cw1200_queue_remove()
494 int cw1200_queue_get_skb(struct cw1200_queue *queue, u32 packet_id, cw1200_queue_get_skb() argument
504 item = &queue->pool[item_id]; cw1200_queue_get_skb()
506 spin_lock_bh(&queue->lock); cw1200_queue_get_skb()
507 BUG_ON(queue_id != queue->queue_id); cw1200_queue_get_skb()
508 if (queue_generation != queue->generation) { cw1200_queue_get_skb()
510 } else if (item_id >= (unsigned) queue->capacity) { cw1200_queue_get_skb()
520 spin_unlock_bh(&queue->lock); cw1200_queue_get_skb()
524 void cw1200_queue_lock(struct cw1200_queue *queue) cw1200_queue_lock() argument
526 spin_lock_bh(&queue->lock); cw1200_queue_lock()
527 __cw1200_queue_lock(queue); cw1200_queue_lock()
528 spin_unlock_bh(&queue->lock); cw1200_queue_lock()
531 void cw1200_queue_unlock(struct cw1200_queue *queue) cw1200_queue_unlock() argument
533 spin_lock_bh(&queue->lock); cw1200_queue_unlock()
534 __cw1200_queue_unlock(queue); cw1200_queue_unlock()
535 spin_unlock_bh(&queue->lock); cw1200_queue_unlock()
538 bool cw1200_queue_get_xmit_timestamp(struct cw1200_queue *queue, cw1200_queue_get_xmit_timestamp() argument
545 spin_lock_bh(&queue->lock); cw1200_queue_get_xmit_timestamp()
546 ret = !list_empty(&queue->pending); cw1200_queue_get_xmit_timestamp()
548 list_for_each_entry(item, &queue->pending, head) { cw1200_queue_get_xmit_timestamp()
555 spin_unlock_bh(&queue->lock); cw1200_queue_get_xmit_timestamp()
queue.h
2 * O(1) TX queue with built-in allocator for ST-Ericsson CW1200 drivers
36 struct list_head queue; member in struct:cw1200_queue
42 spinlock_t lock; /* Protect queue entry */
71 int cw1200_queue_init(struct cw1200_queue *queue,
76 int cw1200_queue_clear(struct cw1200_queue *queue);
78 void cw1200_queue_deinit(struct cw1200_queue *queue);
80 size_t cw1200_queue_get_num_queued(struct cw1200_queue *queue,
82 int cw1200_queue_put(struct cw1200_queue *queue,
85 int cw1200_queue_get(struct cw1200_queue *queue,
90 int cw1200_queue_requeue(struct cw1200_queue *queue, u32 packet_id);
91 int cw1200_queue_requeue_all(struct cw1200_queue *queue);
92 int cw1200_queue_remove(struct cw1200_queue *queue,
94 int cw1200_queue_get_skb(struct cw1200_queue *queue, u32 packet_id,
97 void cw1200_queue_lock(struct cw1200_queue *queue);
98 void cw1200_queue_unlock(struct cw1200_queue *queue);
99 bool cw1200_queue_get_xmit_timestamp(struct cw1200_queue *queue,
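The cw1200 queue hands out a 32-bit packet_id when a frame is queued (cw1200_queue_mk_packet_id() packs the queue generation, queue id and pool index, as seen in cw1200_queue_put() above); the TX-status path later uses that id to confirm or requeue the frame. A hedged sketch of that confirm path as seen from a status handler (the retry decision and the names here are illustrative):

#include <linux/printk.h>
#include <linux/types.h>
#include "queue.h"

/* Called when firmware reports the fate of a previously fetched frame. */
static void example_tx_confirm(struct cw1200_queue *queue, u32 packet_id, bool retry)
{
        if (retry) {
                /* Put the frame back on the queue; it keeps its pool slot. */
                if (cw1200_queue_requeue(queue, packet_id))
                        pr_warn("cw1200: requeue failed (stale generation?)\n");
        } else {
                /* Done: releases the pool entry and, per cw1200_queue_remove()
                 * above, wakes the queue if it had been stopped as over-full. */
                cw1200_queue_remove(queue, packet_id);
        }
}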
Makefile
5 queue.o \
/linux-4.1.27/drivers/usb/gadget/function/
uvc_queue.c
29 * Video buffers queue management.
35 * the videobuf2 queue operations by serializing calls to videobuf2 and a
36 * spinlock to protect the IRQ queue that holds the buffers to be processed by
41 * videobuf2 queue operations
48 struct uvc_video_queue *queue = vb2_get_drv_priv(vq); uvc_queue_setup() local
49 struct uvc_video *video = container_of(queue, struct uvc_video, queue); uvc_queue_setup()
63 struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue); uvc_buffer_prepare() local
72 if (unlikely(queue->flags & UVC_QUEUE_DISCONNECTED)) uvc_buffer_prepare()
88 struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue); uvc_buffer_queue() local
92 spin_lock_irqsave(&queue->irqlock, flags); uvc_buffer_queue()
94 if (likely(!(queue->flags & UVC_QUEUE_DISCONNECTED))) { uvc_buffer_queue()
95 list_add_tail(&buf->queue, &queue->irqqueue); uvc_buffer_queue()
104 spin_unlock_irqrestore(&queue->irqlock, flags); uvc_buffer_queue()
115 int uvcg_queue_init(struct uvc_video_queue *queue, enum v4l2_buf_type type, uvcg_queue_init() argument
120 queue->queue.type = type; uvcg_queue_init()
121 queue->queue.io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF; uvcg_queue_init()
122 queue->queue.drv_priv = queue; uvcg_queue_init()
123 queue->queue.buf_struct_size = sizeof(struct uvc_buffer); uvcg_queue_init()
124 queue->queue.ops = &uvc_queue_qops; uvcg_queue_init()
125 queue->queue.lock = lock; uvcg_queue_init()
126 queue->queue.mem_ops = &vb2_vmalloc_memops; uvcg_queue_init()
127 queue->queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC uvcg_queue_init()
129 ret = vb2_queue_init(&queue->queue); uvcg_queue_init()
133 spin_lock_init(&queue->irqlock); uvcg_queue_init()
134 INIT_LIST_HEAD(&queue->irqqueue); uvcg_queue_init()
135 queue->flags = 0; uvcg_queue_init()
143 void uvcg_free_buffers(struct uvc_video_queue *queue) uvcg_free_buffers() argument
145 vb2_queue_release(&queue->queue); uvcg_free_buffers()
151 int uvcg_alloc_buffers(struct uvc_video_queue *queue, uvcg_alloc_buffers() argument
156 ret = vb2_reqbufs(&queue->queue, rb); uvcg_alloc_buffers()
161 int uvcg_query_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf) uvcg_query_buffer() argument
163 return vb2_querybuf(&queue->queue, buf); uvcg_query_buffer()
166 int uvcg_queue_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf) uvcg_queue_buffer() argument
171 ret = vb2_qbuf(&queue->queue, buf); uvcg_queue_buffer()
175 spin_lock_irqsave(&queue->irqlock, flags); uvcg_queue_buffer()
176 ret = (queue->flags & UVC_QUEUE_PAUSED) != 0; uvcg_queue_buffer()
177 queue->flags &= ~UVC_QUEUE_PAUSED; uvcg_queue_buffer()
178 spin_unlock_irqrestore(&queue->irqlock, flags); uvcg_queue_buffer()
186 int uvcg_dequeue_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf, uvcg_dequeue_buffer() argument
189 return vb2_dqbuf(&queue->queue, buf, nonblocking); uvcg_dequeue_buffer()
193 * Poll the video queue.
195 * This function implements video queue polling and is intended to be used by
198 unsigned int uvcg_queue_poll(struct uvc_video_queue *queue, struct file *file, uvcg_queue_poll() argument
201 return vb2_poll(&queue->queue, file, wait); uvcg_queue_poll()
204 int uvcg_queue_mmap(struct uvc_video_queue *queue, struct vm_area_struct *vma) uvcg_queue_mmap() argument
206 return vb2_mmap(&queue->queue, vma); uvcg_queue_mmap()
215 unsigned long uvcg_queue_get_unmapped_area(struct uvc_video_queue *queue, uvcg_queue_get_unmapped_area() argument
218 return vb2_get_unmapped_area(&queue->queue, 0, 0, pgoff, 0); uvcg_queue_get_unmapped_area()
223 * Cancel the video buffers queue.
225 * Cancelling the queue marks all buffers on the irq queue as erroneous,
226 * wakes them up and removes them from the queue.
234 void uvcg_queue_cancel(struct uvc_video_queue *queue, int disconnect) uvcg_queue_cancel() argument
239 spin_lock_irqsave(&queue->irqlock, flags); uvcg_queue_cancel()
240 while (!list_empty(&queue->irqqueue)) { uvcg_queue_cancel()
241 buf = list_first_entry(&queue->irqqueue, struct uvc_buffer, uvcg_queue_cancel()
242 queue); uvcg_queue_cancel()
243 list_del(&buf->queue); uvcg_queue_cancel()
251 * state outside the queue code. uvcg_queue_cancel()
254 queue->flags |= UVC_QUEUE_DISCONNECTED; uvcg_queue_cancel()
255 spin_unlock_irqrestore(&queue->irqlock, flags); uvcg_queue_cancel()
259 * Enable or disable the video buffers queue.
261 * The queue must be enabled before starting video acquisition and must be
262 * disabled after stopping it. This ensures that the video buffers queue
266 * Enabling the video queue initializes parameters (such as sequence number,
267 * sync pattern, ...). If the queue is already enabled, return -EBUSY.
269 * Disabling the video queue cancels the queue and removes all buffers from
270 * the main queue.
275 int uvcg_queue_enable(struct uvc_video_queue *queue, int enable) uvcg_queue_enable() argument
281 ret = vb2_streamon(&queue->queue, queue->queue.type); uvcg_queue_enable()
285 queue->sequence = 0; uvcg_queue_enable()
286 queue->buf_used = 0; uvcg_queue_enable()
288 ret = vb2_streamoff(&queue->queue, queue->queue.type); uvcg_queue_enable()
292 spin_lock_irqsave(&queue->irqlock, flags); uvcg_queue_enable()
293 INIT_LIST_HEAD(&queue->irqqueue); uvcg_queue_enable()
297 * applications will be able to queue buffers for the next uvcg_queue_enable()
301 queue->flags &= ~UVC_QUEUE_DISCONNECTED; uvcg_queue_enable()
302 spin_unlock_irqrestore(&queue->irqlock, flags); uvcg_queue_enable()
309 struct uvc_buffer *uvcg_queue_next_buffer(struct uvc_video_queue *queue, uvcg_queue_next_buffer() argument
314 if ((queue->flags & UVC_QUEUE_DROP_INCOMPLETE) && uvcg_queue_next_buffer()
321 list_del(&buf->queue); uvcg_queue_next_buffer()
322 if (!list_empty(&queue->irqqueue)) uvcg_queue_next_buffer()
323 nextbuf = list_first_entry(&queue->irqqueue, struct uvc_buffer, uvcg_queue_next_buffer()
324 queue); uvcg_queue_next_buffer()
329 buf->buf.v4l2_buf.sequence = queue->sequence++; uvcg_queue_next_buffer()
338 struct uvc_buffer *uvcg_queue_head(struct uvc_video_queue *queue) uvcg_queue_head() argument
342 if (!list_empty(&queue->irqqueue)) uvcg_queue_head()
343 buf = list_first_entry(&queue->irqqueue, struct uvc_buffer, uvcg_queue_head()
344 queue); uvcg_queue_head()
346 queue->flags |= UVC_QUEUE_PAUSED; uvcg_queue_head()
uvc_queue.h
30 struct list_head queue; member in struct:uvc_buffer
43 struct vb2_queue queue; member in struct:uvc_video_queue
54 static inline int uvc_queue_streaming(struct uvc_video_queue *queue) uvc_queue_streaming() argument
56 return vb2_is_streaming(&queue->queue); uvc_queue_streaming()
59 int uvcg_queue_init(struct uvc_video_queue *queue, enum v4l2_buf_type type,
62 void uvcg_free_buffers(struct uvc_video_queue *queue);
64 int uvcg_alloc_buffers(struct uvc_video_queue *queue,
67 int uvcg_query_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf);
69 int uvcg_queue_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf);
71 int uvcg_dequeue_buffer(struct uvc_video_queue *queue,
74 unsigned int uvcg_queue_poll(struct uvc_video_queue *queue,
77 int uvcg_queue_mmap(struct uvc_video_queue *queue, struct vm_area_struct *vma);
80 unsigned long uvcg_queue_get_unmapped_area(struct uvc_video_queue *queue,
84 void uvcg_queue_cancel(struct uvc_video_queue *queue, int disconnect);
86 int uvcg_queue_enable(struct uvc_video_queue *queue, int enable);
88 struct uvc_buffer *uvcg_queue_next_buffer(struct uvc_video_queue *queue,
91 struct uvc_buffer *uvcg_queue_head(struct uvc_video_queue *queue);
uvc_video.c
37 if (buf->bytesused - video->queue.buf_used <= len - 2) uvc_video_encode_header()
47 struct uvc_video_queue *queue = &video->queue; uvc_video_encode_data() local
52 mem = buf->mem + queue->buf_used; uvc_video_encode_data()
53 nbytes = min((unsigned int)len, buf->bytesused - queue->buf_used); uvc_video_encode_data()
56 queue->buf_used += nbytes; uvc_video_encode_data()
87 if (buf->bytesused == video->queue.buf_used) { uvc_video_encode_bulk()
88 video->queue.buf_used = 0; uvc_video_encode_bulk()
90 uvcg_queue_next_buffer(&video->queue, buf); uvc_video_encode_bulk()
97 buf->bytesused == video->queue.buf_used) uvc_video_encode_bulk()
120 if (buf->bytesused == video->queue.buf_used) { uvc_video_encode_isoc()
121 video->queue.buf_used = 0; uvc_video_encode_isoc()
123 uvcg_queue_next_buffer(&video->queue, buf); uvc_video_encode_isoc()
150 * being empty, as a request can still be in flight. A separate "queue paused"
154 * queue is empty, and cleared when we queue a buffer.
157 * under protection of the queue spinlock. If the queue is empty, the streaming
159 * application can queue a buffer. The flag will then be cleared, and the ioctl
166 struct uvc_video_queue *queue = &video->queue; uvc_video_complete() local
177 uvcg_queue_cancel(queue, 1); uvc_video_complete()
183 uvcg_queue_cancel(queue, 0); uvc_video_complete()
187 spin_lock_irqsave(&video->queue.irqlock, flags); uvc_video_complete()
188 buf = uvcg_queue_head(&video->queue); uvc_video_complete()
190 spin_unlock_irqrestore(&video->queue.irqlock, flags); uvc_video_complete()
197 printk(KERN_INFO "Failed to queue request (%d).\n", ret); uvc_video_complete()
199 spin_unlock_irqrestore(&video->queue.irqlock, flags); uvc_video_complete()
200 uvcg_queue_cancel(queue, 0); uvc_video_complete()
203 spin_unlock_irqrestore(&video->queue.irqlock, flags); uvc_video_complete()
286 struct uvc_video_queue *queue = &video->queue; uvcg_video_pump() local
311 * request, protected by the video queue irqlock. uvcg_video_pump()
313 spin_lock_irqsave(&queue->irqlock, flags); uvcg_video_pump()
314 buf = uvcg_queue_head(queue); uvcg_video_pump()
316 spin_unlock_irqrestore(&queue->irqlock, flags); uvcg_video_pump()
325 printk(KERN_INFO "Failed to queue request (%d)\n", ret); uvcg_video_pump()
327 spin_unlock_irqrestore(&queue->irqlock, flags); uvcg_video_pump()
328 uvcg_queue_cancel(queue, 0); uvcg_video_pump()
331 spin_unlock_irqrestore(&queue->irqlock, flags); uvcg_video_pump()
360 uvcg_queue_enable(&video->queue, 0); uvcg_video_enable()
364 if ((ret = uvcg_queue_enable(&video->queue, 1)) < 0) uvcg_video_enable()
393 /* Initialize the video buffers queue. */ uvcg_video_init()
394 uvcg_queue_init(&video->queue, V4L2_BUF_TYPE_VIDEO_OUTPUT, uvcg_video_init()
uvc_v4l2.c
152 if (b->type != video->queue.queue.type) uvc_v4l2_reqbufs()
155 return uvcg_alloc_buffers(&video->queue, b); uvc_v4l2_reqbufs()
165 return uvcg_query_buffer(&video->queue, b); uvc_v4l2_querybuf()
176 ret = uvcg_queue_buffer(&video->queue, b); uvc_v4l2_qbuf()
190 return uvcg_dequeue_buffer(&video->queue, b, file->f_flags & O_NONBLOCK); uvc_v4l2_dqbuf()
201 if (type != video->queue.queue.type) uvc_v4l2_streamon()
226 if (type != video->queue.queue.type) uvc_v4l2_streamoff()
317 uvcg_free_buffers(&video->queue); uvc_v4l2_release()
334 return uvcg_queue_mmap(&uvc->video.queue, vma); uvc_v4l2_mmap()
343 return uvcg_queue_poll(&uvc->video.queue, file, wait); uvc_v4l2_poll()
354 return uvcg_queue_get_unmapped_area(&uvc->video.queue, pgoff); uvcg_v4l2_get_unmapped_area()
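The comment block in uvc_video.c above describes a small handshake: when the pump finds the irqqueue empty, uvcg_queue_head() sets UVC_QUEUE_PAUSED, and the next buffer queued through uvcg_queue_buffer() clears the flag and returns a positive value so the caller knows streaming must be kicked again. A condensed sketch of the QBUF side of that handshake; the uvcg_video_pump() restart call is not shown being made in the listing, so treat it as an assumption about how the positive return is consumed:

/* Assumes the gadget's uvc.h/uvc_queue.h/uvc_video.h declarations. */
static int example_vidioc_qbuf(struct uvc_video *video, struct v4l2_buffer *b)
{
        int ret = uvcg_queue_buffer(&video->queue, b);

        if (ret < 0)
                return ret;             /* vb2_qbuf() failed */
        if (ret > 0)
                uvcg_video_pump(video); /* queue had paused on empty; restart it */
        return 0;
}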
/linux-4.1.27/drivers/net/wireless/b43legacy/
pio.c
35 static void tx_start(struct b43legacy_pioqueue *queue) tx_start() argument
37 b43legacy_pio_write(queue, B43legacy_PIO_TXCTL, tx_start()
41 static void tx_octet(struct b43legacy_pioqueue *queue, tx_octet() argument
44 if (queue->need_workarounds) { tx_octet()
45 b43legacy_pio_write(queue, B43legacy_PIO_TXDATA, octet); tx_octet()
46 b43legacy_pio_write(queue, B43legacy_PIO_TXCTL, tx_octet()
49 b43legacy_pio_write(queue, B43legacy_PIO_TXCTL, tx_octet()
51 b43legacy_pio_write(queue, B43legacy_PIO_TXDATA, octet); tx_octet()
76 static void tx_data(struct b43legacy_pioqueue *queue, tx_data() argument
84 if (queue->need_workarounds) { tx_data()
87 b43legacy_pio_write(queue, B43legacy_PIO_TXDATA, data); tx_data()
89 b43legacy_pio_write(queue, B43legacy_PIO_TXCTL, tx_data()
95 b43legacy_pio_write(queue, B43legacy_PIO_TXDATA, data); tx_data()
98 tx_octet(queue, packet[octets - tx_data()
102 static void tx_complete(struct b43legacy_pioqueue *queue, tx_complete() argument
105 if (queue->need_workarounds) { tx_complete()
106 b43legacy_pio_write(queue, B43legacy_PIO_TXDATA, tx_complete()
108 b43legacy_pio_write(queue, B43legacy_PIO_TXCTL, tx_complete()
112 b43legacy_pio_write(queue, B43legacy_PIO_TXCTL, tx_complete()
116 static u16 generate_cookie(struct b43legacy_pioqueue *queue, generate_cookie() argument
126 switch (queue->mmio_base) { generate_cookie()
154 struct b43legacy_pioqueue *queue = NULL; parse_cookie() local
159 queue = pio->queue0; parse_cookie()
162 queue = pio->queue1; parse_cookie()
165 queue = pio->queue2; parse_cookie()
168 queue = pio->queue3; parse_cookie()
176 *packet = &(queue->tx_packets_cache[packetindex]); parse_cookie()
178 return queue; parse_cookie()
185 static int pio_tx_write_fragment(struct b43legacy_pioqueue *queue, pio_tx_write_fragment() argument
198 err = b43legacy_generate_txhdr(queue->dev, pio_tx_write_fragment()
201 generate_cookie(queue, packet)); pio_tx_write_fragment()
205 tx_start(queue); pio_tx_write_fragment()
207 if (queue->need_workarounds) pio_tx_write_fragment()
209 tx_data(queue, txhdr, (u8 *)skb->data, octets); pio_tx_write_fragment()
210 tx_complete(queue, skb); pio_tx_write_fragment()
218 struct b43legacy_pioqueue *queue = packet->queue; free_txpacket() local
226 list_move(&packet->list, &queue->txfree); free_txpacket()
227 queue->nr_txfree++; free_txpacket()
232 struct b43legacy_pioqueue *queue = packet->queue; pio_tx_packet() local
238 if (queue->tx_devq_size < octets) { pio_tx_packet()
239 b43legacywarn(queue->dev->wl, "PIO queue too small. " pio_tx_packet()
245 B43legacy_WARN_ON(queue->tx_devq_packets > pio_tx_packet()
247 B43legacy_WARN_ON(queue->tx_devq_used > queue->tx_devq_size); pio_tx_packet()
249 * TX queue. If not, return and let the TX tasklet pio_tx_packet()
252 if (queue->tx_devq_packets == B43legacy_PIO_MAXTXDEVQPACKETS) pio_tx_packet()
254 if (queue->tx_devq_used + octets > queue->tx_devq_size) pio_tx_packet()
257 err = pio_tx_write_fragment(queue, skb, packet, pio_tx_packet()
267 * (We must not overflow the device TX queue) pio_tx_packet()
269 queue->tx_devq_packets++; pio_tx_packet()
270 queue->tx_devq_used += octets; pio_tx_packet()
275 list_move_tail(&packet->list, &queue->txrunning); pio_tx_packet()
282 struct b43legacy_pioqueue *queue = (struct b43legacy_pioqueue *)d; tx_tasklet() local
283 struct b43legacy_wldev *dev = queue->dev; tx_tasklet()
290 if (queue->tx_frozen) tx_tasklet()
292 txctl = b43legacy_pio_read(queue, B43legacy_PIO_TXCTL); tx_tasklet()
296 list_for_each_entry_safe(packet, tmp_packet, &queue->txqueue, list) { tx_tasklet()
298 * the device queue is full. In case of failure, the tx_tasklet()
312 static void setup_txqueues(struct b43legacy_pioqueue *queue) setup_txqueues() argument
317 queue->nr_txfree = B43legacy_PIO_MAXTXPACKETS; setup_txqueues()
319 packet = &(queue->tx_packets_cache[i]); setup_txqueues()
321 packet->queue = queue; setup_txqueues()
324 list_add(&packet->list, &queue->txfree); setup_txqueues()
332 struct b43legacy_pioqueue *queue; b43legacy_setup_pioqueue() local
336 queue = kzalloc(sizeof(*queue), GFP_KERNEL); b43legacy_setup_pioqueue()
337 if (!queue) b43legacy_setup_pioqueue()
340 queue->dev = dev; b43legacy_setup_pioqueue()
341 queue->mmio_base = pio_mmio_base; b43legacy_setup_pioqueue()
342 queue->need_workarounds = (dev->dev->id.revision < 3); b43legacy_setup_pioqueue()
344 INIT_LIST_HEAD(&queue->txfree); b43legacy_setup_pioqueue()
345 INIT_LIST_HEAD(&queue->txqueue); b43legacy_setup_pioqueue()
346 INIT_LIST_HEAD(&queue->txrunning); b43legacy_setup_pioqueue()
347 tasklet_init(&queue->txtask, tx_tasklet, b43legacy_setup_pioqueue()
348 (unsigned long)queue); b43legacy_setup_pioqueue()
354 qsize = b43legacy_read16(dev, queue->mmio_base b43legacy_setup_pioqueue()
363 b43legacyerr(dev->wl, "PIO tx device-queue too small (%u)\n", b43legacy_setup_pioqueue()
368 queue->tx_devq_size = qsize; b43legacy_setup_pioqueue()
370 setup_txqueues(queue); b43legacy_setup_pioqueue()
373 return queue; b43legacy_setup_pioqueue()
376 kfree(queue); b43legacy_setup_pioqueue()
377 queue = NULL; b43legacy_setup_pioqueue()
381 static void cancel_transfers(struct b43legacy_pioqueue *queue) cancel_transfers() argument
385 tasklet_kill(&queue->txtask); cancel_transfers()
387 list_for_each_entry_safe(packet, tmp_packet, &queue->txrunning, list) cancel_transfers()
389 list_for_each_entry_safe(packet, tmp_packet, &queue->txqueue, list) cancel_transfers()
393 static void b43legacy_destroy_pioqueue(struct b43legacy_pioqueue *queue) b43legacy_destroy_pioqueue() argument
395 if (!queue) b43legacy_destroy_pioqueue()
398 cancel_transfers(queue); b43legacy_destroy_pioqueue()
399 kfree(queue); b43legacy_destroy_pioqueue()
423 struct b43legacy_pioqueue *queue; b43legacy_pio_init() local
426 queue = b43legacy_setup_pioqueue(dev, B43legacy_MMIO_PIO1_BASE); b43legacy_pio_init()
427 if (!queue) b43legacy_pio_init()
429 pio->queue0 = queue; b43legacy_pio_init()
431 queue = b43legacy_setup_pioqueue(dev, B43legacy_MMIO_PIO2_BASE); b43legacy_pio_init()
432 if (!queue) b43legacy_pio_init()
434 pio->queue1 = queue; b43legacy_pio_init()
436 queue = b43legacy_setup_pioqueue(dev, B43legacy_MMIO_PIO3_BASE); b43legacy_pio_init()
437 if (!queue) b43legacy_pio_init()
439 pio->queue2 = queue; b43legacy_pio_init()
441 queue = b43legacy_setup_pioqueue(dev, B43legacy_MMIO_PIO4_BASE); b43legacy_pio_init()
442 if (!queue) b43legacy_pio_init()
444 pio->queue3 = queue; b43legacy_pio_init()
469 struct b43legacy_pioqueue *queue = dev->pio.queue1; b43legacy_pio_tx() local
472 B43legacy_WARN_ON(queue->tx_suspended); b43legacy_pio_tx()
473 B43legacy_WARN_ON(list_empty(&queue->txfree)); b43legacy_pio_tx()
475 packet = list_entry(queue->txfree.next, struct b43legacy_pio_txpacket, b43legacy_pio_tx()
479 list_move_tail(&packet->list, &queue->txqueue); b43legacy_pio_tx()
480 queue->nr_txfree--; b43legacy_pio_tx()
481 B43legacy_WARN_ON(queue->nr_txfree >= B43legacy_PIO_MAXTXPACKETS); b43legacy_pio_tx()
483 tasklet_schedule(&queue->txtask); b43legacy_pio_tx()
491 struct b43legacy_pioqueue *queue; b43legacy_pio_handle_txstatus() local
496 queue = parse_cookie(dev, status->cookie, &packet); b43legacy_pio_handle_txstatus()
497 B43legacy_WARN_ON(!queue); b43legacy_pio_handle_txstatus()
502 queue->tx_devq_packets--; b43legacy_pio_handle_txstatus()
503 queue->tx_devq_used -= (packet->skb->len + b43legacy_pio_handle_txstatus()
545 if (!list_empty(&queue->txqueue)) b43legacy_pio_handle_txstatus()
546 tasklet_schedule(&queue->txtask); b43legacy_pio_handle_txstatus()
549 static void pio_rx_error(struct b43legacy_pioqueue *queue, pio_rx_error() argument
555 b43legacyerr(queue->dev->wl, "PIO RX error: %s\n", error); pio_rx_error()
556 b43legacy_pio_write(queue, B43legacy_PIO_RXCTL, pio_rx_error()
559 B43legacy_WARN_ON(queue->mmio_base != B43legacy_MMIO_PIO1_BASE); pio_rx_error()
562 b43legacy_pio_read(queue, B43legacy_PIO_RXDATA); pio_rx_error()
567 void b43legacy_pio_rx(struct b43legacy_pioqueue *queue) b43legacy_pio_rx() argument
578 tmp = b43legacy_pio_read(queue, B43legacy_PIO_RXCTL); b43legacy_pio_rx()
581 b43legacy_pio_write(queue, B43legacy_PIO_RXCTL, b43legacy_pio_rx()
585 tmp = b43legacy_pio_read(queue, B43legacy_PIO_RXCTL); b43legacy_pio_rx()
590 b43legacydbg(queue->dev->wl, "PIO RX timed out\n"); b43legacy_pio_rx()
594 len = b43legacy_pio_read(queue, B43legacy_PIO_RXDATA); b43legacy_pio_rx()
596 pio_rx_error(queue, 0, "len > 0x700"); b43legacy_pio_rx()
599 if (unlikely(len == 0 && queue->mmio_base != b43legacy_pio_rx()
601 pio_rx_error(queue, 0, "len == 0"); b43legacy_pio_rx()
605 if (queue->mmio_base == B43legacy_MMIO_PIO4_BASE) b43legacy_pio_rx()
610 tmp = b43legacy_pio_read(queue, B43legacy_PIO_RXDATA); b43legacy_pio_rx()
616 pio_rx_error(queue, b43legacy_pio_rx()
617 (queue->mmio_base == B43legacy_MMIO_PIO1_BASE), b43legacy_pio_rx()
621 if (queue->mmio_base == B43legacy_MMIO_PIO4_BASE) { b43legacy_pio_rx()
626 b43legacy_handle_hwtxstatus(queue->dev, hw); b43legacy_pio_rx()
633 pio_rx_error(queue, 1, "OOM"); b43legacy_pio_rx()
638 tmp = b43legacy_pio_read(queue, B43legacy_PIO_RXDATA); b43legacy_pio_rx()
642 tmp = b43legacy_pio_read(queue, B43legacy_PIO_RXDATA); b43legacy_pio_rx()
645 b43legacy_rx(queue->dev, skb, rxhdr); b43legacy_pio_rx()
648 void b43legacy_pio_tx_suspend(struct b43legacy_pioqueue *queue) b43legacy_pio_tx_suspend() argument
650 b43legacy_power_saving_ctl_bits(queue->dev, -1, 1); b43legacy_pio_tx_suspend()
651 b43legacy_pio_write(queue, B43legacy_PIO_TXCTL, b43legacy_pio_tx_suspend()
652 b43legacy_pio_read(queue, B43legacy_PIO_TXCTL) b43legacy_pio_tx_suspend()
656 void b43legacy_pio_tx_resume(struct b43legacy_pioqueue *queue) b43legacy_pio_tx_resume() argument
658 b43legacy_pio_write(queue, B43legacy_PIO_TXCTL, b43legacy_pio_tx_resume()
659 b43legacy_pio_read(queue, B43legacy_PIO_TXCTL) b43legacy_pio_tx_resume()
661 b43legacy_power_saving_ctl_bits(queue->dev, -1, -1); b43legacy_pio_tx_resume()
662 tasklet_schedule(&queue->txtask); b43legacy_pio_tx_resume()
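generate_cookie()/parse_cookie() above round-trip a TX packet through the device's 16-bit cookie: one part selects which of the four PIO queues the packet came from (derived from mmio_base), the other indexes into that queue's tx_packets_cache. The driver's exact bit layout is not visible in the listing, so the sketch below uses made-up field widths purely to illustrate the pack/unpack idea:

#include <linux/bug.h>
#include <linux/types.h>

#define EXAMPLE_QUEUE_SHIFT     12              /* assumed: queue number in bits 15:12 */
#define EXAMPLE_INDEX_MASK      0x0FFFu         /* assumed: packet index in bits 11:0 */

static u16 example_make_cookie(unsigned int queue_nr, unsigned int packet_index)
{
        BUG_ON(queue_nr > 3);
        BUG_ON(packet_index & ~EXAMPLE_INDEX_MASK);
        return (queue_nr << EXAMPLE_QUEUE_SHIFT) | packet_index;
}

static void example_parse_cookie(u16 cookie, unsigned int *queue_nr,
                                 unsigned int *packet_index)
{
        *queue_nr = cookie >> EXAMPLE_QUEUE_SHIFT;
        *packet_index = cookie & EXAMPLE_INDEX_MASK;
}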
pio.h
42 struct b43legacy_pioqueue *queue; member in struct:b43legacy_pio_txpacket
48 (packet)->queue->tx_packets_cache))
73 /* Packets on the txrunning queue are completely
83 u16 b43legacy_pio_read(struct b43legacy_pioqueue *queue, b43legacy_pio_read() argument
86 return b43legacy_read16(queue->dev, queue->mmio_base + offset); b43legacy_pio_read()
90 void b43legacy_pio_write(struct b43legacy_pioqueue *queue, b43legacy_pio_write() argument
93 b43legacy_write16(queue->dev, queue->mmio_base + offset, value); b43legacy_pio_write()
105 void b43legacy_pio_rx(struct b43legacy_pioqueue *queue);
107 /* Suspend TX queue in hardware. */
108 void b43legacy_pio_tx_suspend(struct b43legacy_pioqueue *queue);
109 void b43legacy_pio_tx_resume(struct b43legacy_pioqueue *queue);
137 void b43legacy_pio_rx(struct b43legacy_pioqueue *queue) b43legacy_pio_rx() argument
141 void b43legacy_pio_tx_suspend(struct b43legacy_pioqueue *queue) b43legacy_pio_tx_suspend() argument
145 void b43legacy_pio_tx_resume(struct b43legacy_pioqueue *queue) b43legacy_pio_tx_resume() argument
/linux-4.1.27/drivers/gpu/drm/vmwgfx/
vmwgfx_marker.c
37 void vmw_marker_queue_init(struct vmw_marker_queue *queue) vmw_marker_queue_init() argument
39 INIT_LIST_HEAD(&queue->head); vmw_marker_queue_init()
40 queue->lag = 0; vmw_marker_queue_init()
41 queue->lag_time = ktime_get_raw_ns(); vmw_marker_queue_init()
42 spin_lock_init(&queue->lock); vmw_marker_queue_init()
45 void vmw_marker_queue_takedown(struct vmw_marker_queue *queue) vmw_marker_queue_takedown() argument
49 spin_lock(&queue->lock); vmw_marker_queue_takedown()
50 list_for_each_entry_safe(marker, next, &queue->head, head) { vmw_marker_queue_takedown()
53 spin_unlock(&queue->lock); vmw_marker_queue_takedown()
56 int vmw_marker_push(struct vmw_marker_queue *queue, vmw_marker_push() argument
66 spin_lock(&queue->lock); vmw_marker_push()
67 list_add_tail(&marker->head, &queue->head); vmw_marker_push()
68 spin_unlock(&queue->lock); vmw_marker_push()
73 int vmw_marker_pull(struct vmw_marker_queue *queue, vmw_marker_pull() argument
80 spin_lock(&queue->lock); vmw_marker_pull()
83 if (list_empty(&queue->head)) { vmw_marker_pull()
84 queue->lag = 0; vmw_marker_pull()
85 queue->lag_time = now; vmw_marker_pull()
90 list_for_each_entry_safe(marker, next, &queue->head, head) { vmw_marker_pull()
94 queue->lag = now - marker->submitted; vmw_marker_pull()
95 queue->lag_time = now; vmw_marker_pull()
102 spin_unlock(&queue->lock); vmw_marker_pull()
107 static u64 vmw_fifo_lag(struct vmw_marker_queue *queue) vmw_fifo_lag() argument
111 spin_lock(&queue->lock); vmw_fifo_lag()
113 queue->lag += now - queue->lag_time; vmw_fifo_lag()
114 queue->lag_time = now; vmw_fifo_lag()
115 spin_unlock(&queue->lock); vmw_fifo_lag()
116 return queue->lag; vmw_fifo_lag()
120 static bool vmw_lag_lt(struct vmw_marker_queue *queue, vmw_lag_lt() argument
125 return vmw_fifo_lag(queue) <= cond; vmw_lag_lt()
129 struct vmw_marker_queue *queue, uint32_t us) vmw_wait_lag()
135 while (!vmw_lag_lt(queue, us)) { vmw_wait_lag()
136 spin_lock(&queue->lock); vmw_wait_lag()
137 if (list_empty(&queue->head)) vmw_wait_lag()
140 marker = list_first_entry(&queue->head, vmw_wait_lag()
144 spin_unlock(&queue->lock); vmw_wait_lag()
152 (void) vmw_marker_pull(queue, seqno); vmw_wait_lag()
128 vmw_wait_lag(struct vmw_private *dev_priv, struct vmw_marker_queue *queue, uint32_t us) vmw_wait_lag() argument
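
vmwgfx_marker.c above keeps a spinlock-protected list of timestamped markers and derives a lag figure from the most recently retired one. The following is a small userspace sketch of the same bookkeeping, using a pthread mutex and CLOCK_MONOTONIC in place of the kernel spinlock and ktime_get_raw_ns(); the structure and function names are illustrative, not the vmwgfx API.

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

struct marker {
    uint64_t seqno;
    uint64_t submitted_ns;
    struct marker *next;
};

struct marker_queue {
    struct marker *head, *tail;   /* FIFO of in-flight markers */
    uint64_t lag_ns;              /* lag derived from retired markers */
    uint64_t lag_time_ns;         /* when lag was last updated */
    pthread_mutex_t lock;
};

static uint64_t now_ns(void)
{
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return (uint64_t)ts.tv_sec * 1000000000ull + (uint64_t)ts.tv_nsec;
}

static void marker_queue_init(struct marker_queue *q)
{
    q->head = q->tail = NULL;
    q->lag_ns = 0;
    q->lag_time_ns = now_ns();
    pthread_mutex_init(&q->lock, NULL);
}

/* Record a marker for work just submitted to the device. */
static int marker_push(struct marker_queue *q, uint64_t seqno)
{
    struct marker *m = malloc(sizeof(*m));
    if (!m)
        return -1;
    m->seqno = seqno;
    m->submitted_ns = now_ns();
    m->next = NULL;
    pthread_mutex_lock(&q->lock);
    if (q->tail)
        q->tail->next = m;
    else
        q->head = m;
    q->tail = m;
    pthread_mutex_unlock(&q->lock);
    return 0;
}

/* Retire every marker up to the signaled seqno, tracking its lag. */
static void marker_pull(struct marker_queue *q, uint64_t signaled_seqno)
{
    uint64_t now = now_ns();

    pthread_mutex_lock(&q->lock);
    if (!q->head) {               /* nothing outstanding: no lag at all */
        q->lag_ns = 0;
        q->lag_time_ns = now;
    }
    while (q->head && q->head->seqno <= signaled_seqno) {
        struct marker *m = q->head;
        q->head = m->next;
        if (!q->head)
            q->tail = NULL;
        q->lag_ns = now - m->submitted_ns;
        q->lag_time_ns = now;
        free(m);
    }
    pthread_mutex_unlock(&q->lock);
}

int main(void)
{
    struct marker_queue q;
    marker_queue_init(&q);
    marker_push(&q, 1);
    marker_push(&q, 2);
    marker_pull(&q, 1);           /* seqno 1 has signaled, 2 is still pending */
    printf("lag: %llu ns\n", (unsigned long long)q.lag_ns);
    return 0;
}
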
/linux-4.1.27/drivers/infiniband/hw/ehca/
H A Dipz_pt_fn.c4 * internal queue handling
51 void *ipz_qpageit_get_inc(struct ipz_queue *queue) ipz_qpageit_get_inc() argument
53 void *ret = ipz_qeit_get(queue); ipz_qpageit_get_inc()
54 queue->current_q_offset += queue->pagesize; ipz_qpageit_get_inc()
55 if (queue->current_q_offset > queue->queue_length) { ipz_qpageit_get_inc()
56 queue->current_q_offset -= queue->pagesize; ipz_qpageit_get_inc()
59 if (((u64)ret) % queue->pagesize) { ipz_qpageit_get_inc()
66 void *ipz_qeit_eq_get_inc(struct ipz_queue *queue) ipz_qeit_eq_get_inc() argument
68 void *ret = ipz_qeit_get(queue); ipz_qeit_eq_get_inc()
69 u64 last_entry_in_q = queue->queue_length - queue->qe_size; ipz_qeit_eq_get_inc()
71 queue->current_q_offset += queue->qe_size; ipz_qeit_eq_get_inc()
72 if (queue->current_q_offset > last_entry_in_q) { ipz_qeit_eq_get_inc()
73 queue->current_q_offset = 0; ipz_qeit_eq_get_inc()
74 queue->toggle_state = (~queue->toggle_state) & 1; ipz_qeit_eq_get_inc()
80 int ipz_queue_abs_to_offset(struct ipz_queue *queue, u64 addr, u64 *q_offset) ipz_queue_abs_to_offset() argument
83 for (i = 0; i < queue->queue_length / queue->pagesize; i++) { ipz_queue_abs_to_offset()
84 u64 page = __pa(queue->queue_pages[i]); ipz_queue_abs_to_offset()
85 if (addr >= page && addr < page + queue->pagesize) { ipz_queue_abs_to_offset()
86 *q_offset = addr - page + i * queue->pagesize; ipz_queue_abs_to_offset()
98 * allocate pages for queue:
100 * inner loop divides a kernel page into smaller hca queue pages
102 static int alloc_queue_pages(struct ipz_queue *queue, const u32 nr_of_pages) alloc_queue_pages() argument
113 queue->queue_pages[f] = (struct ipz_page *)kpage; alloc_queue_pages()
121 for (f = 0; f < nr_of_pages && queue->queue_pages[f]; alloc_queue_pages()
123 free_page((unsigned long)(queue->queue_pages)[f]); alloc_queue_pages()
127 static int alloc_small_queue_page(struct ipz_queue *queue, struct ehca_pd *pd) alloc_small_queue_page() argument
129 int order = ilog2(queue->pagesize) - 9; alloc_small_queue_page()
161 queue->queue_pages[0] = (void *)(page->page | (bit << (order + 9))); alloc_small_queue_page()
162 queue->small_page = page; alloc_small_queue_page()
163 queue->offset = bit << (order + 9); alloc_small_queue_page()
167 ehca_err(pd->ib_pd.device, "failed to allocate small queue page"); alloc_small_queue_page()
172 static void free_small_queue_page(struct ipz_queue *queue, struct ehca_pd *pd) free_small_queue_page() argument
174 int order = ilog2(queue->pagesize) - 9; free_small_queue_page()
175 struct ipz_small_queue_page *page = queue->small_page; free_small_queue_page()
179 bit = ((unsigned long)queue->queue_pages[0] & ~PAGE_MASK) free_small_queue_page()
204 int ipz_queue_ctor(struct ehca_pd *pd, struct ipz_queue *queue, ipz_queue_ctor() argument
215 /* init queue fields */ ipz_queue_ctor()
216 queue->queue_length = nr_of_pages * pagesize; ipz_queue_ctor()
217 queue->pagesize = pagesize; ipz_queue_ctor()
218 queue->qe_size = qe_size; ipz_queue_ctor()
219 queue->act_nr_of_sg = nr_of_sg; ipz_queue_ctor()
220 queue->current_q_offset = 0; ipz_queue_ctor()
221 queue->toggle_state = 1; ipz_queue_ctor()
222 queue->small_page = NULL; ipz_queue_ctor()
224 /* allocate queue page pointers */ ipz_queue_ctor()
225 queue->queue_pages = kzalloc(nr_of_pages * sizeof(void *), ipz_queue_ctor()
227 if (!queue->queue_pages) { ipz_queue_ctor()
228 queue->queue_pages = vzalloc(nr_of_pages * sizeof(void *)); ipz_queue_ctor()
229 if (!queue->queue_pages) { ipz_queue_ctor()
230 ehca_gen_err("Couldn't allocate queue page list"); ipz_queue_ctor()
235 /* allocate actual queue pages */ ipz_queue_ctor()
237 if (!alloc_small_queue_page(queue, pd)) ipz_queue_ctor()
240 if (!alloc_queue_pages(queue, nr_of_pages)) ipz_queue_ctor()
246 ehca_gen_err("Couldn't alloc pages queue=%p " ipz_queue_ctor()
247 "nr_of_pages=%x", queue, nr_of_pages); ipz_queue_ctor()
248 if (is_vmalloc_addr(queue->queue_pages)) ipz_queue_ctor()
249 vfree(queue->queue_pages); ipz_queue_ctor()
251 kfree(queue->queue_pages); ipz_queue_ctor()
256 int ipz_queue_dtor(struct ehca_pd *pd, struct ipz_queue *queue) ipz_queue_dtor() argument
260 if (!queue || !queue->queue_pages) { ipz_queue_dtor()
261 ehca_gen_dbg("queue or queue_pages is NULL"); ipz_queue_dtor()
265 if (queue->small_page) ipz_queue_dtor()
266 free_small_queue_page(queue, pd); ipz_queue_dtor()
268 nr_pages = queue->queue_length / queue->pagesize; ipz_queue_dtor()
270 free_page((unsigned long)queue->queue_pages[i]); ipz_queue_dtor()
273 if (is_vmalloc_addr(queue->queue_pages)) ipz_queue_dtor()
274 vfree(queue->queue_pages); ipz_queue_dtor()
276 kfree(queue->queue_pages); ipz_queue_dtor()
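
ipz_pt_fn.c above backs a queue with an array of page-sized buffers and translates an absolute address into a linear queue offset by scanning that array (ipz_queue_abs_to_offset). Here is a self-contained sketch of the same allocate-with-rollback and address-to-offset scan, using plain calloc/free instead of the kernel page allocator; sizes and names are illustrative.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define QPAGE_SIZE 4096           /* illustrative page size */

struct ipz_like_queue {
    void   **pages;               /* array of page-sized buffers */
    unsigned nr_pages;
    size_t   queue_length;        /* nr_pages * QPAGE_SIZE */
};

static int queue_alloc(struct ipz_like_queue *q, unsigned nr_pages)
{
    q->pages = calloc(nr_pages, sizeof(void *));
    if (!q->pages)
        return -1;
    for (unsigned i = 0; i < nr_pages; i++) {
        q->pages[i] = calloc(1, QPAGE_SIZE);
        if (!q->pages[i]) {
            /* roll back everything allocated so far, like the kernel path */
            for (unsigned j = 0; j < i; j++)
                free(q->pages[j]);
            free(q->pages);
            return -1;
        }
    }
    q->nr_pages = nr_pages;
    q->queue_length = (size_t)nr_pages * QPAGE_SIZE;
    return 0;
}

/*
 * Translate an address inside one of the queue's pages into a linear queue
 * offset, scanning the page array the way ipz_queue_abs_to_offset() scans
 * queue_pages[].
 */
static int queue_abs_to_offset(struct ipz_like_queue *q, void *addr,
                               size_t *q_offset)
{
    uintptr_t a = (uintptr_t)addr;

    for (unsigned i = 0; i < q->nr_pages; i++) {
        uintptr_t page = (uintptr_t)q->pages[i];
        if (a >= page && a < page + QPAGE_SIZE) {
            *q_offset = (size_t)(a - page) + (size_t)i * QPAGE_SIZE;
            return 0;
        }
    }
    return -1;                    /* address does not belong to this queue */
}

int main(void)
{
    struct ipz_like_queue q;
    size_t off;

    if (queue_alloc(&q, 4))
        return 1;
    void *addr = (char *)q.pages[2] + 100;
    if (!queue_abs_to_offset(&q, addr, &off))
        printf("offset = %zu (expected %d)\n", off, 2 * QPAGE_SIZE + 100);
    return 0;
}
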
H A Dipz_pt_fn.h4 * internal queue handling
75 /* struct generic queue in linux kernel virtual memory (kv) */
77 u64 current_q_offset; /* current queue entry */
79 struct ipz_page **queue_pages; /* array of pages belonging to queue */
80 u32 qe_size; /* queue entry size */
82 u32 queue_length; /* queue length allocated in bytes */
93 static inline void *ipz_qeit_calc(struct ipz_queue *queue, u64 q_offset) ipz_qeit_calc() argument
96 if (q_offset >= queue->queue_length) ipz_qeit_calc()
98 current_page = (queue->queue_pages)[q_offset >> EHCA_PAGESHIFT]; ipz_qeit_calc()
106 static inline void *ipz_qeit_get(struct ipz_queue *queue) ipz_qeit_get() argument
108 return ipz_qeit_calc(queue, queue->current_q_offset); ipz_qeit_get()
118 void *ipz_qpageit_get_inc(struct ipz_queue *queue);
126 static inline void *ipz_qeit_get_inc(struct ipz_queue *queue) ipz_qeit_get_inc() argument
128 void *ret = ipz_qeit_get(queue); ipz_qeit_get_inc()
129 queue->current_q_offset += queue->qe_size; ipz_qeit_get_inc()
130 if (queue->current_q_offset >= queue->queue_length) { ipz_qeit_get_inc()
131 queue->current_q_offset = 0; ipz_qeit_get_inc()
133 queue->toggle_state = (~queue->toggle_state) & 1; ipz_qeit_get_inc()
142 static inline int ipz_qeit_is_valid(struct ipz_queue *queue) ipz_qeit_is_valid() argument
144 struct ehca_cqe *cqe = ipz_qeit_get(queue); ipz_qeit_is_valid()
145 return ((cqe->cqe_flags >> 7) == (queue->toggle_state & 1)); ipz_qeit_is_valid()
155 static inline void *ipz_qeit_get_inc_valid(struct ipz_queue *queue) ipz_qeit_get_inc_valid() argument
157 return ipz_qeit_is_valid(queue) ? ipz_qeit_get_inc(queue) : NULL; ipz_qeit_get_inc_valid()
164 static inline void *ipz_qeit_reset(struct ipz_queue *queue) ipz_qeit_reset() argument
166 queue->current_q_offset = 0; ipz_qeit_reset()
167 return ipz_qeit_get(queue); ipz_qeit_reset()
173 int ipz_queue_abs_to_offset(struct ipz_queue *queue, u64 addr, u64 *q_offset);
176 * return the next queue offset. don't modify the queue.
178 static inline u64 ipz_queue_advance_offset(struct ipz_queue *queue, u64 offset) ipz_queue_advance_offset() argument
180 offset += queue->qe_size; ipz_queue_advance_offset()
181 if (offset >= queue->queue_length) offset = 0; ipz_queue_advance_offset()
190 /* struct page table for a queue, only to be used in pf */
192 /* queue page tables (kv), use u64 because we know the element length */
204 * allocate+pin queue
208 int ipz_queue_ctor(struct ehca_pd *pd, struct ipz_queue *queue,
215 * -# free queue
217 * returns true if ok, false if queue was NULL-ptr or free failed ipz_queue_dtor()
219 int ipz_queue_dtor(struct ehca_pd *pd, struct ipz_queue *queue);
226 * -# allocate+pin queue
248 void *ipz_qeit_eq_get_inc(struct ipz_queue *queue);
258 static inline void *ipz_eqit_eq_get_inc_valid(struct ipz_queue *queue) ipz_eqit_eq_get_inc_valid() argument
260 void *ret = ipz_qeit_get(queue); ipz_eqit_eq_get_inc_valid()
262 if ((qe >> 7) != (queue->toggle_state & 1)) ipz_eqit_eq_get_inc_valid()
264 ipz_qeit_eq_get_inc(queue); /* this is a good one */ ipz_eqit_eq_get_inc_valid()
268 static inline void *ipz_eqit_eq_peek_valid(struct ipz_queue *queue) ipz_eqit_eq_peek_valid() argument
270 void *ret = ipz_qeit_get(queue); ipz_eqit_eq_peek_valid()
272 if ((qe >> 7) != (queue->toggle_state & 1)) ipz_eqit_eq_peek_valid()
277 /* returns address (GX) of first queue entry */ ipz_qpt_get_firstpage()
283 /* returns address (kv) of first page of queue page table */ ipz_qpt_get_qpt()
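
The inline helpers above implement a fixed-size ring whose consumer wraps current_q_offset back to zero and flips toggle_state, and whose validity test compares an entry's flag bit against that toggle (ipz_qeit_get_inc, ipz_qeit_is_valid). Below is a compact userspace sketch of that toggle-bit ring; the entry layout and bit position are chosen only for illustration.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define N_ENTRIES 8

struct entry {
    uint8_t flags;                /* bit 7 carries the producer's toggle */
    uint8_t payload;
};

struct toggle_ring {
    struct entry entries[N_ENTRIES];
    unsigned cur;                 /* consumer position */
    unsigned toggle;              /* expected toggle for valid entries */
};

/* An entry is consumable when its toggle bit matches the ring's toggle. */
static int entry_is_valid(const struct toggle_ring *r)
{
    return ((r->entries[r->cur].flags >> 7) & 1) == (r->toggle & 1);
}

/* Return the current entry and advance; flip the toggle on wrap-around. */
static struct entry *ring_get_inc_valid(struct toggle_ring *r)
{
    struct entry *e;

    if (!entry_is_valid(r))
        return NULL;
    e = &r->entries[r->cur];
    if (++r->cur == N_ENTRIES) {
        r->cur = 0;
        r->toggle ^= 1;           /* same trick as queue->toggle_state */
    }
    return e;
}

/* Producer side: write the payload and publish it with the current toggle. */
static void produce(struct toggle_ring *r, unsigned idx, unsigned toggle,
                    uint8_t payload)
{
    r->entries[idx].payload = payload;
    r->entries[idx].flags = (uint8_t)((toggle & 1) << 7);
}

int main(void)
{
    struct toggle_ring r;
    struct entry *e;

    memset(&r, 0, sizeof(r));
    r.toggle = 1;                 /* the queue starts with toggle_state = 1 */
    produce(&r, 0, 1, 42);
    produce(&r, 1, 1, 43);
    while ((e = ring_get_inc_valid(&r)) != NULL)
        printf("consumed %u\n", (unsigned)e->payload);
    return 0;
}
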
/linux-4.1.27/drivers/net/xen-netback/
H A Dnetback.c58 /* The time that packets can stay on the guest Rx internal queue
93 static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
96 static void make_tx_response(struct xenvif_queue *queue,
99 static void push_tx_responses(struct xenvif_queue *queue);
101 static inline int tx_work_todo(struct xenvif_queue *queue);
103 static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue,
110 static inline unsigned long idx_to_pfn(struct xenvif_queue *queue, idx_to_pfn() argument
113 return page_to_pfn(queue->mmap_pages[idx]); idx_to_pfn()
116 static inline unsigned long idx_to_kaddr(struct xenvif_queue *queue, idx_to_kaddr() argument
119 return (unsigned long)pfn_to_kaddr(idx_to_pfn(queue, idx)); idx_to_kaddr()
152 bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue, int needed) xenvif_rx_ring_slots_available() argument
157 prod = queue->rx.sring->req_prod; xenvif_rx_ring_slots_available()
158 cons = queue->rx.req_cons; xenvif_rx_ring_slots_available()
163 queue->rx.sring->req_event = prod + 1; xenvif_rx_ring_slots_available()
169 } while (queue->rx.sring->req_prod != prod); xenvif_rx_ring_slots_available()
174 void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb) xenvif_rx_queue_tail() argument
178 spin_lock_irqsave(&queue->rx_queue.lock, flags); xenvif_rx_queue_tail()
180 __skb_queue_tail(&queue->rx_queue, skb); xenvif_rx_queue_tail()
182 queue->rx_queue_len += skb->len; xenvif_rx_queue_tail()
183 if (queue->rx_queue_len > queue->rx_queue_max) xenvif_rx_queue_tail()
184 netif_tx_stop_queue(netdev_get_tx_queue(queue->vif->dev, queue->id)); xenvif_rx_queue_tail()
186 spin_unlock_irqrestore(&queue->rx_queue.lock, flags); xenvif_rx_queue_tail()
189 static struct sk_buff *xenvif_rx_dequeue(struct xenvif_queue *queue) xenvif_rx_dequeue() argument
193 spin_lock_irq(&queue->rx_queue.lock); xenvif_rx_dequeue()
195 skb = __skb_dequeue(&queue->rx_queue); xenvif_rx_dequeue()
197 queue->rx_queue_len -= skb->len; xenvif_rx_dequeue()
199 spin_unlock_irq(&queue->rx_queue.lock); xenvif_rx_dequeue()
204 static void xenvif_rx_queue_maybe_wake(struct xenvif_queue *queue) xenvif_rx_queue_maybe_wake() argument
206 spin_lock_irq(&queue->rx_queue.lock); xenvif_rx_queue_maybe_wake()
208 if (queue->rx_queue_len < queue->rx_queue_max) xenvif_rx_queue_maybe_wake()
209 netif_tx_wake_queue(netdev_get_tx_queue(queue->vif->dev, queue->id)); xenvif_rx_queue_maybe_wake()
211 spin_unlock_irq(&queue->rx_queue.lock); xenvif_rx_queue_maybe_wake()
215 static void xenvif_rx_queue_purge(struct xenvif_queue *queue) xenvif_rx_queue_purge() argument
218 while ((skb = xenvif_rx_dequeue(queue)) != NULL) xenvif_rx_queue_purge()
222 static void xenvif_rx_queue_drop_expired(struct xenvif_queue *queue) xenvif_rx_queue_drop_expired() argument
227 skb = skb_peek(&queue->rx_queue); xenvif_rx_queue_drop_expired()
232 xenvif_rx_dequeue(queue); xenvif_rx_queue_drop_expired()
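
The xenvif RX helpers above enqueue under a lock, account queued bytes in rx_queue_len, stop the upper layer once rx_queue_max is exceeded, and wake it again when the backlog drains. A userspace sketch of that byte-watermark flow control follows, with a plain flag standing in for netif_tx_stop_queue()/netif_tx_wake_queue(); all names are illustrative.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct pkt {
    size_t len;
    struct pkt *next;
};

struct rx_queue {
    struct pkt *head, *tail;
    size_t bytes;                 /* queued bytes, like rx_queue_len */
    size_t max_bytes;             /* high watermark, like rx_queue_max */
    int stopped;                  /* stands in for the stopped netdev queue */
    pthread_mutex_t lock;
};

static void rx_queue_init(struct rx_queue *q, size_t max_bytes)
{
    memset(q, 0, sizeof(*q));
    q->max_bytes = max_bytes;
    pthread_mutex_init(&q->lock, NULL);
}

/* Enqueue at the tail; stop the producer once the byte budget is exceeded. */
static void rx_queue_tail(struct rx_queue *q, struct pkt *p)
{
    pthread_mutex_lock(&q->lock);
    p->next = NULL;
    if (q->tail)
        q->tail->next = p;
    else
        q->head = p;
    q->tail = p;
    q->bytes += p->len;
    if (q->bytes > q->max_bytes)
        q->stopped = 1;           /* kernel: netif_tx_stop_queue(...) */
    pthread_mutex_unlock(&q->lock);
}

/* Dequeue from the head and release the bytes back to the budget. */
static struct pkt *rx_dequeue(struct rx_queue *q)
{
    pthread_mutex_lock(&q->lock);
    struct pkt *p = q->head;
    if (p) {
        q->head = p->next;
        if (!q->head)
            q->tail = NULL;
        q->bytes -= p->len;
    }
    pthread_mutex_unlock(&q->lock);
    return p;
}

/* Wake the producer again once we are back under the watermark. */
static void rx_queue_maybe_wake(struct rx_queue *q)
{
    pthread_mutex_lock(&q->lock);
    if (q->stopped && q->bytes < q->max_bytes)
        q->stopped = 0;           /* kernel: netif_tx_wake_queue(...) */
    pthread_mutex_unlock(&q->lock);
}

int main(void)
{
    struct rx_queue q;
    rx_queue_init(&q, 3000);
    for (int i = 0; i < 3; i++) {
        struct pkt *p = malloc(sizeof(*p));
        p->len = 1500;
        rx_queue_tail(&q, p);
    }
    printf("stopped after 4500 bytes queued: %d\n", q.stopped);
    free(rx_dequeue(&q));
    free(rx_dequeue(&q));
    rx_queue_maybe_wake(&q);
    printf("stopped after draining to 1500 bytes: %d\n", q.stopped);
    return 0;
}
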
246 static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif_queue *queue, get_next_rx_buffer() argument
252 req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++); get_next_rx_buffer()
270 static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb, xenvif_gop_frag_copy() argument
296 meta = get_next_rx_buffer(queue, npo); xenvif_gop_frag_copy()
321 copy_gop->dest.domid = queue->vif->domid; xenvif_gop_frag_copy()
346 if (*head && ((1 << gso_type) & queue->vif->gso_mask)) xenvif_gop_frag_copy()
347 queue->rx.req_cons++; xenvif_gop_frag_copy()
368 struct xenvif_queue *queue) xenvif_gop_skb()
392 req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++); xenvif_gop_skb()
400 req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++); xenvif_gop_skb()
424 xenvif_gop_frag_copy(queue, skb, npo, xenvif_gop_skb()
430 xenvif_gop_frag_copy(queue, skb, npo, xenvif_gop_skb()
466 static void xenvif_add_frag_responses(struct xenvif_queue *queue, int status, xenvif_add_frag_responses() argument
487 make_rx_response(queue, meta[i].id, status, offset, xenvif_add_frag_responses()
492 void xenvif_kick_thread(struct xenvif_queue *queue) xenvif_kick_thread() argument
494 wake_up(&queue->wq); xenvif_kick_thread()
497 static void xenvif_rx_action(struct xenvif_queue *queue) xenvif_rx_action() argument
510 .copy = queue->grant_copy_op, xenvif_rx_action()
511 .meta = queue->meta, xenvif_rx_action()
516 while (xenvif_rx_ring_slots_available(queue, XEN_NETBK_RX_SLOTS_MAX) xenvif_rx_action()
517 && (skb = xenvif_rx_dequeue(queue)) != NULL) { xenvif_rx_action()
521 queue->last_rx_time = jiffies; xenvif_rx_action()
523 old_req_cons = queue->rx.req_cons; xenvif_rx_action()
524 XENVIF_RX_CB(skb)->meta_slots_used = xenvif_gop_skb(skb, &npo, queue); xenvif_rx_action()
525 ring_slots_used = queue->rx.req_cons - old_req_cons; xenvif_rx_action()
530 BUG_ON(npo.meta_prod > ARRAY_SIZE(queue->meta)); xenvif_rx_action()
536 gnttab_batch_copy(queue->grant_copy_op, npo.copy_prod); xenvif_rx_action()
540 if ((1 << queue->meta[npo.meta_cons].gso_type) & xenvif_rx_action()
541 queue->vif->gso_prefix_mask) { xenvif_rx_action()
542 resp = RING_GET_RESPONSE(&queue->rx, xenvif_rx_action()
543 queue->rx.rsp_prod_pvt++); xenvif_rx_action()
547 resp->offset = queue->meta[npo.meta_cons].gso_size; xenvif_rx_action()
548 resp->id = queue->meta[npo.meta_cons].id; xenvif_rx_action()
556 queue->stats.tx_bytes += skb->len; xenvif_rx_action()
557 queue->stats.tx_packets++; xenvif_rx_action()
559 status = xenvif_check_gop(queue->vif, xenvif_rx_action()
575 resp = make_rx_response(queue, queue->meta[npo.meta_cons].id, xenvif_rx_action()
577 queue->meta[npo.meta_cons].size, xenvif_rx_action()
580 if ((1 << queue->meta[npo.meta_cons].gso_type) & xenvif_rx_action()
581 queue->vif->gso_mask) { xenvif_rx_action()
584 RING_GET_RESPONSE(&queue->rx, xenvif_rx_action()
585 queue->rx.rsp_prod_pvt++); xenvif_rx_action()
589 gso->u.gso.type = queue->meta[npo.meta_cons].gso_type; xenvif_rx_action()
590 gso->u.gso.size = queue->meta[npo.meta_cons].gso_size; xenvif_rx_action()
598 xenvif_add_frag_responses(queue, status, xenvif_rx_action()
599 queue->meta + npo.meta_cons + 1, xenvif_rx_action()
602 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->rx, ret); xenvif_rx_action()
612 notify_remote_via_irq(queue->rx_irq); xenvif_rx_action()
615 void xenvif_napi_schedule_or_enable_events(struct xenvif_queue *queue) xenvif_napi_schedule_or_enable_events() argument
619 RING_FINAL_CHECK_FOR_REQUESTS(&queue->tx, more_to_do); xenvif_napi_schedule_or_enable_events()
622 napi_schedule(&queue->napi); xenvif_napi_schedule_or_enable_events()
625 static void tx_add_credit(struct xenvif_queue *queue) tx_add_credit() argument
633 max_burst = RING_GET_REQUEST(&queue->tx, queue->tx.req_cons)->size; tx_add_credit()
635 max_burst = max(max_burst, queue->credit_bytes); tx_add_credit()
638 max_credit = queue->remaining_credit + queue->credit_bytes; tx_add_credit()
639 if (max_credit < queue->remaining_credit) tx_add_credit()
642 queue->remaining_credit = min(max_credit, max_burst); tx_add_credit()
647 struct xenvif_queue *queue = (struct xenvif_queue *)data; xenvif_tx_credit_callback() local
648 tx_add_credit(queue); xenvif_tx_credit_callback()
649 xenvif_napi_schedule_or_enable_events(queue); xenvif_tx_credit_callback()
652 static void xenvif_tx_err(struct xenvif_queue *queue, xenvif_tx_err() argument
655 RING_IDX cons = queue->tx.req_cons; xenvif_tx_err()
659 spin_lock_irqsave(&queue->response_lock, flags); xenvif_tx_err()
660 make_tx_response(queue, txp, XEN_NETIF_RSP_ERROR); xenvif_tx_err()
661 push_tx_responses(queue); xenvif_tx_err()
662 spin_unlock_irqrestore(&queue->response_lock, flags); xenvif_tx_err()
665 txp = RING_GET_REQUEST(&queue->tx, cons++); xenvif_tx_err()
667 queue->tx.req_cons = cons; xenvif_tx_err()
674 /* Disable the vif from queue 0's kthread */ xenvif_fatal_tx_err()
679 static int xenvif_count_requests(struct xenvif_queue *queue, xenvif_count_requests() argument
684 RING_IDX cons = queue->tx.req_cons; xenvif_count_requests()
696 netdev_err(queue->vif->dev, xenvif_count_requests()
699 xenvif_fatal_tx_err(queue->vif); xenvif_count_requests()
707 netdev_err(queue->vif->dev, xenvif_count_requests()
710 xenvif_fatal_tx_err(queue->vif); xenvif_count_requests()
723 netdev_dbg(queue->vif->dev, xenvif_count_requests()
732 memcpy(txp, RING_GET_REQUEST(&queue->tx, cons + slots), xenvif_count_requests()
746 netdev_dbg(queue->vif->dev, xenvif_count_requests()
756 netdev_err(queue->vif->dev, "Cross page boundary, txp->offset: %x, size: %u\n", xenvif_count_requests()
758 xenvif_fatal_tx_err(queue->vif); xenvif_count_requests()
770 xenvif_tx_err(queue, first, cons + slots); xenvif_count_requests()
784 static inline void xenvif_tx_create_map_op(struct xenvif_queue *queue, xenvif_tx_create_map_op() argument
789 queue->pages_to_map[mop-queue->tx_map_ops] = queue->mmap_pages[pending_idx]; xenvif_tx_create_map_op()
790 gnttab_set_map_op(mop, idx_to_kaddr(queue, pending_idx), xenvif_tx_create_map_op()
792 txp->gref, queue->vif->domid); xenvif_tx_create_map_op()
794 memcpy(&queue->pending_tx_info[pending_idx].req, txp, xenvif_tx_create_map_op()
815 static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *queue, xenvif_get_requests() argument
842 index = pending_index(queue->pending_cons++); xenvif_get_requests()
843 pending_idx = queue->pending_ring[index]; xenvif_get_requests()
844 xenvif_tx_create_map_op(queue, pending_idx, txp, gop); xenvif_get_requests()
852 netdev_err(queue->vif->dev, xenvif_get_requests()
862 index = pending_index(queue->pending_cons++); xenvif_get_requests()
863 pending_idx = queue->pending_ring[index]; xenvif_get_requests()
864 xenvif_tx_create_map_op(queue, pending_idx, txp, gop); xenvif_get_requests()
875 static inline void xenvif_grant_handle_set(struct xenvif_queue *queue, xenvif_grant_handle_set() argument
879 if (unlikely(queue->grant_tx_handle[pending_idx] != xenvif_grant_handle_set()
881 netdev_err(queue->vif->dev, xenvif_grant_handle_set()
886 queue->grant_tx_handle[pending_idx] = handle; xenvif_grant_handle_set()
889 static inline void xenvif_grant_handle_reset(struct xenvif_queue *queue, xenvif_grant_handle_reset() argument
892 if (unlikely(queue->grant_tx_handle[pending_idx] == xenvif_grant_handle_reset()
894 netdev_err(queue->vif->dev, xenvif_grant_handle_reset()
899 queue->grant_tx_handle[pending_idx] = NETBACK_INVALID_HANDLE; xenvif_grant_handle_reset()
902 static int xenvif_tx_check_gop(struct xenvif_queue *queue, xenvif_tx_check_gop() argument
926 netdev_dbg(queue->vif->dev, xenvif_tx_check_gop()
933 xenvif_idx_release(queue, pending_idx, xenvif_tx_check_gop()
948 xenvif_grant_handle_set(queue, xenvif_tx_check_gop()
953 xenvif_idx_unmap(queue, pending_idx); xenvif_tx_check_gop()
959 xenvif_idx_release(queue, pending_idx, xenvif_tx_check_gop()
962 xenvif_idx_release(queue, pending_idx, xenvif_tx_check_gop()
970 netdev_dbg(queue->vif->dev, xenvif_tx_check_gop()
977 xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_ERROR); xenvif_tx_check_gop()
987 xenvif_idx_release(queue, xenvif_tx_check_gop()
994 xenvif_idx_unmap(queue, pending_idx); xenvif_tx_check_gop()
995 xenvif_idx_release(queue, pending_idx, xenvif_tx_check_gop()
1005 xenvif_idx_unmap(queue, pending_idx); xenvif_tx_check_gop()
1006 xenvif_idx_release(queue, pending_idx, xenvif_tx_check_gop()
1027 static void xenvif_fill_frags(struct xenvif_queue *queue, struct sk_buff *skb) xenvif_fill_frags() argument
1045 &callback_param(queue, pending_idx); xenvif_fill_frags()
1047 callback_param(queue, prev_pending_idx).ctx = xenvif_fill_frags()
1048 &callback_param(queue, pending_idx); xenvif_fill_frags()
1050 callback_param(queue, pending_idx).ctx = NULL; xenvif_fill_frags()
1053 txp = &queue->pending_tx_info[pending_idx].req; xenvif_fill_frags()
1054 page = virt_to_page(idx_to_kaddr(queue, pending_idx)); xenvif_fill_frags()
1061 get_page(queue->mmap_pages[pending_idx]); xenvif_fill_frags()
1065 static int xenvif_get_extras(struct xenvif_queue *queue, xenvif_get_extras() argument
1070 RING_IDX cons = queue->tx.req_cons; xenvif_get_extras()
1074 netdev_err(queue->vif->dev, "Missing extra info\n"); xenvif_get_extras()
1075 xenvif_fatal_tx_err(queue->vif); xenvif_get_extras()
1079 memcpy(&extra, RING_GET_REQUEST(&queue->tx, cons), xenvif_get_extras()
1083 queue->tx.req_cons = ++cons; xenvif_get_extras()
1084 netdev_err(queue->vif->dev, xenvif_get_extras()
1086 xenvif_fatal_tx_err(queue->vif); xenvif_get_extras()
1091 queue->tx.req_cons = ++cons; xenvif_get_extras()
1126 static int checksum_setup(struct xenvif_queue *queue, struct sk_buff *skb) checksum_setup() argument
1136 queue->stats.rx_gso_checksum_fixup++; checksum_setup()
1148 static bool tx_credit_exceeded(struct xenvif_queue *queue, unsigned size) tx_credit_exceeded() argument
1151 u64 next_credit = queue->credit_window_start + tx_credit_exceeded()
1152 msecs_to_jiffies(queue->credit_usec / 1000); tx_credit_exceeded()
1155 if (timer_pending(&queue->credit_timeout)) tx_credit_exceeded()
1160 queue->credit_window_start = now; tx_credit_exceeded()
1161 tx_add_credit(queue); tx_credit_exceeded()
1165 if (size > queue->remaining_credit) { tx_credit_exceeded()
1166 queue->credit_timeout.data = tx_credit_exceeded()
1167 (unsigned long)queue; tx_credit_exceeded()
1168 mod_timer(&queue->credit_timeout, tx_credit_exceeded()
1170 queue->credit_window_start = next_credit; tx_credit_exceeded()
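
tx_add_credit() and tx_credit_exceeded() above meter each queue to credit_bytes per credit window, refilling the allowance when the window has elapsed and deferring packets that would overdraw it. The sketch below is a deliberately simplified version of that scheme (it drops the burst sizing from the next ring request and the deferred credit_timeout timer, both present in the kernel code); times are passed in explicitly so the example stays deterministic.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct tx_credit {
    uint64_t credit_bytes;        /* bytes allowed per window */
    uint64_t remaining;           /* unused allowance in the current window */
    uint64_t window_start_ms;     /* when the current window began */
    uint64_t window_ms;           /* window length */
};

static void credit_init(struct tx_credit *c, uint64_t bytes,
                        uint64_t window_ms, uint64_t now_ms)
{
    c->credit_bytes = bytes;
    c->remaining = bytes;
    c->window_ms = window_ms;
    c->window_start_ms = now_ms;
}

/* Returns true if the packet must wait for the next credit window. */
static bool credit_exceeded(struct tx_credit *c, uint64_t size, uint64_t now_ms)
{
    uint64_t next_window = c->window_start_ms + c->window_ms;

    /* Window has passed: start a new one and refill the allowance. */
    if (now_ms >= next_window) {
        c->window_start_ms = now_ms;
        c->remaining = c->credit_bytes;
    }

    if (size > c->remaining)
        return true;              /* kernel arms credit_timeout and defers */

    c->remaining -= size;
    return false;
}

int main(void)
{
    struct tx_credit c;
    credit_init(&c, 10000, 100, 0);          /* 10 kB every 100 ms */
    printf("6 kB at t=0:   %s\n", credit_exceeded(&c, 6000, 0) ? "defer" : "send");
    printf("6 kB at t=10:  %s\n", credit_exceeded(&c, 6000, 10) ? "defer" : "send");
    printf("6 kB at t=120: %s\n", credit_exceeded(&c, 6000, 120) ? "defer" : "send");
    return 0;
}
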
1178 static void xenvif_tx_build_gops(struct xenvif_queue *queue, xenvif_tx_build_gops() argument
1183 struct gnttab_map_grant_ref *gop = queue->tx_map_ops, *request_gop; xenvif_tx_build_gops()
1187 while (skb_queue_len(&queue->tx_queue) < budget) { xenvif_tx_build_gops()
1197 if (queue->tx.sring->req_prod - queue->tx.req_cons > xenvif_tx_build_gops()
1199 netdev_err(queue->vif->dev, xenvif_tx_build_gops()
1202 queue->tx.sring->req_prod, queue->tx.req_cons, xenvif_tx_build_gops()
1204 xenvif_fatal_tx_err(queue->vif); xenvif_tx_build_gops()
1208 work_to_do = RING_HAS_UNCONSUMED_REQUESTS(&queue->tx); xenvif_tx_build_gops()
1212 idx = queue->tx.req_cons; xenvif_tx_build_gops()
1214 memcpy(&txreq, RING_GET_REQUEST(&queue->tx, idx), sizeof(txreq)); xenvif_tx_build_gops()
1217 if (txreq.size > queue->remaining_credit && xenvif_tx_build_gops()
1218 tx_credit_exceeded(queue, txreq.size)) xenvif_tx_build_gops()
1221 queue->remaining_credit -= txreq.size; xenvif_tx_build_gops()
1224 queue->tx.req_cons = ++idx; xenvif_tx_build_gops()
1228 work_to_do = xenvif_get_extras(queue, extras, xenvif_tx_build_gops()
1230 idx = queue->tx.req_cons; xenvif_tx_build_gops()
1235 ret = xenvif_count_requests(queue, &txreq, txfrags, work_to_do); xenvif_tx_build_gops()
1242 netdev_dbg(queue->vif->dev, xenvif_tx_build_gops()
1244 xenvif_tx_err(queue, &txreq, idx); xenvif_tx_build_gops()
1250 netdev_err(queue->vif->dev, xenvif_tx_build_gops()
1254 xenvif_fatal_tx_err(queue->vif); xenvif_tx_build_gops()
1258 index = pending_index(queue->pending_cons); xenvif_tx_build_gops()
1259 pending_idx = queue->pending_ring[index]; xenvif_tx_build_gops()
1267 netdev_dbg(queue->vif->dev, xenvif_tx_build_gops()
1269 xenvif_tx_err(queue, &txreq, idx); xenvif_tx_build_gops()
1277 if (xenvif_set_skb_gso(queue->vif, skb, gso)) { xenvif_tx_build_gops()
1287 queue->tx_copy_ops[*copy_ops].source.u.ref = txreq.gref; xenvif_tx_build_gops()
1288 queue->tx_copy_ops[*copy_ops].source.domid = queue->vif->domid; xenvif_tx_build_gops()
1289 queue->tx_copy_ops[*copy_ops].source.offset = txreq.offset; xenvif_tx_build_gops()
1291 queue->tx_copy_ops[*copy_ops].dest.u.gmfn = xenvif_tx_build_gops()
1293 queue->tx_copy_ops[*copy_ops].dest.domid = DOMID_SELF; xenvif_tx_build_gops()
1294 queue->tx_copy_ops[*copy_ops].dest.offset = xenvif_tx_build_gops()
1297 queue->tx_copy_ops[*copy_ops].len = data_len; xenvif_tx_build_gops()
1298 queue->tx_copy_ops[*copy_ops].flags = GNTCOPY_source_gref; xenvif_tx_build_gops()
1307 xenvif_tx_create_map_op(queue, pending_idx, &txreq, gop); xenvif_tx_build_gops()
1312 memcpy(&queue->pending_tx_info[pending_idx].req, &txreq, xenvif_tx_build_gops()
1316 queue->pending_cons++; xenvif_tx_build_gops()
1318 request_gop = xenvif_get_requests(queue, skb, txfrags, gop); xenvif_tx_build_gops()
1321 xenvif_tx_err(queue, &txreq, idx); xenvif_tx_build_gops()
1326 __skb_queue_tail(&queue->tx_queue, skb); xenvif_tx_build_gops()
1328 queue->tx.req_cons = idx; xenvif_tx_build_gops()
1330 if (((gop-queue->tx_map_ops) >= ARRAY_SIZE(queue->tx_map_ops)) || xenvif_tx_build_gops()
1331 (*copy_ops >= ARRAY_SIZE(queue->tx_copy_ops))) xenvif_tx_build_gops()
1335 (*map_ops) = gop - queue->tx_map_ops; xenvif_tx_build_gops()
1342 static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *skb) xenvif_handle_frag_list() argument
1350 queue->stats.tx_zerocopy_sent += 2; xenvif_handle_frag_list()
1351 queue->stats.tx_frag_overflow++; xenvif_handle_frag_list()
1353 xenvif_fill_frags(queue, nskb); xenvif_handle_frag_list()
1389 xenvif_skb_zerocopy_prepare(queue, nskb); xenvif_handle_frag_list()
1397 atomic_inc(&queue->inflight_packets); xenvif_handle_frag_list()
1409 static int xenvif_tx_submit(struct xenvif_queue *queue) xenvif_tx_submit() argument
1411 struct gnttab_map_grant_ref *gop_map = queue->tx_map_ops; xenvif_tx_submit()
1412 struct gnttab_copy *gop_copy = queue->tx_copy_ops; xenvif_tx_submit()
1416 while ((skb = __skb_dequeue(&queue->tx_queue)) != NULL) { xenvif_tx_submit()
1422 txp = &queue->pending_tx_info[pending_idx].req; xenvif_tx_submit()
1425 if (unlikely(xenvif_tx_check_gop(queue, skb, &gop_map, &gop_copy))) { xenvif_tx_submit()
1441 callback_param(queue, pending_idx).ctx = NULL; xenvif_tx_submit()
1448 xenvif_idx_release(queue, pending_idx, xenvif_tx_submit()
1457 xenvif_fill_frags(queue, skb); xenvif_tx_submit()
1460 if (xenvif_handle_frag_list(queue, skb)) { xenvif_tx_submit()
1462 netdev_err(queue->vif->dev, xenvif_tx_submit()
1464 xenvif_skb_zerocopy_prepare(queue, skb); xenvif_tx_submit()
1470 skb->dev = queue->vif->dev; xenvif_tx_submit()
1474 if (checksum_setup(queue, skb)) { xenvif_tx_submit()
1475 netdev_dbg(queue->vif->dev, xenvif_tx_submit()
1479 xenvif_skb_zerocopy_prepare(queue, skb); xenvif_tx_submit()
1500 queue->stats.rx_bytes += skb->len; xenvif_tx_submit()
1501 queue->stats.rx_packets++; xenvif_tx_submit()
1511 xenvif_skb_zerocopy_prepare(queue, skb); xenvif_tx_submit()
1512 queue->stats.tx_zerocopy_sent++; xenvif_tx_submit()
1525 struct xenvif_queue *queue = ubuf_to_queue(ubuf); xenvif_zerocopy_callback() local
1530 spin_lock_irqsave(&queue->callback_lock, flags); xenvif_zerocopy_callback()
1534 BUG_ON(queue->dealloc_prod - queue->dealloc_cons >= xenvif_zerocopy_callback()
1536 index = pending_index(queue->dealloc_prod); xenvif_zerocopy_callback()
1537 queue->dealloc_ring[index] = pending_idx; xenvif_zerocopy_callback()
1542 queue->dealloc_prod++; xenvif_zerocopy_callback()
1544 wake_up(&queue->dealloc_wq); xenvif_zerocopy_callback()
1545 spin_unlock_irqrestore(&queue->callback_lock, flags); xenvif_zerocopy_callback()
1548 queue->stats.tx_zerocopy_success++; xenvif_zerocopy_callback()
1550 queue->stats.tx_zerocopy_fail++; xenvif_zerocopy_callback()
1551 xenvif_skb_zerocopy_complete(queue); xenvif_zerocopy_callback()
1554 static inline void xenvif_tx_dealloc_action(struct xenvif_queue *queue) xenvif_tx_dealloc_action() argument
1561 dc = queue->dealloc_cons; xenvif_tx_dealloc_action()
1562 gop = queue->tx_unmap_ops; xenvif_tx_dealloc_action()
1566 dp = queue->dealloc_prod; xenvif_tx_dealloc_action()
1574 BUG_ON(gop - queue->tx_unmap_ops >= MAX_PENDING_REQS); xenvif_tx_dealloc_action()
1576 queue->dealloc_ring[pending_index(dc++)]; xenvif_tx_dealloc_action()
1578 pending_idx_release[gop - queue->tx_unmap_ops] = xenvif_tx_dealloc_action()
1580 queue->pages_to_unmap[gop - queue->tx_unmap_ops] = xenvif_tx_dealloc_action()
1581 queue->mmap_pages[pending_idx]; xenvif_tx_dealloc_action()
1583 idx_to_kaddr(queue, pending_idx), xenvif_tx_dealloc_action()
1585 queue->grant_tx_handle[pending_idx]); xenvif_tx_dealloc_action()
1586 xenvif_grant_handle_reset(queue, pending_idx); xenvif_tx_dealloc_action()
1590 } while (dp != queue->dealloc_prod); xenvif_tx_dealloc_action()
1592 queue->dealloc_cons = dc; xenvif_tx_dealloc_action()
1594 if (gop - queue->tx_unmap_ops > 0) { xenvif_tx_dealloc_action()
1596 ret = gnttab_unmap_refs(queue->tx_unmap_ops, xenvif_tx_dealloc_action()
1598 queue->pages_to_unmap, xenvif_tx_dealloc_action()
1599 gop - queue->tx_unmap_ops); xenvif_tx_dealloc_action()
1601 netdev_err(queue->vif->dev, "Unmap fail: nr_ops %tx ret %d\n", xenvif_tx_dealloc_action()
1602 gop - queue->tx_unmap_ops, ret); xenvif_tx_dealloc_action()
1603 for (i = 0; i < gop - queue->tx_unmap_ops; ++i) { xenvif_tx_dealloc_action()
1605 netdev_err(queue->vif->dev, xenvif_tx_dealloc_action()
1615 for (i = 0; i < gop - queue->tx_unmap_ops; ++i) xenvif_tx_dealloc_action()
1616 xenvif_idx_release(queue, pending_idx_release[i], xenvif_tx_dealloc_action()
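
The dealloc path above is a single-producer/single-consumer ring: the zerocopy callback publishes pending indices at dealloc_prod, the dealloc kthread drains from dealloc_cons up to dealloc_prod, and both counters run freely while pending_index() reduces them into the fixed array. A small sketch of that index arithmetic follows; the ring size and the modulo reduction are illustrative assumptions rather than the netback definitions.

#include <stdint.h>
#include <stdio.h>

#define MAX_PENDING_REQS 16       /* illustrative ring size */

struct dealloc_ring {
    uint16_t slots[MAX_PENDING_REQS];
    unsigned prod;                /* only the producer advances this */
    unsigned cons;                /* only the consumer advances this */
};

static unsigned pending_index(unsigned i)
{
    return i % MAX_PENDING_REQS;  /* reduce a free-running index into slots[] */
}

static unsigned ring_used(const struct dealloc_ring *r)
{
    return r->prod - r->cons;     /* well-defined even across wrap */
}

/* Producer: publish a pending index that is now safe to release. */
static int ring_push(struct dealloc_ring *r, uint16_t pending_idx)
{
    if (ring_used(r) >= MAX_PENDING_REQS)
        return -1;                /* the kernel BUG()s on overflow instead */
    r->slots[pending_index(r->prod)] = pending_idx;
    r->prod++;
    return 0;
}

/* Consumer: drain everything published so far. */
static void ring_drain(struct dealloc_ring *r)
{
    while (r->cons != r->prod) {
        uint16_t idx = r->slots[pending_index(r->cons)];
        r->cons++;
        printf("releasing pending_idx %u\n", idx);
    }
}

int main(void)
{
    struct dealloc_ring r = { .prod = 0, .cons = 0 };
    for (uint16_t i = 0; i < 5; i++)
        ring_push(&r, i);
    ring_drain(&r);
    return 0;
}
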
1622 int xenvif_tx_action(struct xenvif_queue *queue, int budget) xenvif_tx_action() argument
1627 if (unlikely(!tx_work_todo(queue))) xenvif_tx_action()
1630 xenvif_tx_build_gops(queue, budget, &nr_cops, &nr_mops); xenvif_tx_action()
1635 gnttab_batch_copy(queue->tx_copy_ops, nr_cops); xenvif_tx_action()
1637 ret = gnttab_map_refs(queue->tx_map_ops, xenvif_tx_action()
1639 queue->pages_to_map, xenvif_tx_action()
1644 work_done = xenvif_tx_submit(queue); xenvif_tx_action()
1649 static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx, xenvif_idx_release() argument
1656 pending_tx_info = &queue->pending_tx_info[pending_idx]; xenvif_idx_release()
1658 spin_lock_irqsave(&queue->response_lock, flags); xenvif_idx_release()
1660 make_tx_response(queue, &pending_tx_info->req, status); xenvif_idx_release()
1666 index = pending_index(queue->pending_prod++); xenvif_idx_release()
1667 queue->pending_ring[index] = pending_idx; xenvif_idx_release()
1669 push_tx_responses(queue); xenvif_idx_release()
1671 spin_unlock_irqrestore(&queue->response_lock, flags); xenvif_idx_release()
1675 static void make_tx_response(struct xenvif_queue *queue, make_tx_response() argument
1679 RING_IDX i = queue->tx.rsp_prod_pvt; make_tx_response()
1682 resp = RING_GET_RESPONSE(&queue->tx, i); make_tx_response()
1687 RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL; make_tx_response()
1689 queue->tx.rsp_prod_pvt = ++i; make_tx_response()
1692 static void push_tx_responses(struct xenvif_queue *queue) push_tx_responses() argument
1696 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify); push_tx_responses()
1698 notify_remote_via_irq(queue->tx_irq); push_tx_responses()
1701 static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue, make_rx_response() argument
1708 RING_IDX i = queue->rx.rsp_prod_pvt; make_rx_response()
1711 resp = RING_GET_RESPONSE(&queue->rx, i); make_rx_response()
1719 queue->rx.rsp_prod_pvt = ++i; make_rx_response()
1724 void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx) xenvif_idx_unmap() argument
1730 idx_to_kaddr(queue, pending_idx), xenvif_idx_unmap()
1732 queue->grant_tx_handle[pending_idx]); xenvif_idx_unmap()
1733 xenvif_grant_handle_reset(queue, pending_idx); xenvif_idx_unmap()
1736 &queue->mmap_pages[pending_idx], 1); xenvif_idx_unmap()
1738 netdev_err(queue->vif->dev, xenvif_idx_unmap()
1749 static inline int tx_work_todo(struct xenvif_queue *queue) tx_work_todo() argument
1751 if (likely(RING_HAS_UNCONSUMED_REQUESTS(&queue->tx))) tx_work_todo()
1757 static inline bool tx_dealloc_work_todo(struct xenvif_queue *queue) tx_dealloc_work_todo() argument
1759 return queue->dealloc_cons != queue->dealloc_prod; tx_dealloc_work_todo()
1762 void xenvif_unmap_frontend_rings(struct xenvif_queue *queue) xenvif_unmap_frontend_rings() argument
1764 if (queue->tx.sring) xenvif_unmap_frontend_rings()
1765 xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif), xenvif_unmap_frontend_rings()
1766 queue->tx.sring); xenvif_unmap_frontend_rings()
1767 if (queue->rx.sring) xenvif_unmap_frontend_rings()
1768 xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif), xenvif_unmap_frontend_rings()
1769 queue->rx.sring); xenvif_unmap_frontend_rings()
1772 int xenvif_map_frontend_rings(struct xenvif_queue *queue, xenvif_map_frontend_rings() argument
1782 err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif), xenvif_map_frontend_rings()
1788 BACK_RING_INIT(&queue->tx, txs, PAGE_SIZE); xenvif_map_frontend_rings()
1790 err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif), xenvif_map_frontend_rings()
1796 BACK_RING_INIT(&queue->rx, rxs, PAGE_SIZE); xenvif_map_frontend_rings()
1801 xenvif_unmap_frontend_rings(queue); xenvif_map_frontend_rings()
1805 static void xenvif_queue_carrier_off(struct xenvif_queue *queue) xenvif_queue_carrier_off() argument
1807 struct xenvif *vif = queue->vif; xenvif_queue_carrier_off()
1809 queue->stalled = true; xenvif_queue_carrier_off()
1811 /* At least one queue has stalled? Disable the carrier. */ xenvif_queue_carrier_off()
1820 static void xenvif_queue_carrier_on(struct xenvif_queue *queue) xenvif_queue_carrier_on() argument
1822 struct xenvif *vif = queue->vif; xenvif_queue_carrier_on()
1824 queue->last_rx_time = jiffies; /* Reset Rx stall detection. */ xenvif_queue_carrier_on()
1825 queue->stalled = false; xenvif_queue_carrier_on()
1836 static bool xenvif_rx_queue_stalled(struct xenvif_queue *queue) xenvif_rx_queue_stalled() argument
1840 prod = queue->rx.sring->req_prod; xenvif_rx_queue_stalled()
1841 cons = queue->rx.req_cons; xenvif_rx_queue_stalled()
1843 return !queue->stalled xenvif_rx_queue_stalled()
1846 queue->last_rx_time + queue->vif->stall_timeout); xenvif_rx_queue_stalled()
1849 static bool xenvif_rx_queue_ready(struct xenvif_queue *queue) xenvif_rx_queue_ready() argument
1853 prod = queue->rx.sring->req_prod; xenvif_rx_queue_ready()
1854 cons = queue->rx.req_cons; xenvif_rx_queue_ready()
1856 return queue->stalled xenvif_rx_queue_ready()
1860 static bool xenvif_have_rx_work(struct xenvif_queue *queue) xenvif_have_rx_work() argument
1862 return (!skb_queue_empty(&queue->rx_queue) xenvif_have_rx_work()
1863 && xenvif_rx_ring_slots_available(queue, XEN_NETBK_RX_SLOTS_MAX)) xenvif_have_rx_work()
1864 || (queue->vif->stall_timeout && xenvif_have_rx_work()
1865 (xenvif_rx_queue_stalled(queue) xenvif_have_rx_work()
1866 || xenvif_rx_queue_ready(queue))) xenvif_have_rx_work()
1868 || queue->vif->disabled; xenvif_have_rx_work()
1871 static long xenvif_rx_queue_timeout(struct xenvif_queue *queue) xenvif_rx_queue_timeout() argument
1876 skb = skb_peek(&queue->rx_queue); xenvif_rx_queue_timeout()
1887 * queue (and not just the head at the beginning). In particular, if
1888 * the queue is initially empty an infinite timeout is used and this
1894 static void xenvif_wait_for_rx_work(struct xenvif_queue *queue) xenvif_wait_for_rx_work() argument
1898 if (xenvif_have_rx_work(queue)) xenvif_wait_for_rx_work()
1904 prepare_to_wait(&queue->wq, &wait, TASK_INTERRUPTIBLE); xenvif_wait_for_rx_work()
1905 if (xenvif_have_rx_work(queue)) xenvif_wait_for_rx_work()
1907 ret = schedule_timeout(xenvif_rx_queue_timeout(queue)); xenvif_wait_for_rx_work()
1911 finish_wait(&queue->wq, &wait); xenvif_wait_for_rx_work()
1916 struct xenvif_queue *queue = data; xenvif_kthread_guest_rx() local
1917 struct xenvif *vif = queue->vif; xenvif_kthread_guest_rx()
1920 xenvif_queue_carrier_on(queue); xenvif_kthread_guest_rx()
1923 xenvif_wait_for_rx_work(queue); xenvif_kthread_guest_rx()
1933 * associated with queue 0. xenvif_kthread_guest_rx()
1935 if (unlikely(vif->disabled && queue->id == 0)) { xenvif_kthread_guest_rx()
1940 if (!skb_queue_empty(&queue->rx_queue)) xenvif_kthread_guest_rx()
1941 xenvif_rx_action(queue); xenvif_kthread_guest_rx()
1948 if (xenvif_rx_queue_stalled(queue)) xenvif_kthread_guest_rx()
1949 xenvif_queue_carrier_off(queue); xenvif_kthread_guest_rx()
1950 else if (xenvif_rx_queue_ready(queue)) xenvif_kthread_guest_rx()
1951 xenvif_queue_carrier_on(queue); xenvif_kthread_guest_rx()
1959 xenvif_rx_queue_drop_expired(queue); xenvif_kthread_guest_rx()
1961 xenvif_rx_queue_maybe_wake(queue); xenvif_kthread_guest_rx()
1967 xenvif_rx_queue_purge(queue); xenvif_kthread_guest_rx()
1972 static bool xenvif_dealloc_kthread_should_stop(struct xenvif_queue *queue) xenvif_dealloc_kthread_should_stop() argument
1978 !atomic_read(&queue->inflight_packets); xenvif_dealloc_kthread_should_stop()
1983 struct xenvif_queue *queue = data; xenvif_dealloc_kthread() local
1986 wait_event_interruptible(queue->dealloc_wq, xenvif_dealloc_kthread()
1987 tx_dealloc_work_todo(queue) || xenvif_dealloc_kthread()
1988 xenvif_dealloc_kthread_should_stop(queue)); xenvif_dealloc_kthread()
1989 if (xenvif_dealloc_kthread_should_stop(queue)) xenvif_dealloc_kthread()
1992 xenvif_tx_dealloc_action(queue); xenvif_dealloc_kthread()
1997 if (tx_dealloc_work_todo(queue)) xenvif_dealloc_kthread()
1998 xenvif_tx_dealloc_action(queue); xenvif_dealloc_kthread()
366 xenvif_gop_skb(struct sk_buff *skb, struct netrx_pending_operations *npo, struct xenvif_queue *queue) xenvif_gop_skb() argument
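
Near the end of the netback.c hits, xenvif_rx_queue_stalled()/xenvif_rx_queue_ready() flip a per-queue stalled flag when the frontend leaves no ring space for longer than stall_timeout, and that flag drives the carrier off/on helpers. Below is a heavily abstracted sketch of that watchdog, with ring_space and needed passed in as plain numbers instead of being derived from req_prod/req_cons; all names and values are illustrative.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct rx_watch {
    bool stalled;
    uint64_t last_rx_ms;          /* last time we made forward progress */
    uint64_t stall_timeout_ms;
};

/* Not stalled yet, no room, and quiet for too long: declare a stall. */
static bool rx_queue_stalled(const struct rx_watch *w, unsigned ring_space,
                             unsigned needed, uint64_t now_ms)
{
    return !w->stalled &&
           ring_space < needed &&
           now_ms > w->last_rx_ms + w->stall_timeout_ms;
}

/* Already stalled and the guest has posted buffers again: ready. */
static bool rx_queue_ready(const struct rx_watch *w, unsigned ring_space,
                           unsigned needed)
{
    return w->stalled && ring_space >= needed;
}

int main(void)
{
    struct rx_watch w = { .stalled = false, .last_rx_ms = 0,
                          .stall_timeout_ms = 1000 };

    if (rx_queue_stalled(&w, 0, 8, 1500)) {     /* no slots for 1.5 s */
        w.stalled = true;
        printf("queue stalled, carrier off\n");
    }
    if (rx_queue_ready(&w, 16, 8)) {            /* buffers posted again */
        w.stalled = false;
        printf("queue ready, carrier on\n");
    }
    return 0;
}
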
H A Dinterface.c46 /* Number of bytes allowed on the internal guest Rx queue. */
54 void xenvif_skb_zerocopy_prepare(struct xenvif_queue *queue, xenvif_skb_zerocopy_prepare() argument
58 atomic_inc(&queue->inflight_packets); xenvif_skb_zerocopy_prepare()
61 void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue) xenvif_skb_zerocopy_complete() argument
63 atomic_dec(&queue->inflight_packets); xenvif_skb_zerocopy_complete()
75 struct xenvif_queue *queue = dev_id; xenvif_tx_interrupt() local
77 if (RING_HAS_UNCONSUMED_REQUESTS(&queue->tx)) xenvif_tx_interrupt()
78 napi_schedule(&queue->napi); xenvif_tx_interrupt()
85 struct xenvif_queue *queue = xenvif_poll() local
93 if (unlikely(queue->vif->disabled)) { xenvif_poll()
98 work_done = xenvif_tx_action(queue, budget); xenvif_poll()
102 xenvif_napi_schedule_or_enable_events(queue); xenvif_poll()
110 struct xenvif_queue *queue = dev_id; xenvif_rx_interrupt() local
112 xenvif_kick_thread(queue); xenvif_rx_interrupt()
125 int xenvif_queue_stopped(struct xenvif_queue *queue) xenvif_queue_stopped() argument
127 struct net_device *dev = queue->vif->dev; xenvif_queue_stopped()
128 unsigned int id = queue->id; xenvif_queue_stopped()
132 void xenvif_wake_queue(struct xenvif_queue *queue) xenvif_wake_queue() argument
134 struct net_device *dev = queue->vif->dev; xenvif_wake_queue()
135 unsigned int id = queue->id; xenvif_wake_queue()
142 struct xenvif_queue *queue = NULL; xenvif_start_xmit() local
153 /* Obtain the queue to be used to transmit this packet */ xenvif_start_xmit()
156 pr_warn_ratelimited("Invalid queue %hu for packet on interface %s\n.", xenvif_start_xmit()
160 queue = &vif->queues[index]; xenvif_start_xmit()
162 /* Drop the packet if queue is not ready */ xenvif_start_xmit()
163 if (queue->task == NULL || xenvif_start_xmit()
164 queue->dealloc_task == NULL || xenvif_start_xmit()
171 xenvif_rx_queue_tail(queue, skb); xenvif_start_xmit()
172 xenvif_kick_thread(queue); xenvif_start_xmit()
185 struct xenvif_queue *queue = NULL; xenvif_get_stats() local
196 /* Aggregate tx and rx stats from each queue */ xenvif_get_stats()
198 queue = &vif->queues[index]; xenvif_get_stats()
199 rx_bytes += queue->stats.rx_bytes; xenvif_get_stats()
200 rx_packets += queue->stats.rx_packets; xenvif_get_stats()
201 tx_bytes += queue->stats.tx_bytes; xenvif_get_stats()
202 tx_packets += queue->stats.tx_packets; xenvif_get_stats()
216 struct xenvif_queue *queue = NULL; xenvif_up() local
221 queue = &vif->queues[queue_index]; xenvif_up()
222 napi_enable(&queue->napi); xenvif_up()
223 enable_irq(queue->tx_irq); xenvif_up()
224 if (queue->tx_irq != queue->rx_irq) xenvif_up()
225 enable_irq(queue->rx_irq); xenvif_up()
226 xenvif_napi_schedule_or_enable_events(queue); xenvif_up()
232 struct xenvif_queue *queue = NULL; xenvif_down() local
237 queue = &vif->queues[queue_index]; xenvif_down()
238 disable_irq(queue->tx_irq); xenvif_down()
239 if (queue->tx_irq != queue->rx_irq) xenvif_down()
240 disable_irq(queue->rx_irq); xenvif_down()
241 napi_disable(&queue->napi); xenvif_down()
242 del_timer_sync(&queue->credit_timeout); xenvif_down()
459 int xenvif_init_queue(struct xenvif_queue *queue) xenvif_init_queue() argument
463 queue->credit_bytes = queue->remaining_credit = ~0UL; xenvif_init_queue()
464 queue->credit_usec = 0UL; xenvif_init_queue()
465 init_timer(&queue->credit_timeout); xenvif_init_queue()
466 queue->credit_timeout.function = xenvif_tx_credit_callback; xenvif_init_queue()
467 queue->credit_window_start = get_jiffies_64(); xenvif_init_queue()
469 queue->rx_queue_max = XENVIF_RX_QUEUE_BYTES; xenvif_init_queue()
471 skb_queue_head_init(&queue->rx_queue); xenvif_init_queue()
472 skb_queue_head_init(&queue->tx_queue); xenvif_init_queue()
474 queue->pending_cons = 0; xenvif_init_queue()
475 queue->pending_prod = MAX_PENDING_REQS; xenvif_init_queue()
477 queue->pending_ring[i] = i; xenvif_init_queue()
479 spin_lock_init(&queue->callback_lock); xenvif_init_queue()
480 spin_lock_init(&queue->response_lock); xenvif_init_queue()
487 queue->mmap_pages); xenvif_init_queue()
489 netdev_err(queue->vif->dev, "Could not reserve mmap_pages\n"); xenvif_init_queue()
494 queue->pending_tx_info[i].callback_struct = (struct ubuf_info) xenvif_init_queue()
498 queue->grant_tx_handle[i] = NETBACK_INVALID_HANDLE; xenvif_init_queue()
516 int xenvif_connect(struct xenvif_queue *queue, unsigned long tx_ring_ref, xenvif_connect() argument
523 BUG_ON(queue->tx_irq); xenvif_connect()
524 BUG_ON(queue->task); xenvif_connect()
525 BUG_ON(queue->dealloc_task); xenvif_connect()
527 err = xenvif_map_frontend_rings(queue, tx_ring_ref, rx_ring_ref); xenvif_connect()
531 init_waitqueue_head(&queue->wq); xenvif_connect()
532 init_waitqueue_head(&queue->dealloc_wq); xenvif_connect()
533 atomic_set(&queue->inflight_packets, 0); xenvif_connect()
535 netif_napi_add(queue->vif->dev, &queue->napi, xenvif_poll, xenvif_connect()
541 queue->vif->domid, tx_evtchn, xenvif_interrupt, 0, xenvif_connect()
542 queue->name, queue); xenvif_connect()
545 queue->tx_irq = queue->rx_irq = err; xenvif_connect()
546 disable_irq(queue->tx_irq); xenvif_connect()
549 snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name), xenvif_connect()
550 "%s-tx", queue->name); xenvif_connect()
552 queue->vif->domid, tx_evtchn, xenvif_tx_interrupt, 0, xenvif_connect()
553 queue->tx_irq_name, queue); xenvif_connect()
556 queue->tx_irq = err; xenvif_connect()
557 disable_irq(queue->tx_irq); xenvif_connect()
559 snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name), xenvif_connect()
560 "%s-rx", queue->name); xenvif_connect()
562 queue->vif->domid, rx_evtchn, xenvif_rx_interrupt, 0, xenvif_connect()
563 queue->rx_irq_name, queue); xenvif_connect()
566 queue->rx_irq = err; xenvif_connect()
567 disable_irq(queue->rx_irq); xenvif_connect()
570 queue->stalled = true; xenvif_connect()
573 (void *)queue, "%s-guest-rx", queue->name); xenvif_connect()
575 pr_warn("Could not allocate kthread for %s\n", queue->name); xenvif_connect()
579 queue->task = task; xenvif_connect()
583 (void *)queue, "%s-dealloc", queue->name); xenvif_connect()
585 pr_warn("Could not allocate kthread for %s\n", queue->name); xenvif_connect()
589 queue->dealloc_task = task; xenvif_connect()
591 wake_up_process(queue->task); xenvif_connect()
592 wake_up_process(queue->dealloc_task); xenvif_connect()
597 unbind_from_irqhandler(queue->rx_irq, queue); xenvif_connect()
598 queue->rx_irq = 0; xenvif_connect()
600 unbind_from_irqhandler(queue->tx_irq, queue); xenvif_connect()
601 queue->tx_irq = 0; xenvif_connect()
603 xenvif_unmap_frontend_rings(queue); xenvif_connect()
624 struct xenvif_queue *queue = NULL; xenvif_disconnect() local
631 queue = &vif->queues[queue_index]; xenvif_disconnect()
633 netif_napi_del(&queue->napi); xenvif_disconnect()
635 if (queue->task) { xenvif_disconnect()
636 kthread_stop(queue->task); xenvif_disconnect()
637 put_task_struct(queue->task); xenvif_disconnect()
638 queue->task = NULL; xenvif_disconnect()
641 if (queue->dealloc_task) { xenvif_disconnect()
642 kthread_stop(queue->dealloc_task); xenvif_disconnect()
643 queue->dealloc_task = NULL; xenvif_disconnect()
646 if (queue->tx_irq) { xenvif_disconnect()
647 if (queue->tx_irq == queue->rx_irq) xenvif_disconnect()
648 unbind_from_irqhandler(queue->tx_irq, queue); xenvif_disconnect()
650 unbind_from_irqhandler(queue->tx_irq, queue); xenvif_disconnect()
651 unbind_from_irqhandler(queue->rx_irq, queue); xenvif_disconnect()
653 queue->tx_irq = 0; xenvif_disconnect()
656 xenvif_unmap_frontend_rings(queue); xenvif_disconnect()
661 * Used for queue teardown from xenvif_free(), and on the
664 void xenvif_deinit_queue(struct xenvif_queue *queue) xenvif_deinit_queue() argument
666 gnttab_free_pages(MAX_PENDING_REQS, queue->mmap_pages); xenvif_deinit_queue()
671 struct xenvif_queue *queue = NULL; xenvif_free() local
678 queue = &vif->queues[queue_index]; xenvif_free()
679 xenvif_deinit_queue(queue); xenvif_free()
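
xenvif_get_stats() above aggregates the per-queue counters into interface-wide totals by looping over vif->queues. The same idea reduced to a few lines of standalone C; the field names are kept close to the excerpts but the types and function are otherwise illustrative.

#include <stdint.h>
#include <stdio.h>

struct queue_stats {
    uint64_t rx_bytes, rx_packets;
    uint64_t tx_bytes, tx_packets;
};

/* Sum the per-queue counters into one set of interface totals. */
static struct queue_stats vif_get_stats(const struct queue_stats *q, unsigned n)
{
    struct queue_stats s = { 0 };

    for (unsigned i = 0; i < n; i++) {
        s.rx_bytes   += q[i].rx_bytes;
        s.rx_packets += q[i].rx_packets;
        s.tx_bytes   += q[i].tx_bytes;
        s.tx_packets += q[i].tx_packets;
    }
    return s;
}

int main(void)
{
    struct queue_stats qs[2] = {
        { .rx_bytes = 1000, .rx_packets = 2, .tx_bytes = 500, .tx_packets = 1 },
        { .rx_bytes = 3000, .rx_packets = 4, .tx_bytes = 700, .tx_packets = 2 },
    };
    struct queue_stats s = vif_get_stats(qs, 2);

    printf("rx %llu B / %llu pkts, tx %llu B / %llu pkts\n",
           (unsigned long long)s.rx_bytes, (unsigned long long)s.rx_packets,
           (unsigned long long)s.tx_bytes, (unsigned long long)s.tx_packets);
    return 0;
}
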
H A Dxenbus.c41 static int connect_rings(struct backend_info *be, struct xenvif_queue *queue);
55 struct xenvif_queue *queue = m->private; xenvif_read_io_ring() local
56 struct xen_netif_tx_back_ring *tx_ring = &queue->tx; xenvif_read_io_ring()
57 struct xen_netif_rx_back_ring *rx_ring = &queue->rx; xenvif_read_io_ring()
63 seq_printf(m, "Queue %d\nTX: nr_ents %u\n", queue->id, xenvif_read_io_ring()
79 queue->pending_prod, xenvif_read_io_ring()
80 queue->pending_cons, xenvif_read_io_ring()
81 nr_pending_reqs(queue)); xenvif_read_io_ring()
83 queue->dealloc_prod, xenvif_read_io_ring()
84 queue->dealloc_cons, xenvif_read_io_ring()
85 queue->dealloc_prod - queue->dealloc_cons); xenvif_read_io_ring()
107 seq_printf(m, "NAPI state: %lx NAPI weight: %d TX queue len %u\n" xenvif_read_io_ring()
110 queue->napi.state, queue->napi.weight, xenvif_read_io_ring()
111 skb_queue_len(&queue->tx_queue), xenvif_read_io_ring()
112 timer_pending(&queue->credit_timeout), xenvif_read_io_ring()
113 queue->credit_bytes, xenvif_read_io_ring()
114 queue->credit_usec, xenvif_read_io_ring()
115 queue->remaining_credit, xenvif_read_io_ring()
116 queue->credit_timeout.expires, xenvif_read_io_ring()
119 dev_queue = netdev_get_tx_queue(queue->vif->dev, queue->id); xenvif_read_io_ring()
121 seq_printf(m, "\nRx internal queue: len %u max %u pkts %u %s\n", xenvif_read_io_ring()
122 queue->rx_queue_len, queue->rx_queue_max, xenvif_read_io_ring()
123 skb_queue_len(&queue->rx_queue), xenvif_read_io_ring()
136 struct xenvif_queue *queue = xenvif_write_io_ring() local
158 xenvif_interrupt(0, (void *)queue); xenvif_write_io_ring()
161 queue->id); xenvif_write_io_ring()
170 void *queue = NULL; xenvif_dump_open() local
173 queue = inode->i_private; xenvif_dump_open()
174 ret = single_open(filp, xenvif_read_io_ring, queue); xenvif_dump_open()
348 /* Multi-queue support: This is an optional feature. */ netback_probe()
350 "multi-queue-max-queues", "%u", xenvif_max_queues); netback_probe()
352 pr_debug("Error writing multi-queue-max-queues\n"); netback_probe()
667 struct xenvif_queue *queue = &vif->queues[queue_index]; xen_net_rate_changed() local
669 queue->credit_bytes = credit_bytes; xen_net_rate_changed()
670 queue->credit_usec = credit_usec; xen_net_rate_changed()
671 if (!mod_timer_pending(&queue->credit_timeout, jiffies) && xen_net_rate_changed()
672 queue->remaining_credit > queue->credit_bytes) { xen_net_rate_changed()
673 queue->remaining_credit = queue->credit_bytes; xen_net_rate_changed()
751 struct xenvif_queue *queue; connect() local
757 "multi-queue-num-queues", connect()
760 requested_num_queues = 1; /* Fall back to single queue */ connect()
787 queue = &be->vif->queues[queue_index]; connect()
788 queue->vif = be->vif; connect()
789 queue->id = queue_index; connect()
790 snprintf(queue->name, sizeof(queue->name), "%s-q%u", connect()
791 be->vif->dev->name, queue->id); connect()
793 err = xenvif_init_queue(queue); connect()
805 queue->credit_bytes = credit_bytes; connect()
806 queue->remaining_credit = credit_bytes; connect()
807 queue->credit_usec = credit_usec; connect()
809 err = connect_rings(be, queue); connect()
815 xenvif_deinit_queue(queue); connect()
856 static int connect_rings(struct backend_info *be, struct xenvif_queue *queue) connect_rings() argument
859 unsigned int num_queues = queue->vif->num_queues; connect_rings()
865 const size_t xenstore_path_ext_size = 11; /* sufficient for "/queue-NNN" */ connect_rings()
867 /* If the frontend requested 1 queue, or we have fallen back connect_rings()
868 * to single queue due to lack of frontend support for multi- connect_rings()
869 * queue, expect the remaining XenStore keys in the toplevel connect_rings()
871 * queue-N. connect_rings()
889 snprintf(xspath, xspathsize, "%s/queue-%u", dev->otherend, connect_rings()
890 queue->id); connect_rings()
920 err = xenvif_connect(queue, tx_ring_ref, rx_ring_ref, connect_rings()
H A Dcommon.h106 /* IRQ name is queue name with "-tx" or "-rx" appended */
112 /* Stats fields to be updated per-queue.
114 * fields that are updated in netback.c for each queue.
129 struct xenvif_queue { /* Per-queue data for xenvif */
271 int xenvif_init_queue(struct xenvif_queue *queue);
272 void xenvif_deinit_queue(struct xenvif_queue *queue);
274 int xenvif_connect(struct xenvif_queue *queue, unsigned long tx_ring_ref,
285 int xenvif_queue_stopped(struct xenvif_queue *queue);
286 void xenvif_wake_queue(struct xenvif_queue *queue);
289 void xenvif_unmap_frontend_rings(struct xenvif_queue *queue);
290 int xenvif_map_frontend_rings(struct xenvif_queue *queue,
295 void xenvif_napi_schedule_or_enable_events(struct xenvif_queue *queue);
300 int xenvif_tx_action(struct xenvif_queue *queue, int budget);
303 void xenvif_kick_thread(struct xenvif_queue *queue);
307 void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb);
312 bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue, int needed);
320 void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx);
322 static inline pending_ring_idx_t nr_pending_reqs(struct xenvif_queue *queue) nr_pending_reqs() argument
325 queue->pending_prod + queue->pending_cons; nr_pending_reqs()
343 void xenvif_skb_zerocopy_prepare(struct xenvif_queue *queue,
345 void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue);
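
nr_pending_reqs() above, read together with the initial values set in xenvif_init_queue() (pending_prod = MAX_PENDING_REQS, pending_cons = 0), suggests the pending ring hands out slot indices: cons counts slots taken for new packets, prod counts slots returned on completion, so the in-flight count is the capacity minus the free slots. A tiny sketch of that arithmetic, reading the excerpt that way; the capacity value below is illustrative.

#include <stdint.h>
#include <stdio.h>

#define MAX_PENDING_REQS 256      /* illustrative capacity */

/*
 * Free slots are prod - cons, so requests still in flight are the capacity
 * minus that difference, which is the arithmetic behind nr_pending_reqs().
 * Unsigned wrap-around keeps the subtraction correct as the counters grow.
 */
static unsigned nr_pending_reqs(uint32_t pending_prod, uint32_t pending_cons)
{
    return MAX_PENDING_REQS - pending_prod + pending_cons;
}

int main(void)
{
    uint32_t prod = MAX_PENDING_REQS; /* queue starts with every slot free */
    uint32_t cons = 0;

    cons += 10;                       /* 10 packets take slots */
    prod += 4;                        /* 4 of them complete and return slots */
    printf("in flight: %u\n", nr_pending_reqs(prod, cons));  /* prints 6 */
    return 0;
}
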
/linux-4.1.27/drivers/mmc/card/
H A DMakefile6 mmc_block-objs := block.o queue.o
H A Dqueue.c2 * linux/drivers/mmc/card/queue.c
22 #include "queue.h"
52 struct request_queue *q = mq->queue; mmc_queue_thread()
108 * Generic MMC request handler. This is called for any queue on a
110 * on any queue on this host, and attempt to issue it. This may
111 * not be the queue we were asked to process.
182 * mmc_init_queue - initialise a queue structure.
183 * @mq: mmc queue
184 * @card: mmc card to attach this queue
185 * @lock: queue lock
188 * Initialise a MMC card request queue.
203 mq->queue = blk_init_queue(mmc_request_fn, lock); mmc_init_queue()
204 if (!mq->queue) mmc_init_queue()
209 mq->queue->queuedata = mq; mmc_init_queue()
211 blk_queue_prep_rq(mq->queue, mmc_prep_request); mmc_init_queue()
212 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue); mmc_init_queue()
213 queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, mq->queue); mmc_init_queue()
215 mmc_queue_setup_discard(mq->queue, card); mmc_init_queue()
248 blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY); mmc_init_queue()
249 blk_queue_max_hw_sectors(mq->queue, bouncesz / 512); mmc_init_queue()
250 blk_queue_max_segments(mq->queue, bouncesz / 512); mmc_init_queue()
251 blk_queue_max_segment_size(mq->queue, bouncesz); mmc_init_queue()
275 blk_queue_bounce_limit(mq->queue, limit); mmc_init_queue()
276 blk_queue_max_hw_sectors(mq->queue, mmc_init_queue()
278 blk_queue_max_segments(mq->queue, host->max_segs); mmc_init_queue()
279 blk_queue_max_segment_size(mq->queue, host->max_seg_size); mmc_init_queue()
319 blk_cleanup_queue(mq->queue); mmc_init_queue()
325 struct request_queue *q = mq->queue; mmc_cleanup_queue()
330 /* Make sure the queue isn't suspended, as that will deadlock */ mmc_cleanup_queue()
336 /* Empty the queue */ mmc_cleanup_queue()
408 * mmc_queue_suspend - suspend a MMC request queue
409 * @mq: MMC queue to suspend
411 * Stop the block request queue, and wait for our thread to
417 struct request_queue *q = mq->queue; mmc_queue_suspend()
432 * mmc_queue_resume - resume a previously suspended MMC request queue
433 * @mq: MMC queue to resume
437 struct request_queue *q = mq->queue; mmc_queue_resume()
462 unsigned int max_seg_sz = queue_max_segment_size(mq->queue); mmc_queue_packed_map_sg()
478 sg_len += blk_rq_map_sg(mq->queue, req, __sg); mmc_queue_packed_map_sg()
504 return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg); mmc_queue_map_sg()
513 sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg); mmc_queue_map_sg()
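
mmc/card/queue.c above drives the block request queue from a dedicated thread that pulls one request at a time and sleeps when there is nothing to issue. The sketch below reproduces only that consumer-thread shape with pthreads and a condition variable; it does not use the block layer API, and all names are invented.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct req {
    int id;
    struct req *next;
};

struct req_queue {
    struct req *head, *tail;
    int shutting_down;
    pthread_mutex_t lock;
    pthread_cond_t wake;
};

static void rq_init(struct req_queue *q)
{
    q->head = q->tail = NULL;
    q->shutting_down = 0;
    pthread_mutex_init(&q->lock, NULL);
    pthread_cond_init(&q->wake, NULL);
}

/* Producer: queue a request and wake the worker, as the request_fn wakes
 * the mmc queue thread. */
static void rq_submit(struct req_queue *q, int id)
{
    struct req *r = malloc(sizeof(*r));
    if (!r)
        return;
    r->id = id;
    r->next = NULL;
    pthread_mutex_lock(&q->lock);
    if (q->tail)
        q->tail->next = r;
    else
        q->head = r;
    q->tail = r;
    pthread_cond_signal(&q->wake);
    pthread_mutex_unlock(&q->lock);
}

/* Worker: issue requests one at a time, sleep when the queue is empty,
 * stop once asked to shut down and the queue has drained. */
static void *rq_thread(void *arg)
{
    struct req_queue *q = arg;

    for (;;) {
        pthread_mutex_lock(&q->lock);
        while (!q->head && !q->shutting_down)
            pthread_cond_wait(&q->wake, &q->lock);
        struct req *r = q->head;
        if (r) {
            q->head = r->next;
            if (!q->head)
                q->tail = NULL;
        }
        pthread_mutex_unlock(&q->lock);
        if (!r)
            break;                /* shutting down and nothing left to issue */
        printf("issuing request %d\n", r->id);
        free(r);
    }
    return NULL;
}

int main(void)
{
    struct req_queue q;
    pthread_t tid;

    rq_init(&q);
    pthread_create(&tid, NULL, rq_thread, &q);
    for (int i = 0; i < 3; i++)
        rq_submit(&q, i);
    pthread_mutex_lock(&q.lock);
    q.shutting_down = 1;
    pthread_cond_signal(&q.wake);
    pthread_mutex_unlock(&q.lock);
    pthread_join(tid, NULL);
    return 0;
}
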
/linux-4.1.27/drivers/net/wireless/rt2x00/
H A Drt2x00queue.c23 Abstract: rt2x00 queue specific routines.
36 struct data_queue *queue = entry->queue; rt2x00queue_alloc_rxskb() local
37 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev; rt2x00queue_alloc_rxskb()
48 frame_size = queue->data_size + queue->desc_size + queue->winfo_size; rt2x00queue_alloc_rxskb()
107 struct device *dev = entry->queue->rt2x00dev->dev; rt2x00queue_map_txskb()
123 struct device *dev = entry->queue->rt2x00dev->dev; rt2x00queue_unmap_skb()
498 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; rt2x00queue_write_tx_data()
503 * a queue corruption! rt2x00queue_write_tx_data()
508 "Corrupt queue %d, accessing entry which is not ours\n" rt2x00queue_write_tx_data()
510 entry->queue->qid, DRV_PROJECT); rt2x00queue_write_tx_data()
539 struct data_queue *queue = entry->queue; rt2x00queue_write_tx_descriptor() local
541 queue->rt2x00dev->ops->lib->write_tx_desc(entry, txdesc); rt2x00queue_write_tx_descriptor()
547 rt2x00debug_dump_frame(queue->rt2x00dev, DUMP_FRAME_TX, entry->skb); rt2x00queue_write_tx_descriptor()
550 static void rt2x00queue_kick_tx_queue(struct data_queue *queue, rt2x00queue_kick_tx_queue() argument
554 * Check if we need to kick the queue, there are however a few rules rt2x00queue_kick_tx_queue()
560 * in the queue are less then a certain threshold. rt2x00queue_kick_tx_queue()
562 if (rt2x00queue_threshold(queue) || rt2x00queue_kick_tx_queue()
564 queue->rt2x00dev->ops->lib->kick_queue(queue); rt2x00queue_kick_tx_queue()
569 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; rt2x00queue_bar_check()
609 int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb, rt2x00queue_write_tx_frame() argument
624 rt2x00queue_create_tx_descriptor(queue->rt2x00dev, skb, &txdesc, sta); rt2x00queue_write_tx_frame()
649 if (rt2x00_has_cap_flag(queue->rt2x00dev, REQUIRE_COPY_IV)) rt2x00queue_write_tx_frame()
663 if (rt2x00_has_cap_flag(queue->rt2x00dev, REQUIRE_L2PAD)) rt2x00queue_write_tx_frame()
665 else if (rt2x00_has_cap_flag(queue->rt2x00dev, REQUIRE_DMA)) rt2x00queue_write_tx_frame()
671 spin_lock(&queue->tx_lock); rt2x00queue_write_tx_frame()
673 if (unlikely(rt2x00queue_full(queue))) { rt2x00queue_write_tx_frame()
674 rt2x00_err(queue->rt2x00dev, "Dropping frame due to full tx queue %d\n", rt2x00queue_write_tx_frame()
675 queue->qid); rt2x00queue_write_tx_frame()
680 entry = rt2x00queue_get_entry(queue, Q_INDEX); rt2x00queue_write_tx_frame()
684 rt2x00_err(queue->rt2x00dev, rt2x00queue_write_tx_frame()
685 "Arrived at non-free entry in the non-full queue %d\n" rt2x00queue_write_tx_frame()
687 queue->qid, DRV_PROJECT); rt2x00queue_write_tx_frame()
696 * It could be possible that the queue was corrupted and this rt2x00queue_write_tx_frame()
716 rt2x00queue_kick_tx_queue(queue, &txdesc); rt2x00queue_write_tx_frame()
719 spin_unlock(&queue->tx_lock); rt2x00queue_write_tx_frame()
738 * since the beacon queue will get stopped anyway). rt2x00queue_clear_beacon()
788 bool rt2x00queue_for_each_entry(struct data_queue *queue, rt2x00queue_for_each_entry() argument
801 rt2x00_err(queue->rt2x00dev, rt2x00queue_for_each_entry()
813 spin_lock_irqsave(&queue->index_lock, irqflags); rt2x00queue_for_each_entry()
814 index_start = queue->index[start]; rt2x00queue_for_each_entry()
815 index_end = queue->index[end]; rt2x00queue_for_each_entry()
816 spin_unlock_irqrestore(&queue->index_lock, irqflags); rt2x00queue_for_each_entry()
824 if (fn(&queue->entries[i], data)) rt2x00queue_for_each_entry()
828 for (i = index_start; i < queue->limit; i++) { rt2x00queue_for_each_entry()
829 if (fn(&queue->entries[i], data)) rt2x00queue_for_each_entry()
834 if (fn(&queue->entries[i], data)) rt2x00queue_for_each_entry()
843 struct queue_entry *rt2x00queue_get_entry(struct data_queue *queue, rt2x00queue_get_entry() argument
850 rt2x00_err(queue->rt2x00dev, "Entry requested from invalid index type (%d)\n", rt2x00queue_get_entry()
855 spin_lock_irqsave(&queue->index_lock, irqflags); rt2x00queue_get_entry()
857 entry = &queue->entries[queue->index[index]]; rt2x00queue_get_entry()
859 spin_unlock_irqrestore(&queue->index_lock, irqflags); rt2x00queue_get_entry()
867 struct data_queue *queue = entry->queue; rt2x00queue_index_inc() local
871 rt2x00_err(queue->rt2x00dev, rt2x00queue_index_inc()
876 spin_lock_irqsave(&queue->index_lock, irqflags); rt2x00queue_index_inc()
878 queue->index[index]++; rt2x00queue_index_inc()
879 if (queue->index[index] >= queue->limit) rt2x00queue_index_inc()
880 queue->index[index] = 0; rt2x00queue_index_inc()
885 queue->length++; rt2x00queue_index_inc()
887 queue->length--; rt2x00queue_index_inc()
888 queue->count++; rt2x00queue_index_inc()
891 spin_unlock_irqrestore(&queue->index_lock, irqflags); rt2x00queue_index_inc()
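rt2x00queue_index_inc() above is a plain ring-index advance: bump the chosen index, wrap at queue->limit, and adjust the length/count bookkeeping under index_lock. The core of it as a standalone sketch (ring_index_inc is an illustrative name):

static void ring_index_inc(unsigned short *index, unsigned short limit)
{
	(*index)++;
	if (*index >= limit)
		*index = 0;		/* wrap back to the first entry */
}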
894 static void rt2x00queue_pause_queue_nocheck(struct data_queue *queue) rt2x00queue_pause_queue_nocheck() argument
896 switch (queue->qid) { rt2x00queue_pause_queue_nocheck()
902 * For TX queues, we have to disable the queue rt2x00queue_pause_queue_nocheck()
905 ieee80211_stop_queue(queue->rt2x00dev->hw, queue->qid); rt2x00queue_pause_queue_nocheck()
911 void rt2x00queue_pause_queue(struct data_queue *queue) rt2x00queue_pause_queue() argument
913 if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) || rt2x00queue_pause_queue()
914 !test_bit(QUEUE_STARTED, &queue->flags) || rt2x00queue_pause_queue()
915 test_and_set_bit(QUEUE_PAUSED, &queue->flags)) rt2x00queue_pause_queue()
918 rt2x00queue_pause_queue_nocheck(queue); rt2x00queue_pause_queue()
922 void rt2x00queue_unpause_queue(struct data_queue *queue) rt2x00queue_unpause_queue() argument
924 if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) || rt2x00queue_unpause_queue()
925 !test_bit(QUEUE_STARTED, &queue->flags) || rt2x00queue_unpause_queue()
926 !test_and_clear_bit(QUEUE_PAUSED, &queue->flags)) rt2x00queue_unpause_queue()
929 switch (queue->qid) { rt2x00queue_unpause_queue()
935 * For TX queues, we have to enable the queue rt2x00queue_unpause_queue()
938 ieee80211_wake_queue(queue->rt2x00dev->hw, queue->qid); rt2x00queue_unpause_queue()
942 * For RX we need to kick the queue now in order to rt2x00queue_unpause_queue()
945 queue->rt2x00dev->ops->lib->kick_queue(queue); rt2x00queue_unpause_queue()
952 void rt2x00queue_start_queue(struct data_queue *queue) rt2x00queue_start_queue() argument
954 mutex_lock(&queue->status_lock); rt2x00queue_start_queue()
956 if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) || rt2x00queue_start_queue()
957 test_and_set_bit(QUEUE_STARTED, &queue->flags)) { rt2x00queue_start_queue()
958 mutex_unlock(&queue->status_lock); rt2x00queue_start_queue()
962 set_bit(QUEUE_PAUSED, &queue->flags); rt2x00queue_start_queue()
964 queue->rt2x00dev->ops->lib->start_queue(queue); rt2x00queue_start_queue()
966 rt2x00queue_unpause_queue(queue); rt2x00queue_start_queue()
968 mutex_unlock(&queue->status_lock); rt2x00queue_start_queue()
972 void rt2x00queue_stop_queue(struct data_queue *queue) rt2x00queue_stop_queue() argument
974 mutex_lock(&queue->status_lock); rt2x00queue_stop_queue()
976 if (!test_and_clear_bit(QUEUE_STARTED, &queue->flags)) { rt2x00queue_stop_queue()
977 mutex_unlock(&queue->status_lock); rt2x00queue_stop_queue()
981 rt2x00queue_pause_queue_nocheck(queue); rt2x00queue_stop_queue()
983 queue->rt2x00dev->ops->lib->stop_queue(queue); rt2x00queue_stop_queue()
985 mutex_unlock(&queue->status_lock); rt2x00queue_stop_queue()
989 void rt2x00queue_flush_queue(struct data_queue *queue, bool drop) rt2x00queue_flush_queue() argument
992 (queue->qid == QID_AC_VO) || rt2x00queue_flush_queue()
993 (queue->qid == QID_AC_VI) || rt2x00queue_flush_queue()
994 (queue->qid == QID_AC_BE) || rt2x00queue_flush_queue()
995 (queue->qid == QID_AC_BK); rt2x00queue_flush_queue()
1001 * to the queue to make sure the hardware will rt2x00queue_flush_queue()
1005 queue->rt2x00dev->ops->lib->kick_queue(queue); rt2x00queue_flush_queue()
1010 * alternative which just waits for the queue to become empty. rt2x00queue_flush_queue()
1012 if (likely(queue->rt2x00dev->ops->lib->flush_queue)) rt2x00queue_flush_queue()
1013 queue->rt2x00dev->ops->lib->flush_queue(queue, drop); rt2x00queue_flush_queue()
1016 * The queue flush has failed... rt2x00queue_flush_queue()
1018 if (unlikely(!rt2x00queue_empty(queue))) rt2x00queue_flush_queue()
1019 rt2x00_warn(queue->rt2x00dev, "Queue %d failed to flush\n", rt2x00queue_flush_queue()
1020 queue->qid); rt2x00queue_flush_queue()
1026 struct data_queue *queue; rt2x00queue_start_queues() local
1030	 * for each queue after it has been properly initialized. rt2x00queue_start_queues()
1032 tx_queue_for_each(rt2x00dev, queue) rt2x00queue_start_queues()
1033 rt2x00queue_start_queue(queue); rt2x00queue_start_queues()
1041 struct data_queue *queue; rt2x00queue_stop_queues() local
1051 tx_queue_for_each(rt2x00dev, queue) rt2x00queue_stop_queues()
1052 rt2x00queue_stop_queue(queue); rt2x00queue_stop_queues()
1060 struct data_queue *queue; rt2x00queue_flush_queues() local
1062 tx_queue_for_each(rt2x00dev, queue) rt2x00queue_flush_queues()
1063 rt2x00queue_flush_queue(queue, drop); rt2x00queue_flush_queues()
1069 static void rt2x00queue_reset(struct data_queue *queue) rt2x00queue_reset() argument
1074 spin_lock_irqsave(&queue->index_lock, irqflags); rt2x00queue_reset()
1076 queue->count = 0; rt2x00queue_reset()
1077 queue->length = 0; rt2x00queue_reset()
1080 queue->index[i] = 0; rt2x00queue_reset()
1082 spin_unlock_irqrestore(&queue->index_lock, irqflags); rt2x00queue_reset()
1087 struct data_queue *queue; rt2x00queue_init_queues() local
1090 queue_for_each(rt2x00dev, queue) { queue_for_each()
1091 rt2x00queue_reset(queue); queue_for_each()
1093 for (i = 0; i < queue->limit; i++) queue_for_each()
1094 rt2x00dev->ops->lib->clear_entry(&queue->entries[i]); queue_for_each()
1098 static int rt2x00queue_alloc_entries(struct data_queue *queue) rt2x00queue_alloc_entries() argument
1104 rt2x00queue_reset(queue); rt2x00queue_alloc_entries()
1107 * Allocate all queue entries. rt2x00queue_alloc_entries()
1109 entry_size = sizeof(*entries) + queue->priv_size; rt2x00queue_alloc_entries()
1110 entries = kcalloc(queue->limit, entry_size, GFP_KERNEL); rt2x00queue_alloc_entries()
1118 for (i = 0; i < queue->limit; i++) { rt2x00queue_alloc_entries()
1120 entries[i].queue = queue; rt2x00queue_alloc_entries()
1124 QUEUE_ENTRY_PRIV_OFFSET(entries, i, queue->limit, rt2x00queue_alloc_entries()
1125 sizeof(*entries), queue->priv_size); rt2x00queue_alloc_entries()
1130 queue->entries = entries; rt2x00queue_alloc_entries()
1135 static void rt2x00queue_free_skbs(struct data_queue *queue) rt2x00queue_free_skbs() argument
1139 if (!queue->entries) rt2x00queue_free_skbs()
1142 for (i = 0; i < queue->limit; i++) { rt2x00queue_free_skbs()
1143 rt2x00queue_free_skb(&queue->entries[i]); rt2x00queue_free_skbs()
1147 static int rt2x00queue_alloc_rxskbs(struct data_queue *queue) rt2x00queue_alloc_rxskbs() argument
1152 for (i = 0; i < queue->limit; i++) { rt2x00queue_alloc_rxskbs()
1153 skb = rt2x00queue_alloc_rxskb(&queue->entries[i], GFP_KERNEL); rt2x00queue_alloc_rxskbs()
1156 queue->entries[i].skb = skb; rt2x00queue_alloc_rxskbs()
1164 struct data_queue *queue; rt2x00queue_initialize() local
1171 tx_queue_for_each(rt2x00dev, queue) { tx_queue_for_each()
1172 status = rt2x00queue_alloc_entries(queue); tx_queue_for_each()
1203 struct data_queue *queue; rt2x00queue_uninitialize() local
1207 queue_for_each(rt2x00dev, queue) { queue_for_each()
1208 kfree(queue->entries); queue_for_each()
1209 queue->entries = NULL; queue_for_each()
1214 struct data_queue *queue, enum data_queue_qid qid) rt2x00queue_init()
1216 mutex_init(&queue->status_lock); rt2x00queue_init()
1217 spin_lock_init(&queue->tx_lock); rt2x00queue_init()
1218 spin_lock_init(&queue->index_lock); rt2x00queue_init()
1220 queue->rt2x00dev = rt2x00dev; rt2x00queue_init()
1221 queue->qid = qid; rt2x00queue_init()
1222 queue->txop = 0; rt2x00queue_init()
1223 queue->aifs = 2; rt2x00queue_init()
1224 queue->cw_min = 5; rt2x00queue_init()
1225 queue->cw_max = 10; rt2x00queue_init()
1227 rt2x00dev->ops->queue_init(queue); rt2x00queue_init()
1229 queue->threshold = DIV_ROUND_UP(queue->limit, 10); rt2x00queue_init()
1234 struct data_queue *queue; rt2x00queue_allocate() local
1248 queue = kcalloc(rt2x00dev->data_queues, sizeof(*queue), GFP_KERNEL); rt2x00queue_allocate()
1249 if (!queue) { rt2x00queue_allocate()
1257 rt2x00dev->rx = queue; rt2x00queue_allocate()
1258 rt2x00dev->tx = &queue[1]; rt2x00queue_allocate()
1259 rt2x00dev->bcn = &queue[1 + rt2x00dev->ops->tx_queues]; rt2x00queue_allocate()
1260 rt2x00dev->atim = req_atim ? &queue[2 + rt2x00dev->ops->tx_queues] : NULL; rt2x00queue_allocate()
1263 * Initialize queue parameters. rt2x00queue_allocate()
1274 tx_queue_for_each(rt2x00dev, queue) rt2x00queue_allocate()
1275 rt2x00queue_init(rt2x00dev, queue, qid++); rt2x00queue_allocate()
1213 rt2x00queue_init(struct rt2x00_dev *rt2x00dev, struct data_queue *queue, enum data_queue_qid qid) rt2x00queue_init() argument
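The rt2x00queue_allocate() hits above show that all data queues live in a single kcalloc()'d array, with the RX, TX, beacon and (optional) ATIM pointers carved out of that one block. Reassembled from the excerpt for readability:

queue = kcalloc(rt2x00dev->data_queues, sizeof(*queue), GFP_KERNEL);

rt2x00dev->rx   = queue;				/* queue[0]              */
rt2x00dev->tx   = &queue[1];				/* queue[1 .. tx_queues] */
rt2x00dev->bcn  = &queue[1 + rt2x00dev->ops->tx_queues];/* after the TX queues   */
rt2x00dev->atim = req_atim ?
		  &queue[2 + rt2x00dev->ops->tx_queues] : NULL;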
H A Drt2x00mmio.c62 struct data_queue *queue = rt2x00dev->rx; rt2x00mmio_rxdone() local
69 entry = rt2x00queue_get_entry(queue, Q_INDEX); rt2x00mmio_rxdone()
80 skbdesc->desc_len = entry->queue->desc_size; rt2x00mmio_rxdone()
99 void rt2x00mmio_flush_queue(struct data_queue *queue, bool drop) rt2x00mmio_flush_queue() argument
103 for (i = 0; !rt2x00queue_empty(queue) && i < 10; i++) rt2x00mmio_flush_queue()
112 struct data_queue *queue) rt2x00mmio_alloc_queue_dma()
123 queue->limit * queue->desc_size, &dma, rt2x00mmio_alloc_queue_dma()
129 * Initialize all queue entries to contain valid addresses. rt2x00mmio_alloc_queue_dma()
131 for (i = 0; i < queue->limit; i++) { rt2x00mmio_alloc_queue_dma()
132 entry_priv = queue->entries[i].priv_data; rt2x00mmio_alloc_queue_dma()
133 entry_priv->desc = addr + i * queue->desc_size; rt2x00mmio_alloc_queue_dma()
134 entry_priv->desc_dma = dma + i * queue->desc_size; rt2x00mmio_alloc_queue_dma()
141 struct data_queue *queue) rt2x00mmio_free_queue_dma()
144 queue->entries[0].priv_data; rt2x00mmio_free_queue_dma()
148 queue->limit * queue->desc_size, rt2x00mmio_free_queue_dma()
155 struct data_queue *queue; rt2x00mmio_initialize() local
161 queue_for_each(rt2x00dev, queue) { queue_for_each()
162 status = rt2x00mmio_alloc_queue_dma(rt2x00dev, queue); queue_for_each()
182 queue_for_each(rt2x00dev, queue)
183 rt2x00mmio_free_queue_dma(rt2x00dev, queue);
191 struct data_queue *queue; rt2x00mmio_uninitialize() local
201 queue_for_each(rt2x00dev, queue) rt2x00mmio_uninitialize()
202 rt2x00mmio_free_queue_dma(rt2x00dev, queue); rt2x00mmio_uninitialize()
111 rt2x00mmio_alloc_queue_dma(struct rt2x00_dev *rt2x00dev, struct data_queue *queue) rt2x00mmio_alloc_queue_dma() argument
140 rt2x00mmio_free_queue_dma(struct rt2x00_dev *rt2x00dev, struct data_queue *queue) rt2x00mmio_free_queue_dma() argument
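rt2x00mmio_alloc_queue_dma() above makes one coherent DMA allocation of limit * desc_size bytes and hands each queue entry a fixed-size slice of it. The slicing, extracted from the excerpt:

for (i = 0; i < queue->limit; i++) {
	entry_priv           = queue->entries[i].priv_data;
	entry_priv->desc     = addr + i * queue->desc_size;	/* CPU address */
	entry_priv->desc_dma = dma  + i * queue->desc_size;	/* bus address */
}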
H A Drt2x00usb.c238 struct data_queue *queue; rt2x00usb_work_txdone() local
241 tx_queue_for_each(rt2x00dev, queue) { tx_queue_for_each()
242 while (!rt2x00queue_empty(queue)) { tx_queue_for_each()
243 entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE); tx_queue_for_each()
257 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; rt2x00usb_interrupt_txdone()
284 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; rt2x00usb_kick_tx_entry()
312 usb_sndbulkpipe(usb_dev, entry->queue->usb_endpoint), rt2x00usb_kick_tx_entry()
350 skbdesc->desc_len = entry->queue->desc_size; rt2x00usb_work_rxdone()
362 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; rt2x00usb_interrupt_rxdone()
377 if (urb->actual_length < entry->queue->desc_size || urb->status) rt2x00usb_interrupt_rxdone()
389 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; rt2x00usb_kick_rx_entry()
401 usb_rcvbulkpipe(usb_dev, entry->queue->usb_endpoint), rt2x00usb_kick_rx_entry()
416 void rt2x00usb_kick_queue(struct data_queue *queue) rt2x00usb_kick_queue() argument
418 switch (queue->qid) { rt2x00usb_kick_queue()
423 if (!rt2x00queue_empty(queue)) rt2x00usb_kick_queue()
424 rt2x00queue_for_each_entry(queue, rt2x00usb_kick_queue()
431 if (!rt2x00queue_full(queue)) rt2x00usb_kick_queue()
432 rt2x00queue_for_each_entry(queue, rt2x00usb_kick_queue()
446 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; rt2x00usb_flush_entry()
458 if ((entry->queue->qid == QID_BEACON) && rt2x00usb_flush_entry()
465 void rt2x00usb_flush_queue(struct data_queue *queue, bool drop) rt2x00usb_flush_queue() argument
471 rt2x00queue_for_each_entry(queue, Q_INDEX_DONE, Q_INDEX, NULL, rt2x00usb_flush_queue()
475 * Obtain the queue completion handler rt2x00usb_flush_queue()
477 switch (queue->qid) { rt2x00usb_flush_queue()
482 completion = &queue->rt2x00dev->txdone_work; rt2x00usb_flush_queue()
485 completion = &queue->rt2x00dev->rxdone_work; rt2x00usb_flush_queue()
497 if (rt2x00queue_empty(queue)) rt2x00usb_flush_queue()
502 * worker function runs, it should cleanup the queue. rt2x00usb_flush_queue()
504 queue_work(queue->rt2x00dev->workqueue, completion); rt2x00usb_flush_queue()
515 static void rt2x00usb_watchdog_tx_dma(struct data_queue *queue) rt2x00usb_watchdog_tx_dma() argument
517	rt2x00_warn(queue->rt2x00dev, "TX queue %d DMA timed out, invoke forced reset\n", rt2x00usb_watchdog_tx_dma()
518 queue->qid); rt2x00usb_watchdog_tx_dma()
520 rt2x00queue_stop_queue(queue); rt2x00usb_watchdog_tx_dma()
521 rt2x00queue_flush_queue(queue, true); rt2x00usb_watchdog_tx_dma()
522 rt2x00queue_start_queue(queue); rt2x00usb_watchdog_tx_dma()
525 static int rt2x00usb_dma_timeout(struct data_queue *queue) rt2x00usb_dma_timeout() argument
529 entry = rt2x00queue_get_entry(queue, Q_INDEX_DMA_DONE); rt2x00usb_dma_timeout()
535 struct data_queue *queue; rt2x00usb_watchdog() local
537 tx_queue_for_each(rt2x00dev, queue) { tx_queue_for_each()
538 if (!rt2x00queue_empty(queue)) { tx_queue_for_each()
539 if (rt2x00usb_dma_timeout(queue)) tx_queue_for_each()
540 rt2x00usb_watchdog_tx_dma(queue); tx_queue_for_each()
563 if (entry->queue->qid == QID_RX) rt2x00usb_clear_entry()
568 static void rt2x00usb_assign_endpoint(struct data_queue *queue, rt2x00usb_assign_endpoint() argument
571 struct usb_device *usb_dev = to_usb_device_intf(queue->rt2x00dev->dev); rt2x00usb_assign_endpoint()
574 queue->usb_endpoint = usb_endpoint_num(ep_desc); rt2x00usb_assign_endpoint()
576 if (queue->qid == QID_RX) { rt2x00usb_assign_endpoint()
577 pipe = usb_rcvbulkpipe(usb_dev, queue->usb_endpoint); rt2x00usb_assign_endpoint()
578 queue->usb_maxpacket = usb_maxpacket(usb_dev, pipe, 0); rt2x00usb_assign_endpoint()
580 pipe = usb_sndbulkpipe(usb_dev, queue->usb_endpoint); rt2x00usb_assign_endpoint()
581 queue->usb_maxpacket = usb_maxpacket(usb_dev, pipe, 1); rt2x00usb_assign_endpoint()
584 if (!queue->usb_maxpacket) rt2x00usb_assign_endpoint()
585 queue->usb_maxpacket = 1; rt2x00usb_assign_endpoint()
593 struct data_queue *queue = rt2x00dev->tx; rt2x00usb_find_endpoints() local
601 * to the queue. rt2x00usb_find_endpoints()
609 (queue != queue_end(rt2x00dev))) { rt2x00usb_find_endpoints()
610 rt2x00usb_assign_endpoint(queue, ep_desc); rt2x00usb_find_endpoints()
611 queue = queue_next(queue); rt2x00usb_find_endpoints()
630 txall_queue_for_each(rt2x00dev, queue) { txall_queue_for_each()
631 if (!queue->usb_endpoint) txall_queue_for_each()
632 rt2x00usb_assign_endpoint(queue, tx_ep_desc); txall_queue_for_each()
638 static int rt2x00usb_alloc_entries(struct data_queue *queue) rt2x00usb_alloc_entries() argument
640 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev; rt2x00usb_alloc_entries()
645 for (i = 0; i < queue->limit; i++) { rt2x00usb_alloc_entries()
646 entry_priv = queue->entries[i].priv_data; rt2x00usb_alloc_entries()
653 * If this is not the beacon queue or rt2x00usb_alloc_entries()
657 if (queue->qid != QID_BEACON || rt2x00usb_alloc_entries()
661 for (i = 0; i < queue->limit; i++) { rt2x00usb_alloc_entries()
662 bcn_priv = queue->entries[i].priv_data; rt2x00usb_alloc_entries()
671 static void rt2x00usb_free_entries(struct data_queue *queue) rt2x00usb_free_entries() argument
673 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev; rt2x00usb_free_entries()
678 if (!queue->entries) rt2x00usb_free_entries()
681 for (i = 0; i < queue->limit; i++) { rt2x00usb_free_entries()
682 entry_priv = queue->entries[i].priv_data; rt2x00usb_free_entries()
688 * If this is not the beacon queue or rt2x00usb_free_entries()
692 if (queue->qid != QID_BEACON || rt2x00usb_free_entries()
696 for (i = 0; i < queue->limit; i++) { rt2x00usb_free_entries()
697 bcn_priv = queue->entries[i].priv_data; rt2x00usb_free_entries()
705 struct data_queue *queue; rt2x00usb_initialize() local
709 * Find endpoints for each queue rt2x00usb_initialize()
718 queue_for_each(rt2x00dev, queue) { queue_for_each()
719 status = rt2x00usb_alloc_entries(queue); queue_for_each()
735 struct data_queue *queue; rt2x00usb_uninitialize() local
737 queue_for_each(rt2x00dev, queue) rt2x00usb_uninitialize()
738 rt2x00usb_free_entries(queue); rt2x00usb_uninitialize()
H A Drt2x00queue.h21 Abstract: rt2x00 queue datastructures and routines
46 * @QID_AC_VO: AC VO queue
47 * @QID_AC_VI: AC VI queue
48 * @QID_AC_BE: AC BE queue
49 * @QID_AC_BK: AC BK queue
50 * @QID_HCCA: HCCA queue
51 * @QID_MGMT: MGMT queue (prio queue)
52 * @QID_RX: RX queue
54 * @QID_BEACON: Beacon queue (value unspecified, don't send it to device)
55 * @QID_ATIM: Atim queue (value unspecified, don't send it to device)
337 * enum queue_entry_flags: Status flags for queue entry
343 * transfer (either TX or RX depending on the queue). The entry should
369 * @queue: The data queue (&struct data_queue) to which this entry belongs.
370 * @skb: The buffer which is currently being transmitted (for TX queue),
371 * or used to directly receive data in (for RX queue).
373 * @priv_data: Private data belonging to this queue entry. The pointer
374 * points to data specific to a particular driver and queue type.
381 struct data_queue *queue; member in struct:queue_entry
395 * @Q_INDEX: Index pointer to the current entry in the queue, if this entry is
396 * owned by the hardware then the queue is considered to be full.
401 * entry is not owned by the hardware the queue is considered to be empty.
415	 * @QUEUE_STARTED: The queue has been started. For RX queues this means the
419 * @QUEUE_PAUSED: The queue has been started but is currently paused.
420 * When this bit is set, the queue has been stopped in mac80211,
430 * struct data_queue: Data queue
432 * @rt2x00dev: Pointer to main &struct rt2x00dev where this queue belongs to.
434 * part of this queue.
435 * @qid: The queue identification, see &enum data_queue_qid.
438 * handling on this queue.
439 * @tx_lock: Spinlock to serialize tx operations on this queue.
443 * @count: Number of frames handled in the queue.
444 * @limit: Maximum number of entries in the queue.
445 * @threshold: Minimum number of free entries before queue is kicked by force.
446 * @length: Number of frames in queue.
447 * @index: Index pointers to entry positions in the queue,
450 * @aifs: The aifs value for outgoing frames (field ignored in RX queue).
451 * @cw_min: The cw min value for outgoing frames (field ignored in RX queue).
452 * @cw_max: The cw max value for outgoing frames (field ignored in RX queue).
453 * @data_size: Maximum data size for the frames in this queue.
454 * @desc_size: Hardware descriptor size for the data in this queue.
491 * queue_end - Return pointer to the last queue (HELPER MACRO).
502 * tx_queue_end - Return pointer to the last TX queue (HELPER MACRO).
507 * the end of the TX queue array.
513 * queue_next - Return pointer to next queue in list (HELPER MACRO).
514 * @__queue: Current queue for which we need the next queue
516 * Using the current queue address we take the address directly
517 * after the queue to take the next queue. Note that this macro
520 * &tx_queue_end for determining the end of the queue).
527 * @__entry: Pointer where the current queue entry will be stored in.
528 * @__start: Start queue pointer.
529 * @__end: End queue pointer.
541 * @__entry: Pointer where the current queue entry will be stored in.
551 * @__entry: Pointer where the current queue entry will be stored in.
562 * @__entry: Pointer where the current queue entry will be stored in.
571 * rt2x00queue_for_each_entry - Loop through all entries in the queue
572 * @queue: Pointer to @data_queue
578 * This will walk through all entries in the queue, in chronological
580 * and will walk through the queue until it reaches the @end pointer.
585 bool rt2x00queue_for_each_entry(struct data_queue *queue,
593 * rt2x00queue_empty - Check if the queue is empty.
594 * @queue: Queue to check if empty.
596 static inline int rt2x00queue_empty(struct data_queue *queue) rt2x00queue_empty() argument
598 return queue->length == 0; rt2x00queue_empty()
602 * rt2x00queue_full - Check if the queue is full.
603 * @queue: Queue to check if full.
605 static inline int rt2x00queue_full(struct data_queue *queue) rt2x00queue_full() argument
607 return queue->length == queue->limit; rt2x00queue_full()
611 * rt2x00queue_free - Check the number of available entries in queue.
612 * @queue: Queue to check.
614 static inline int rt2x00queue_available(struct data_queue *queue) rt2x00queue_available() argument
616 return queue->limit - queue->length; rt2x00queue_available()
620 * rt2x00queue_threshold - Check if the queue is below threshold
621 * @queue: Queue to check.
623 static inline int rt2x00queue_threshold(struct data_queue *queue) rt2x00queue_threshold() argument
625 return rt2x00queue_available(queue) < queue->threshold; rt2x00queue_threshold()
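The inline helpers above all derive from the same two fields: length (occupied entries) and limit (ring size). A short usage sketch of how the TX path combines them, mirroring rt2x00queue_write_tx_frame() and rt2x00mac_tx() elsewhere in this listing:

if (rt2x00queue_full(queue))		/* length == limit: no room at all   */
	return -ENOBUFS;

/* ... queue the frame ... */

if (rt2x00queue_threshold(queue))	/* available entries below threshold */
	rt2x00queue_pause_queue(queue);	/* throttle mac80211 until txdone    */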
H A Drt2800mmio.c55 const unsigned int txwi_size = entry->queue->winfo_size; rt2800mmio_write_tx_desc()
192 * For example, a tx queue rt2800mmio_txdone_find_entry()
199 * in the tx queue with a matching wcid. rt2800mmio_txdone_find_entry()
252 struct data_queue *queue; rt2800mmio_txdone() local
261 * Unknown queue, this shouldn't happen. Just drop rt2800mmio_txdone()
269 queue = rt2x00queue_get_tx_queue(rt2x00dev, qid); rt2800mmio_txdone()
270 if (unlikely(queue == NULL)) { rt2800mmio_txdone()
272 * The queue is NULL, this shouldn't happen. Stop rt2800mmio_txdone()
275 rt2x00_warn(rt2x00dev, "Got TX status for an unavailable queue %u, dropping\n", rt2800mmio_txdone()
280 if (unlikely(rt2x00queue_empty(queue))) { rt2800mmio_txdone()
282 * The queue is empty. Stop processing here rt2800mmio_txdone()
285 rt2x00_warn(rt2x00dev, "Got TX status for an empty queue %u, dropping\n", rt2800mmio_txdone()
294 if (!rt2x00queue_for_each_entry(queue, Q_INDEX_DONE, rt2800mmio_txdone()
301 if (!rt2x00queue_for_each_entry(queue, Q_INDEX_DONE, rt2800mmio_txdone()
304 rt2x00_warn(rt2x00dev, "No frame found for TX status on queue %u, dropping\n", rt2800mmio_txdone()
313 rt2x00queue_for_each_entry(queue, Q_INDEX_DONE, rt2800mmio_txdone()
556 void rt2800mmio_start_queue(struct data_queue *queue) rt2800mmio_start_queue() argument
558 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev; rt2800mmio_start_queue()
561 switch (queue->qid) { rt2800mmio_start_queue()
584 void rt2800mmio_kick_queue(struct data_queue *queue) rt2800mmio_kick_queue() argument
586 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev; rt2800mmio_kick_queue()
589 switch (queue->qid) { rt2800mmio_kick_queue()
594 entry = rt2x00queue_get_entry(queue, Q_INDEX); rt2800mmio_kick_queue()
595 rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX(queue->qid), rt2800mmio_kick_queue()
599 entry = rt2x00queue_get_entry(queue, Q_INDEX); rt2800mmio_kick_queue()
609 void rt2800mmio_stop_queue(struct data_queue *queue) rt2800mmio_stop_queue() argument
611 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev; rt2800mmio_stop_queue()
614 switch (queue->qid) { rt2800mmio_stop_queue()
646 void rt2800mmio_queue_init(struct data_queue *queue) rt2800mmio_queue_init() argument
648 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev; rt2800mmio_queue_init()
653 switch (queue->qid) { rt2800mmio_queue_init()
655 queue->limit = 128; rt2800mmio_queue_init()
656 queue->data_size = AGGREGATION_SIZE; rt2800mmio_queue_init()
657 queue->desc_size = RXD_DESC_SIZE; rt2800mmio_queue_init()
658 queue->winfo_size = rxwi_size; rt2800mmio_queue_init()
659 queue->priv_size = sizeof(struct queue_entry_priv_mmio); rt2800mmio_queue_init()
666 queue->limit = 64; rt2800mmio_queue_init()
667 queue->data_size = AGGREGATION_SIZE; rt2800mmio_queue_init()
668 queue->desc_size = TXD_DESC_SIZE; rt2800mmio_queue_init()
669 queue->winfo_size = txwi_size; rt2800mmio_queue_init()
670 queue->priv_size = sizeof(struct queue_entry_priv_mmio); rt2800mmio_queue_init()
674 queue->limit = 8; rt2800mmio_queue_init()
675 queue->data_size = 0; /* No DMA required for beacons */ rt2800mmio_queue_init()
676 queue->desc_size = TXD_DESC_SIZE; rt2800mmio_queue_init()
677 queue->winfo_size = txwi_size; rt2800mmio_queue_init()
678 queue->priv_size = sizeof(struct queue_entry_priv_mmio); rt2800mmio_queue_init()
698 if (entry->queue->qid == QID_RX) { rt2800mmio_get_entry_state()
714 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; rt2800mmio_clear_entry()
717 if (entry->queue->qid == QID_RX) { rt2800mmio_clear_entry()
H A Drt2800usb.c57 static void rt2800usb_start_queue(struct data_queue *queue) rt2800usb_start_queue() argument
59 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev; rt2800usb_start_queue()
62 switch (queue->qid) { rt2800usb_start_queue()
80 static void rt2800usb_stop_queue(struct data_queue *queue) rt2800usb_stop_queue() argument
82 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev; rt2800usb_stop_queue()
85 switch (queue->qid) { rt2800usb_stop_queue()
104 * test if there is an entry in any TX queue for which DMA is done
109 struct data_queue *queue; rt2800usb_txstatus_pending() local
111 tx_queue_for_each(rt2x00dev, queue) { tx_queue_for_each()
112 if (rt2x00queue_get_entry(queue, Q_INDEX_DMA_DONE) != tx_queue_for_each()
113 rt2x00queue_get_entry(queue, Q_INDEX_DONE)) tx_queue_for_each()
128 rt2x00_dbg(entry->queue->rt2x00dev, rt2800usb_entry_txstatus_timeout()
129 "TX status timeout for entry %d in queue %d\n", rt2800usb_entry_txstatus_timeout()
130 entry->entry_idx, entry->queue->qid); rt2800usb_entry_txstatus_timeout()
137 struct data_queue *queue; rt2800usb_txstatus_timeout() local
140 tx_queue_for_each(rt2x00dev, queue) { tx_queue_for_each()
141 entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE); tx_queue_for_each()
213 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; rt2800usb_tx_dma_done()
451 rt2x00_warn(rt2x00dev, "TX HW queue 0 timed out, invoke forced kick\n"); rt2800usb_watchdog()
466 rt2x00_warn(rt2x00dev, "TX HW queue 1 timed out, invoke forced kick\n"); rt2800usb_watchdog()
487 if (entry->queue->qid == QID_BEACON) rt2800usb_get_txwi()
526 skbdesc->desc_len = TXINFO_DESC_SIZE + entry->queue->winfo_size; rt2800usb_write_tx_desc()
581 rt2x00_dbg(entry->queue->rt2x00dev, rt2800usb_txdone_entry_check()
582 "TX status report missed for queue %d entry %d\n", rt2800usb_txdone_entry_check()
583 entry->queue->qid, entry->entry_idx); rt2800usb_txdone_entry_check()
592 struct data_queue *queue; rt2800usb_txdone() local
604 queue = rt2x00queue_get_tx_queue(rt2x00dev, qid); rt2800usb_txdone()
606 if (unlikely(rt2x00queue_empty(queue))) { rt2800usb_txdone()
607 rt2x00_dbg(rt2x00dev, "Got TX status for an empty queue %u, dropping\n", rt2800usb_txdone()
612 entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE); rt2800usb_txdone()
616 rt2x00_warn(rt2x00dev, "Data pending for entry %u in queue %u\n", rt2800usb_txdone()
631 struct data_queue *queue; rt2800usb_txdone_nostatus() local
641 tx_queue_for_each(rt2x00dev, queue) { tx_queue_for_each()
642 while (!rt2x00queue_empty(queue)) { tx_queue_for_each()
643 entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE); tx_queue_for_each()
717 rx_pkt_len > entry->queue->data_size)) { rt2800usb_fill_rxdone()
718 rt2x00_err(entry->queue->rt2x00dev, rt2800usb_fill_rxdone()
906 static void rt2800usb_queue_init(struct data_queue *queue) rt2800usb_queue_init() argument
908 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev; rt2800usb_queue_init()
913 switch (queue->qid) { rt2800usb_queue_init()
915 queue->limit = 128; rt2800usb_queue_init()
916 queue->data_size = AGGREGATION_SIZE; rt2800usb_queue_init()
917 queue->desc_size = RXINFO_DESC_SIZE; rt2800usb_queue_init()
918 queue->winfo_size = rxwi_size; rt2800usb_queue_init()
919 queue->priv_size = sizeof(struct queue_entry_priv_usb); rt2800usb_queue_init()
926 queue->limit = 16; rt2800usb_queue_init()
927 queue->data_size = AGGREGATION_SIZE; rt2800usb_queue_init()
928 queue->desc_size = TXINFO_DESC_SIZE; rt2800usb_queue_init()
929 queue->winfo_size = txwi_size; rt2800usb_queue_init()
930 queue->priv_size = sizeof(struct queue_entry_priv_usb); rt2800usb_queue_init()
934 queue->limit = 8; rt2800usb_queue_init()
935 queue->data_size = MGMT_FRAME_SIZE; rt2800usb_queue_init()
936 queue->desc_size = TXINFO_DESC_SIZE; rt2800usb_queue_init()
937 queue->winfo_size = txwi_size; rt2800usb_queue_init()
938 queue->priv_size = sizeof(struct queue_entry_priv_usb); rt2800usb_queue_init()
H A Drt2x00mac.c31 struct data_queue *queue, rt2x00mac_tx_rts_cts()
91 retval = rt2x00queue_write_tx_frame(queue, skb, NULL, true); rt2x00mac_tx_rts_cts()
107 struct data_queue *queue = NULL; rt2x00mac_tx() local
119 * Use the ATIM queue if appropriate and present. rt2x00mac_tx()
125 queue = rt2x00queue_get_tx_queue(rt2x00dev, qid); rt2x00mac_tx()
126 if (unlikely(!queue)) { rt2x00mac_tx()
128 "Attempt to send packet over invalid queue %d\n" rt2x00mac_tx()
134	 * If CTS/RTS is required, create and queue that frame first. rt2x00mac_tx()
145 if (rt2x00queue_available(queue) <= 1) rt2x00mac_tx()
148 if (rt2x00mac_tx_rts_cts(rt2x00dev, queue, skb)) rt2x00mac_tx()
152 if (unlikely(rt2x00queue_write_tx_frame(queue, skb, control->sta, false))) rt2x00mac_tx()
156 * Pausing queue has to be serialized with rt2x00lib_txdone(). Note rt2x00mac_tx()
160 spin_lock(&queue->tx_lock); rt2x00mac_tx()
161 if (rt2x00queue_threshold(queue)) rt2x00mac_tx()
162 rt2x00queue_pause_queue(queue); rt2x00mac_tx()
163 spin_unlock(&queue->tx_lock); rt2x00mac_tx()
168 spin_lock(&queue->tx_lock); rt2x00mac_tx()
169 rt2x00queue_pause_queue(queue); rt2x00mac_tx()
170 spin_unlock(&queue->tx_lock); rt2x00mac_tx()
203 struct data_queue *queue = rt2x00dev->bcn; rt2x00mac_add_interface() local
221 for (i = 0; i < queue->limit; i++) { rt2x00mac_add_interface()
222 entry = &queue->entries[i]; rt2x00mac_add_interface()
227 if (unlikely(i == queue->limit)) rt2x00mac_add_interface()
429	/* queue work to update the beacon template */ rt2x00mac_set_tim()
640 * -> stop beacon queue. rt2x00mac_bss_info_changed()
663 * -> start beacon queue. rt2x00mac_bss_info_changed()
714 struct data_queue *queue; rt2x00mac_conf_tx() local
716 queue = rt2x00queue_get_tx_queue(rt2x00dev, queue_idx); rt2x00mac_conf_tx()
717 if (unlikely(!queue)) rt2x00mac_conf_tx()
725 queue->cw_min = fls(params->cw_min); rt2x00mac_conf_tx()
727 queue->cw_min = 5; /* cw_min: 2^5 = 32. */ rt2x00mac_conf_tx()
730 queue->cw_max = fls(params->cw_max); rt2x00mac_conf_tx()
732	queue->cw_max = 10; /* cw_max: 2^10 = 1024. */ rt2x00mac_conf_tx()
734 queue->aifs = params->aifs; rt2x00mac_conf_tx()
735 queue->txop = params->txop; rt2x00mac_conf_tx()
738 "Configured TX queue %d - CWmin: %d, CWmax: %d, Aifs: %d, TXop: %d\n", rt2x00mac_conf_tx()
739 queue_idx, queue->cw_min, queue->cw_max, queue->aifs, rt2x00mac_conf_tx()
740 queue->txop); rt2x00mac_conf_tx()
759 struct data_queue *queue; rt2x00mac_flush() local
764 tx_queue_for_each(rt2x00dev, queue) rt2x00mac_flush()
765 rt2x00queue_flush_queue(queue, drop); rt2x00mac_flush()
840 struct data_queue *queue; rt2x00mac_get_ringparam() local
842 tx_queue_for_each(rt2x00dev, queue) { tx_queue_for_each()
843 *tx += queue->length; tx_queue_for_each()
844 *tx_max += queue->limit; tx_queue_for_each()
855 struct data_queue *queue; rt2x00mac_tx_frames_pending() local
857 tx_queue_for_each(rt2x00dev, queue) { tx_queue_for_each()
858 if (!rt2x00queue_empty(queue)) tx_queue_for_each()
30 rt2x00mac_tx_rts_cts(struct rt2x00_dev *rt2x00dev, struct data_queue *queue, struct sk_buff *frag_skb) rt2x00mac_tx_rts_cts() argument
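rt2x00mac_conf_tx() above converts mac80211's contention-window values (of the form 2^n - 1, e.g. 31 or 1023) into the exponent the hardware expects by using fls(); a zero from mac80211 falls back to the defaults set in rt2x00queue_init(). A compact sketch of that conversion (the ternary form is a simplification of the if/else in the excerpt):

queue->cw_min = params->cw_min ? fls(params->cw_min) : 5;	/* fls(31)   = 5,  2^5  = 32   */
queue->cw_max = params->cw_max ? fls(params->cw_max) : 10;	/* fls(1023) = 10, 2^10 = 1024 */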
H A Drt2x00.h361 * Entry in the beacon queue which belongs to
548 * queue initialization handlers
569 * Data queue handlers.
572 void (*start_queue) (struct data_queue *queue);
573 void (*kick_queue) (struct data_queue *queue);
574 void (*stop_queue) (struct data_queue *queue);
575 void (*flush_queue) (struct data_queue *queue, bool drop);
640 void (*queue_init)(struct data_queue *queue);
925 * Work queue for all work which should not be placed
952 * Data queue arrays for RX, TX, Beacon and ATIM.
1276 * rt2x00queue_get_tx_queue - Convert tx queue index to queue pointer
1278 * @queue: rt2x00 queue index (see &enum data_queue_qid).
1284 const enum data_queue_qid queue) rt2x00queue_get_tx_queue()
1286 if (queue < rt2x00dev->ops->tx_queues && rt2x00dev->tx) rt2x00queue_get_tx_queue()
1287 return &rt2x00dev->tx[queue]; rt2x00queue_get_tx_queue()
1289 if (queue == QID_ATIM) rt2x00queue_get_tx_queue()
1296 * rt2x00queue_get_entry - Get queue entry where the given index points to.
1297 * @queue: Pointer to &struct data_queue from where we obtain the entry.
1300 struct queue_entry *rt2x00queue_get_entry(struct data_queue *queue,
1304 * rt2x00queue_pause_queue - Pause a data queue
1305 * @queue: Pointer to &struct data_queue.
1307 * This function will pause the data queue locally, preventing
1308 * new frames to be added to the queue (while the hardware is
1311 void rt2x00queue_pause_queue(struct data_queue *queue);
1314 * rt2x00queue_unpause_queue - unpause a data queue
1315 * @queue: Pointer to &struct data_queue.
1317 * This function will unpause the data queue locally, allowing
1318 * new frames to be added to the queue again.
1320 void rt2x00queue_unpause_queue(struct data_queue *queue);
1323 * rt2x00queue_start_queue - Start a data queue
1324 * @queue: Pointer to &struct data_queue.
1326 * This function will start handling all pending frames in the queue.
1328 void rt2x00queue_start_queue(struct data_queue *queue);
1331 * rt2x00queue_stop_queue - Halt a data queue
1332 * @queue: Pointer to &struct data_queue.
1334 * This function will stop all pending frames in the queue.
1336 void rt2x00queue_stop_queue(struct data_queue *queue);
1339 * rt2x00queue_flush_queue - Flush a data queue
1340 * @queue: Pointer to &struct data_queue.
1343 * This function will flush the queue. After this call
1344 * the queue is guaranteed to be empty.
1346 void rt2x00queue_flush_queue(struct data_queue *queue, bool drop);
1455 struct ieee80211_vif *vif, u16 queue,
1283 rt2x00queue_get_tx_queue(struct rt2x00_dev *rt2x00dev, const enum data_queue_qid queue) rt2x00queue_get_tx_queue() argument
/linux-4.1.27/drivers/net/
H A Dxen-netfront.c87 /* IRQ name is queue name with "-tx" or "-rx" appended */
153 /* Multi-queue support */
203 static struct sk_buff *xennet_get_rx_skb(struct netfront_queue *queue, xennet_get_rx_skb() argument
207 struct sk_buff *skb = queue->rx_skbs[i]; xennet_get_rx_skb()
208 queue->rx_skbs[i] = NULL; xennet_get_rx_skb()
212 static grant_ref_t xennet_get_rx_ref(struct netfront_queue *queue, xennet_get_rx_ref() argument
216 grant_ref_t ref = queue->grant_rx_ref[i]; xennet_get_rx_ref()
217 queue->grant_rx_ref[i] = GRANT_INVALID_REF; xennet_get_rx_ref()
233 struct netfront_queue *queue = (struct netfront_queue *)data; rx_refill_timeout() local
234 napi_schedule(&queue->napi); rx_refill_timeout()
237 static int netfront_tx_slot_available(struct netfront_queue *queue) netfront_tx_slot_available() argument
239 return (queue->tx.req_prod_pvt - queue->tx.rsp_cons) < netfront_tx_slot_available()
243 static void xennet_maybe_wake_tx(struct netfront_queue *queue) xennet_maybe_wake_tx() argument
245 struct net_device *dev = queue->info->netdev; xennet_maybe_wake_tx()
246 struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, queue->id); xennet_maybe_wake_tx()
249 netfront_tx_slot_available(queue) && xennet_maybe_wake_tx()
251 netif_tx_wake_queue(netdev_get_tx_queue(dev, queue->id)); xennet_maybe_wake_tx()
255 static struct sk_buff *xennet_alloc_one_rx_buffer(struct netfront_queue *queue) xennet_alloc_one_rx_buffer() argument
260 skb = __netdev_alloc_skb(queue->info->netdev, xennet_alloc_one_rx_buffer()
275 skb->dev = queue->info->netdev; xennet_alloc_one_rx_buffer()
281 static void xennet_alloc_rx_buffers(struct netfront_queue *queue) xennet_alloc_rx_buffers() argument
283 RING_IDX req_prod = queue->rx.req_prod_pvt; xennet_alloc_rx_buffers()
286 if (unlikely(!netif_carrier_ok(queue->info->netdev))) xennet_alloc_rx_buffers()
289 for (req_prod = queue->rx.req_prod_pvt; xennet_alloc_rx_buffers()
290 req_prod - queue->rx.rsp_cons < NET_RX_RING_SIZE; xennet_alloc_rx_buffers()
298 skb = xennet_alloc_one_rx_buffer(queue); xennet_alloc_rx_buffers()
304 BUG_ON(queue->rx_skbs[id]); xennet_alloc_rx_buffers()
305 queue->rx_skbs[id] = skb; xennet_alloc_rx_buffers()
307 ref = gnttab_claim_grant_reference(&queue->gref_rx_head); xennet_alloc_rx_buffers()
309 queue->grant_rx_ref[id] = ref; xennet_alloc_rx_buffers()
313 req = RING_GET_REQUEST(&queue->rx, req_prod); xennet_alloc_rx_buffers()
315 queue->info->xbdev->otherend_id, xennet_alloc_rx_buffers()
323 queue->rx.req_prod_pvt = req_prod; xennet_alloc_rx_buffers()
326 if (req_prod - queue->rx.rsp_cons < NET_RX_SLOTS_MIN) { xennet_alloc_rx_buffers()
327 mod_timer(&queue->rx_refill_timer, jiffies + (HZ/10)); xennet_alloc_rx_buffers()
333 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->rx, notify); xennet_alloc_rx_buffers()
335 notify_remote_via_irq(queue->rx_irq); xennet_alloc_rx_buffers()
343 struct netfront_queue *queue = NULL; xennet_open() local
346 queue = &np->queues[i]; xennet_open()
347 napi_enable(&queue->napi); xennet_open()
349 spin_lock_bh(&queue->rx_lock); xennet_open()
351 xennet_alloc_rx_buffers(queue); xennet_open()
352 queue->rx.sring->rsp_event = queue->rx.rsp_cons + 1; xennet_open()
353 if (RING_HAS_UNCONSUMED_RESPONSES(&queue->rx)) xennet_open()
354 napi_schedule(&queue->napi); xennet_open()
356 spin_unlock_bh(&queue->rx_lock); xennet_open()
364 static void xennet_tx_buf_gc(struct netfront_queue *queue) xennet_tx_buf_gc() argument
370 BUG_ON(!netif_carrier_ok(queue->info->netdev)); xennet_tx_buf_gc()
373 prod = queue->tx.sring->rsp_prod; xennet_tx_buf_gc()
376 for (cons = queue->tx.rsp_cons; cons != prod; cons++) { xennet_tx_buf_gc()
379 txrsp = RING_GET_RESPONSE(&queue->tx, cons); xennet_tx_buf_gc()
384 skb = queue->tx_skbs[id].skb; xennet_tx_buf_gc()
386 queue->grant_tx_ref[id]) != 0)) { xennet_tx_buf_gc()
392 queue->grant_tx_ref[id], GNTMAP_readonly); xennet_tx_buf_gc()
394 &queue->gref_tx_head, queue->grant_tx_ref[id]); xennet_tx_buf_gc()
395 queue->grant_tx_ref[id] = GRANT_INVALID_REF; xennet_tx_buf_gc()
396 queue->grant_tx_page[id] = NULL; xennet_tx_buf_gc()
397 add_id_to_freelist(&queue->tx_skb_freelist, queue->tx_skbs, id); xennet_tx_buf_gc()
401 queue->tx.rsp_cons = prod; xennet_tx_buf_gc()
411 queue->tx.sring->rsp_event = xennet_tx_buf_gc()
412 prod + ((queue->tx.sring->req_prod - prod) >> 1) + 1; xennet_tx_buf_gc()
414 } while ((cons == prod) && (prod != queue->tx.sring->rsp_prod)); xennet_tx_buf_gc()
416 xennet_maybe_wake_tx(queue); xennet_tx_buf_gc()
420 struct netfront_queue *queue, struct sk_buff *skb, xennet_make_one_txreq()
429 id = get_id_from_freelist(&queue->tx_skb_freelist, queue->tx_skbs); xennet_make_one_txreq()
430 tx = RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++); xennet_make_one_txreq()
431 ref = gnttab_claim_grant_reference(&queue->gref_tx_head); xennet_make_one_txreq()
434 gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id, xennet_make_one_txreq()
437 queue->tx_skbs[id].skb = skb; xennet_make_one_txreq()
438 queue->grant_tx_page[id] = page; xennet_make_one_txreq()
439 queue->grant_tx_ref[id] = ref; xennet_make_one_txreq()
451 struct netfront_queue *queue, struct xen_netif_tx_request *tx, xennet_make_txreqs()
461 tx = xennet_make_one_txreq(queue, skb_get(skb), xennet_make_txreqs()
503 /* First, check if there is only one queue */ xennet_select_queue()
526 struct netfront_queue *queue = NULL; xennet_start_xmit() local
533 /* Determine which queue to transmit this SKB on */ xennet_start_xmit()
535 queue = &np->queues[queue_index]; xennet_start_xmit()
559 spin_lock_irqsave(&queue->tx_lock, flags); xennet_start_xmit()
564 spin_unlock_irqrestore(&queue->tx_lock, flags); xennet_start_xmit()
569 first_tx = tx = xennet_make_one_txreq(queue, skb, xennet_start_xmit()
587 RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++); xennet_start_xmit()
603 tx = xennet_make_txreqs(queue, tx, skb, page, offset, len); xennet_start_xmit()
608 tx = xennet_make_txreqs(queue, tx, skb, xennet_start_xmit()
616 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->tx, notify); xennet_start_xmit()
618 notify_remote_via_irq(queue->tx_irq); xennet_start_xmit()
626 xennet_tx_buf_gc(queue); xennet_start_xmit()
628 if (!netfront_tx_slot_available(queue)) xennet_start_xmit()
629 netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id)); xennet_start_xmit()
631 spin_unlock_irqrestore(&queue->tx_lock, flags); xennet_start_xmit()
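The xennet_start_xmit() and xennet_tx_buf_gc()/xennet_maybe_wake_tx() hits above implement per-queue flow control against the shared ring: the transmit path stops its netdev TX queue when the ring runs out of request slots, and the completion path wakes it once enough responses have been consumed. A condensed sketch, with conditions abbreviated from the excerpt:

/* transmit side (xennet_start_xmit) */
if (!netfront_tx_slot_available(queue))
	netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));

/* completion side (xennet_tx_buf_gc -> xennet_maybe_wake_tx) */
if (netif_tx_queue_stopped(dev_queue) &&
    netfront_tx_slot_available(queue) &&
    netif_running(dev))
	netif_tx_wake_queue(netdev_get_tx_queue(dev, queue->id));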
646 struct netfront_queue *queue; xennet_close() local
649 queue = &np->queues[i]; xennet_close()
650 napi_disable(&queue->napi); xennet_close()
655 static void xennet_move_rx_slot(struct netfront_queue *queue, struct sk_buff *skb, xennet_move_rx_slot() argument
658 int new = xennet_rxidx(queue->rx.req_prod_pvt); xennet_move_rx_slot()
660 BUG_ON(queue->rx_skbs[new]); xennet_move_rx_slot()
661 queue->rx_skbs[new] = skb; xennet_move_rx_slot()
662 queue->grant_rx_ref[new] = ref; xennet_move_rx_slot()
663 RING_GET_REQUEST(&queue->rx, queue->rx.req_prod_pvt)->id = new; xennet_move_rx_slot()
664 RING_GET_REQUEST(&queue->rx, queue->rx.req_prod_pvt)->gref = ref; xennet_move_rx_slot()
665 queue->rx.req_prod_pvt++; xennet_move_rx_slot()
668 static int xennet_get_extras(struct netfront_queue *queue, xennet_get_extras() argument
674 struct device *dev = &queue->info->netdev->dev; xennet_get_extras()
675 RING_IDX cons = queue->rx.rsp_cons; xennet_get_extras()
690 RING_GET_RESPONSE(&queue->rx, ++cons); xennet_get_extras()
703 skb = xennet_get_rx_skb(queue, cons); xennet_get_extras()
704 ref = xennet_get_rx_ref(queue, cons); xennet_get_extras()
705 xennet_move_rx_slot(queue, skb, ref); xennet_get_extras()
708 queue->rx.rsp_cons = cons; xennet_get_extras()
712 static int xennet_get_responses(struct netfront_queue *queue, xennet_get_responses() argument
718 struct device *dev = &queue->info->netdev->dev; xennet_get_responses()
719 RING_IDX cons = queue->rx.rsp_cons; xennet_get_responses()
720 struct sk_buff *skb = xennet_get_rx_skb(queue, cons); xennet_get_responses()
721 grant_ref_t ref = xennet_get_rx_ref(queue, cons); xennet_get_responses()
728 err = xennet_get_extras(queue, extras, rp); xennet_get_responses()
729 cons = queue->rx.rsp_cons; xennet_get_responses()
738 xennet_move_rx_slot(queue, skb, ref); xennet_get_responses()
759 gnttab_release_grant_reference(&queue->gref_rx_head, ref); xennet_get_responses()
774 rx = RING_GET_RESPONSE(&queue->rx, cons + slots); xennet_get_responses()
775 skb = xennet_get_rx_skb(queue, cons + slots); xennet_get_responses()
776 ref = xennet_get_rx_ref(queue, cons + slots); xennet_get_responses()
787 queue->rx.rsp_cons = cons + slots; xennet_get_responses()
821 static RING_IDX xennet_fill_frags(struct netfront_queue *queue, xennet_fill_frags() argument
826 RING_IDX cons = queue->rx.rsp_cons; xennet_fill_frags()
831 RING_GET_RESPONSE(&queue->rx, ++cons); xennet_fill_frags()
876 static int handle_incoming_queue(struct netfront_queue *queue, handle_incoming_queue() argument
879 struct netfront_stats *rx_stats = this_cpu_ptr(queue->info->rx_stats); handle_incoming_queue()
890 skb->protocol = eth_type_trans(skb, queue->info->netdev); handle_incoming_queue()
893 if (checksum_setup(queue->info->netdev, skb)) { handle_incoming_queue()
896 queue->info->netdev->stats.rx_errors++; handle_incoming_queue()
906 napi_gro_receive(&queue->napi, skb); handle_incoming_queue()
914 struct netfront_queue *queue = container_of(napi, struct netfront_queue, napi); xennet_poll() local
915 struct net_device *dev = queue->info->netdev; xennet_poll()
927 spin_lock(&queue->rx_lock); xennet_poll()
933 rp = queue->rx.sring->rsp_prod; xennet_poll()
936 i = queue->rx.rsp_cons; xennet_poll()
939 memcpy(rx, RING_GET_RESPONSE(&queue->rx, i), sizeof(*rx)); xennet_poll()
942 err = xennet_get_responses(queue, &rinfo, rp, &tmpq); xennet_poll()
949 i = queue->rx.rsp_cons; xennet_poll()
961 queue->rx.rsp_cons += skb_queue_len(&tmpq); xennet_poll()
975 i = xennet_fill_frags(queue, skb, &tmpq); xennet_poll()
984 queue->rx.rsp_cons = ++i; xennet_poll()
990 work_done -= handle_incoming_queue(queue, &rxq); xennet_poll()
992 xennet_alloc_rx_buffers(queue); xennet_poll()
999 RING_FINAL_CHECK_FOR_RESPONSES(&queue->rx, more_to_do); xennet_poll()
1004 spin_unlock(&queue->rx_lock); xennet_poll()
1055 static void xennet_release_tx_bufs(struct netfront_queue *queue) xennet_release_tx_bufs() argument
1062 if (skb_entry_is_link(&queue->tx_skbs[i])) xennet_release_tx_bufs()
1065 skb = queue->tx_skbs[i].skb; xennet_release_tx_bufs()
1066 get_page(queue->grant_tx_page[i]); xennet_release_tx_bufs()
1067 gnttab_end_foreign_access(queue->grant_tx_ref[i], xennet_release_tx_bufs()
1069 (unsigned long)page_address(queue->grant_tx_page[i])); xennet_release_tx_bufs()
1070 queue->grant_tx_page[i] = NULL; xennet_release_tx_bufs()
1071 queue->grant_tx_ref[i] = GRANT_INVALID_REF; xennet_release_tx_bufs()
1072 add_id_to_freelist(&queue->tx_skb_freelist, queue->tx_skbs, i); xennet_release_tx_bufs()
1077 static void xennet_release_rx_bufs(struct netfront_queue *queue) xennet_release_rx_bufs() argument
1081 spin_lock_bh(&queue->rx_lock); xennet_release_rx_bufs()
1087 skb = queue->rx_skbs[id]; xennet_release_rx_bufs()
1091 ref = queue->grant_rx_ref[id]; xennet_release_rx_bufs()
1103 queue->grant_rx_ref[id] = GRANT_INVALID_REF; xennet_release_rx_bufs()
1108 spin_unlock_bh(&queue->rx_lock); xennet_release_rx_bufs()
1169 struct netfront_queue *queue = dev_id; xennet_tx_interrupt() local
1172 spin_lock_irqsave(&queue->tx_lock, flags); xennet_tx_interrupt()
1173 xennet_tx_buf_gc(queue); xennet_tx_interrupt()
1174 spin_unlock_irqrestore(&queue->tx_lock, flags); xennet_tx_interrupt()
1181 struct netfront_queue *queue = dev_id; xennet_rx_interrupt() local
1182 struct net_device *dev = queue->info->netdev; xennet_rx_interrupt()
1185 RING_HAS_UNCONSUMED_RESPONSES(&queue->rx))) xennet_rx_interrupt()
1186 napi_schedule(&queue->napi); xennet_rx_interrupt()
1201 /* Poll each queue */ xennet_poll_controller()
1345 struct netfront_queue *queue = &info->queues[i]; xennet_disconnect_backend() local
1347 if (queue->tx_irq && (queue->tx_irq == queue->rx_irq)) xennet_disconnect_backend()
1348 unbind_from_irqhandler(queue->tx_irq, queue); xennet_disconnect_backend()
1349 if (queue->tx_irq && (queue->tx_irq != queue->rx_irq)) { xennet_disconnect_backend()
1350 unbind_from_irqhandler(queue->tx_irq, queue); xennet_disconnect_backend()
1351 unbind_from_irqhandler(queue->rx_irq, queue); xennet_disconnect_backend()
1353 queue->tx_evtchn = queue->rx_evtchn = 0; xennet_disconnect_backend()
1354 queue->tx_irq = queue->rx_irq = 0; xennet_disconnect_backend()
1357 napi_synchronize(&queue->napi); xennet_disconnect_backend()
1359 xennet_release_tx_bufs(queue); xennet_disconnect_backend()
1360 xennet_release_rx_bufs(queue); xennet_disconnect_backend()
1361 gnttab_free_grant_references(queue->gref_tx_head); xennet_disconnect_backend()
1362 gnttab_free_grant_references(queue->gref_rx_head); xennet_disconnect_backend()
1365 xennet_end_access(queue->tx_ring_ref, queue->tx.sring); xennet_disconnect_backend()
1366 xennet_end_access(queue->rx_ring_ref, queue->rx.sring); xennet_disconnect_backend()
1368 queue->tx_ring_ref = GRANT_INVALID_REF; xennet_disconnect_backend()
1369 queue->rx_ring_ref = GRANT_INVALID_REF; xennet_disconnect_backend()
1370 queue->tx.sring = NULL; xennet_disconnect_backend()
1371 queue->rx.sring = NULL; xennet_disconnect_backend()
1413 static int setup_netfront_single(struct netfront_queue *queue) setup_netfront_single() argument
1417 err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->tx_evtchn); setup_netfront_single()
1421 err = bind_evtchn_to_irqhandler(queue->tx_evtchn, setup_netfront_single()
1423 0, queue->info->netdev->name, queue); setup_netfront_single()
1426 queue->rx_evtchn = queue->tx_evtchn; setup_netfront_single()
1427 queue->rx_irq = queue->tx_irq = err; setup_netfront_single()
1432 xenbus_free_evtchn(queue->info->xbdev, queue->tx_evtchn); setup_netfront_single()
1433 queue->tx_evtchn = 0; setup_netfront_single()
1438 static int setup_netfront_split(struct netfront_queue *queue) setup_netfront_split() argument
1442 err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->tx_evtchn); setup_netfront_split()
1445 err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->rx_evtchn); setup_netfront_split()
1449 snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name), setup_netfront_split()
1450 "%s-tx", queue->name); setup_netfront_split()
1451 err = bind_evtchn_to_irqhandler(queue->tx_evtchn, setup_netfront_split()
1453 0, queue->tx_irq_name, queue); setup_netfront_split()
1456 queue->tx_irq = err; setup_netfront_split()
1458 snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name), setup_netfront_split()
1459 "%s-rx", queue->name); setup_netfront_split()
1460 err = bind_evtchn_to_irqhandler(queue->rx_evtchn, setup_netfront_split()
1462 0, queue->rx_irq_name, queue); setup_netfront_split()
1465 queue->rx_irq = err; setup_netfront_split()
1470 unbind_from_irqhandler(queue->tx_irq, queue); setup_netfront_split()
1471 queue->tx_irq = 0; setup_netfront_split()
1473 xenbus_free_evtchn(queue->info->xbdev, queue->rx_evtchn); setup_netfront_split()
1474 queue->rx_evtchn = 0; setup_netfront_split()
1476 xenbus_free_evtchn(queue->info->xbdev, queue->tx_evtchn); setup_netfront_split()
1477 queue->tx_evtchn = 0; setup_netfront_split()
1483 struct netfront_queue *queue, unsigned int feature_split_evtchn) setup_netfront()
1490 queue->tx_ring_ref = GRANT_INVALID_REF; setup_netfront()
1491 queue->rx_ring_ref = GRANT_INVALID_REF; setup_netfront()
1492 queue->rx.sring = NULL; setup_netfront()
1493 queue->tx.sring = NULL; setup_netfront()
1502 FRONT_RING_INIT(&queue->tx, txs, PAGE_SIZE); setup_netfront()
1507 queue->tx_ring_ref = gref; setup_netfront()
1516 FRONT_RING_INIT(&queue->rx, rxs, PAGE_SIZE); setup_netfront()
1521 queue->rx_ring_ref = gref; setup_netfront()
1524 err = setup_netfront_split(queue); setup_netfront()
1530 err = setup_netfront_single(queue); setup_netfront()
1541 gnttab_end_foreign_access_ref(queue->rx_ring_ref, 0); setup_netfront()
1545 gnttab_end_foreign_access_ref(queue->tx_ring_ref, 0); setup_netfront()
1554 * be run per-queue.
1556 static int xennet_init_queue(struct netfront_queue *queue) xennet_init_queue() argument
1561 spin_lock_init(&queue->tx_lock); xennet_init_queue()
1562 spin_lock_init(&queue->rx_lock); xennet_init_queue()
1564 init_timer(&queue->rx_refill_timer); xennet_init_queue()
1565 queue->rx_refill_timer.data = (unsigned long)queue; xennet_init_queue()
1566 queue->rx_refill_timer.function = rx_refill_timeout; xennet_init_queue()
1568 snprintf(queue->name, sizeof(queue->name), "%s-q%u", xennet_init_queue()
1569 queue->info->netdev->name, queue->id); xennet_init_queue()
1572 queue->tx_skb_freelist = 0; xennet_init_queue()
1574 skb_entry_set_link(&queue->tx_skbs[i], i+1); xennet_init_queue()
1575 queue->grant_tx_ref[i] = GRANT_INVALID_REF; xennet_init_queue()
1576 queue->grant_tx_page[i] = NULL; xennet_init_queue()
1581 queue->rx_skbs[i] = NULL; xennet_init_queue()
1582 queue->grant_rx_ref[i] = GRANT_INVALID_REF; xennet_init_queue()
1587 &queue->gref_tx_head) < 0) { xennet_init_queue()
1595 &queue->gref_rx_head) < 0) { xennet_init_queue()
1604 gnttab_free_grant_references(queue->gref_tx_head); xennet_init_queue()
1609 static int write_queue_xenstore_keys(struct netfront_queue *queue, write_queue_xenstore_keys() argument
1612 /* Write the queue-specific keys into XenStore in the traditional write_queue_xenstore_keys()
1613	 * way for a single queue, or in queue subkeys for multiple write_queue_xenstore_keys()
1616 struct xenbus_device *dev = queue->info->xbdev; write_queue_xenstore_keys()
1631 snprintf(path, pathsize, "%s/queue-%u", write_queue_xenstore_keys()
1632 dev->nodename, queue->id); write_queue_xenstore_keys()
1639 queue->tx_ring_ref); write_queue_xenstore_keys()
1646 queue->rx_ring_ref); write_queue_xenstore_keys()
1655 if (queue->tx_evtchn == queue->rx_evtchn) { write_queue_xenstore_keys()
1658 "event-channel", "%u", queue->tx_evtchn); write_queue_xenstore_keys()
1666 "event-channel-tx", "%u", queue->tx_evtchn); write_queue_xenstore_keys()
1673 "event-channel-rx", "%u", queue->rx_evtchn); write_queue_xenstore_keys()
1698 struct netfront_queue *queue = &info->queues[i]; xennet_destroy_queues() local
1701 napi_disable(&queue->napi); xennet_destroy_queues()
1702 del_timer_sync(&queue->rx_refill_timer); xennet_destroy_queues()
1703 netif_napi_del(&queue->napi); xennet_destroy_queues()
1726 struct netfront_queue *queue = &info->queues[i]; xennet_create_queues() local
1728 queue->id = i; xennet_create_queues()
1729 queue->info = info; xennet_create_queues()
1731 ret = xennet_init_queue(queue); xennet_create_queues()
1739 netif_napi_add(queue->info->netdev, &queue->napi, xennet_create_queues()
1742 napi_enable(&queue->napi); xennet_create_queues()
1766 struct netfront_queue *queue = NULL; talk_to_netback() local
1773 "multi-queue-max-queues", "%u", &max_queues); talk_to_netback()
1799 /* Create shared ring, alloc event channel -- for each queue */ talk_to_netback()
1801 queue = &info->queues[i]; talk_to_netback()
1802 err = setup_netfront(dev, queue, feature_split_evtchn); talk_to_netback()
1805 * queue on error, but we need to clean up talk_to_netback()
1832 err = xenbus_printf(xbt, dev->nodename, "multi-queue-num-queues", talk_to_netback()
1835 message = "writing multi-queue-num-queues"; talk_to_netback()
1839 /* Write the keys for each queue */ talk_to_netback()
1841 queue = &info->queues[i]; talk_to_netback()
1842 err = write_queue_xenstore_keys(queue, &xbt, 1); /* hierarchical */ talk_to_netback()
1848 /* The remaining keys are not queue-specific */ talk_to_netback()
1919 struct netfront_queue *queue = NULL; xennet_connect() local
1951 queue = &np->queues[j]; xennet_connect()
1953 notify_remote_via_irq(queue->tx_irq); xennet_connect()
1954 if (queue->tx_irq != queue->rx_irq) xennet_connect()
1955 notify_remote_via_irq(queue->rx_irq); xennet_connect()
1957 spin_lock_irq(&queue->tx_lock); xennet_connect()
1958 xennet_tx_buf_gc(queue); xennet_connect()
1959 spin_unlock_irq(&queue->tx_lock); xennet_connect()
1961 spin_lock_bh(&queue->rx_lock); xennet_connect()
1962 xennet_alloc_rx_buffers(queue); xennet_connect()
1963 spin_unlock_bh(&queue->rx_lock); xennet_connect()
419 xennet_make_one_txreq( struct netfront_queue *queue, struct sk_buff *skb, struct page *page, unsigned int offset, unsigned int len) xennet_make_one_txreq() argument
450 xennet_make_txreqs( struct netfront_queue *queue, struct xen_netif_tx_request *tx, struct sk_buff *skb, struct page *page, unsigned int offset, unsigned int len) xennet_make_txreqs() argument
1482 setup_netfront(struct xenbus_device *dev, struct netfront_queue *queue, unsigned int feature_split_evtchn) setup_netfront() argument
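Per queue, xen-netfront also wires up its own NAPI context: xennet_create_queues() registers xennet_poll() with netif_napi_add() and enables it, and the RX interrupt and refill-timer paths then simply schedule that queue's NAPI instance. Condensed from the hits above; the weight of 64 is assumed here (the usual NAPI default) since the excerpt truncates that argument:

/* setup (xennet_create_queues) */
netif_napi_add(queue->info->netdev, &queue->napi, xennet_poll, 64);
napi_enable(&queue->napi);

/* event paths (rx_refill_timeout, xennet_rx_interrupt) */
napi_schedule(&queue->napi);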
H A Deql.c140 static void eql_kill_one_slave(slave_queue_t *queue, slave_t *slave);
147 spin_lock(&eql->queue.lock); eql_timer()
148 head = &eql->queue.all_slaves; list_for_each_safe()
157 eql_kill_one_slave(&eql->queue, slave); list_for_each_safe()
161 spin_unlock(&eql->queue.lock);
186 spin_lock_init(&eql->queue.lock); eql_setup()
187 INIT_LIST_HEAD(&eql->queue.all_slaves); eql_setup()
188 eql->queue.master_dev = dev; eql_setup()
213 BUG_ON(!list_empty(&eql->queue.all_slaves)); eql_open()
223 static void eql_kill_one_slave(slave_queue_t *queue, slave_t *slave) eql_kill_one_slave() argument
226 queue->num_slaves--; eql_kill_one_slave()
232 static void eql_kill_slave_queue(slave_queue_t *queue) eql_kill_slave_queue() argument
236 spin_lock_bh(&queue->lock); eql_kill_slave_queue()
238 head = &queue->all_slaves; list_for_each_safe()
242 eql_kill_one_slave(queue, s); list_for_each_safe()
245 spin_unlock_bh(&queue->lock);
259 eql_kill_slave_queue(&eql->queue); eql_close()
297 /* queue->lock must be held */ __eql_schedule_slaves()
298 static slave_t *__eql_schedule_slaves(slave_queue_t *queue) __eql_schedule_slaves() argument
307 head = &queue->all_slaves; list_for_each_safe()
327 eql_kill_one_slave(queue, slave); list_for_each_safe()
338 spin_lock(&eql->queue.lock); eql_slave_xmit()
340 slave = __eql_schedule_slaves(&eql->queue); eql_slave_xmit()
354 spin_unlock(&eql->queue.lock); eql_slave_xmit()
363 /* queue->lock must be held */ __eql_find_slave_dev()
364 static slave_t *__eql_find_slave_dev(slave_queue_t *queue, struct net_device *dev) __eql_find_slave_dev() argument
368 head = &queue->all_slaves; list_for_each()
379 static inline int eql_is_full(slave_queue_t *queue) eql_is_full() argument
381 equalizer_t *eql = netdev_priv(queue->master_dev); eql_is_full()
383 if (queue->num_slaves >= eql->max_slaves) eql_is_full()
388 /* queue->lock must be held */ __eql_insert_slave()
389 static int __eql_insert_slave(slave_queue_t *queue, slave_t *slave) __eql_insert_slave() argument
391 if (!eql_is_full(queue)) { __eql_insert_slave()
394 duplicate_slave = __eql_find_slave_dev(queue, slave->dev); __eql_insert_slave()
396 eql_kill_one_slave(queue, duplicate_slave); __eql_insert_slave()
399 list_add(&slave->list, &queue->all_slaves); __eql_insert_slave()
400 queue->num_slaves++; __eql_insert_slave()
437 spin_lock_bh(&eql->queue.lock); eql_enslave()
438 ret = __eql_insert_slave(&eql->queue, s); eql_enslave()
442 spin_unlock_bh(&eql->queue.lock); eql_enslave()
466 spin_lock_bh(&eql->queue.lock); eql_emancipate()
468 slave_t *slave = __eql_find_slave_dev(&eql->queue, slave_dev); eql_emancipate()
470 eql_kill_one_slave(&eql->queue, slave); eql_emancipate()
474 spin_unlock_bh(&eql->queue.lock); eql_emancipate()
496 spin_lock_bh(&eql->queue.lock); eql_g_slave_cfg()
498 slave = __eql_find_slave_dev(&eql->queue, slave_dev); eql_g_slave_cfg()
504 spin_unlock_bh(&eql->queue.lock); eql_g_slave_cfg()
530 spin_lock_bh(&eql->queue.lock); eql_s_slave_cfg()
532 slave = __eql_find_slave_dev(&eql->queue, slave_dev); eql_s_slave_cfg()
540 spin_unlock_bh(&eql->queue.lock); eql_s_slave_cfg()
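
The eql.c hits follow one locking pattern throughout: every slave-list operation runs under queue->lock (spin_lock_bh from process context, spin_lock on the transmit path), and __eql_insert_slave() enforces both a capacity limit and at most one slave per device. A hedged userspace sketch of that insert logic, with a pthread mutex and a hand-rolled list standing in for the spinlock and list_head; all names are illustrative:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct slave { char dev[16]; struct slave *next; };

struct slave_queue {
    struct slave *head;
    int num_slaves, max_slaves;
    pthread_mutex_t lock;               /* stands in for queue->lock */
};

/* Insert a slave under the queue lock: refuse when full, and replace any
 * existing slave registered for the same device, roughly as
 * __eql_insert_slave() does via __eql_find_slave_dev() + eql_kill_one_slave(). */
static int insert_slave(struct slave_queue *q, struct slave *s)
{
    int ret = -1;

    pthread_mutex_lock(&q->lock);
    if (q->num_slaves < q->max_slaves) {
        for (struct slave **pp = &q->head; *pp; pp = &(*pp)->next) {
            if (!strcmp((*pp)->dev, s->dev)) {      /* duplicate device */
                struct slave *dup = *pp;
                *pp = dup->next;
                q->num_slaves--;
                free(dup);
                break;
            }
        }
        s->next = q->head;
        q->head = s;
        q->num_slaves++;
        ret = 0;
    }
    pthread_mutex_unlock(&q->lock);
    return ret;
}

int main(void)
{
    struct slave_queue q = { NULL, 0, 4, PTHREAD_MUTEX_INITIALIZER };
    struct slave *s = calloc(1, sizeof(*s));

    strcpy(s->dev, "eth1");
    printf("insert: %d, slaves: %d\n", insert_slave(&q, s), q.num_slaves);
    return 0;
}
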
/linux-4.1.27/drivers/misc/genwqe/
H A Dcard_ddcb.c22 * Device Driver Control Block (DDCB) queue support. Definition of
23 * interrupt handlers for queue support as well as triggering the
49 * Situation (1): Empty queue
91 static int queue_empty(struct ddcb_queue *queue) queue_empty() argument
93 return queue->ddcb_next == queue->ddcb_act; queue_empty()
96 static int queue_enqueued_ddcbs(struct ddcb_queue *queue) queue_enqueued_ddcbs() argument
98 if (queue->ddcb_next >= queue->ddcb_act) queue_enqueued_ddcbs()
99 return queue->ddcb_next - queue->ddcb_act; queue_enqueued_ddcbs()
101 return queue->ddcb_max - (queue->ddcb_act - queue->ddcb_next); queue_enqueued_ddcbs()
104 static int queue_free_ddcbs(struct ddcb_queue *queue) queue_free_ddcbs() argument
106 int free_ddcbs = queue->ddcb_max - queue_enqueued_ddcbs(queue) - 1; queue_free_ddcbs()
115 * Use of the PRIV field in the DDCB for queue debugging:
172 static void print_ddcb_info(struct genwqe_dev *cd, struct ddcb_queue *queue) print_ddcb_info() argument
183 cd->card_idx, queue->ddcb_act, queue->ddcb_next); print_ddcb_info()
185 pddcb = queue->ddcb_vaddr; print_ddcb_info()
186 for (i = 0; i < queue->ddcb_max; i++) { print_ddcb_info()
189 i == queue->ddcb_act ? '>' : ' ', print_ddcb_info()
246 * This function will also return true if the state of the queue is
259 * @queue: queue this operation should be done on
262 * Start execution of a DDCB by tapping or appending to the queue via NEXT enqueue_ddcb()
269 * 2 if DDCB queue is tapped via register/simulation
274 static int enqueue_ddcb(struct genwqe_dev *cd, struct ddcb_queue *queue, enqueue_ddcb() argument
291 prev_no = (ddcb_no == 0) ? queue->ddcb_max - 1 : ddcb_no - 1; enqueue_ddcb()
292 prev_ddcb = &queue->ddcb_vaddr[prev_no]; enqueue_ddcb()
313 return RET_DDCB_APPENDED; /* appended to queue */ enqueue_ddcb()
321 __genwqe_writeq(cd, queue->IO_QUEUE_OFFSET, num); /* start queue */ enqueue_ddcb()
339 struct ddcb_queue *queue = req->queue; copy_ddcb_results() local
340 struct ddcb *pddcb = &queue->ddcb_vaddr[req->num]; copy_ddcb_results()
355 queue->ddcb_max - 1 : ddcb_no - 1; copy_ddcb_results()
356 struct ddcb *prev_pddcb = &queue->ddcb_vaddr[prev_no]; copy_ddcb_results()
366 * genwqe_check_ddcb_queue() - Checks DDCB queue for completed work requests.
372 struct ddcb_queue *queue) genwqe_check_ddcb_queue()
378 spin_lock_irqsave(&queue->ddcb_lock, flags); genwqe_check_ddcb_queue()
381 while (!queue_empty(queue) && (ddcbs_finished < queue->ddcb_max)) { genwqe_check_ddcb_queue()
387 pddcb = &queue->ddcb_vaddr[queue->ddcb_act]; genwqe_check_ddcb_queue()
396 req = queue->ddcb_req[queue->ddcb_act]; genwqe_check_ddcb_queue()
407 * In case of seeing the queue in an inconsistent state genwqe_check_ddcb_queue()
408 * we read the errcnts and the queue status to provide genwqe_check_ddcb_queue()
414 u64 ddcb_offs = (u64)pddcb - (u64)queue->ddcb_vaddr; genwqe_check_ddcb_queue()
416 errcnts = __genwqe_readq(cd, queue->IO_QUEUE_ERRCNTS); genwqe_check_ddcb_queue()
417 status = __genwqe_readq(cd, queue->IO_QUEUE_STATUS); genwqe_check_ddcb_queue()
423 queue->ddcb_daddr + ddcb_offs); genwqe_check_ddcb_queue()
426 copy_ddcb_results(req, queue->ddcb_act); genwqe_check_ddcb_queue()
427 queue->ddcb_req[queue->ddcb_act] = NULL; /* take from queue */ genwqe_check_ddcb_queue()
448 queue->ddcbs_completed++; genwqe_check_ddcb_queue()
449 queue->ddcbs_in_flight--; genwqe_check_ddcb_queue()
452 processes on the busy queue */ genwqe_check_ddcb_queue()
453 wake_up_interruptible(&queue->ddcb_waitqs[queue->ddcb_act]); genwqe_check_ddcb_queue()
454 wake_up_interruptible(&queue->busy_waitq); genwqe_check_ddcb_queue()
457 queue->ddcb_act = (queue->ddcb_act + 1) % queue->ddcb_max; genwqe_check_ddcb_queue()
462 spin_unlock_irqrestore(&queue->ddcb_lock, flags); genwqe_check_ddcb_queue()
481 * queue.
487 struct ddcb_queue *queue; __genwqe_wait_ddcb() local
493 queue = req->queue; __genwqe_wait_ddcb()
494 if (queue == NULL) __genwqe_wait_ddcb()
498 if (ddcb_no >= queue->ddcb_max) __genwqe_wait_ddcb()
501 rc = wait_event_interruptible_timeout(queue->ddcb_waitqs[ddcb_no], __genwqe_wait_ddcb()
512 struct ddcb_queue *queue = req->queue; __genwqe_wait_ddcb() local
520 genwqe_check_ddcb_queue(cd, req->queue); __genwqe_wait_ddcb()
530 __genwqe_readq(cd, queue->IO_QUEUE_STATUS)); __genwqe_wait_ddcb()
532 pddcb = &queue->ddcb_vaddr[req->num]; __genwqe_wait_ddcb()
535 print_ddcb_info(cd, req->queue); __genwqe_wait_ddcb()
572 struct ddcb_queue *queue, get_next_ddcb()
578 if (queue_free_ddcbs(queue) == 0) /* queue is full */ get_next_ddcb()
582 pddcb = &queue->ddcb_vaddr[queue->ddcb_next]; get_next_ddcb()
589 *num = queue->ddcb_next; /* internal DDCB number */ get_next_ddcb()
590 queue->ddcb_next = (queue->ddcb_next + 1) % queue->ddcb_max; get_next_ddcb()
605 pddcb->seqnum_16 = cpu_to_be16(queue->ddcb_seq++); get_next_ddcb()
629 struct ddcb_queue *queue = req->queue; __genwqe_purge_ddcb() local
642 pddcb = &queue->ddcb_vaddr[req->num]; __genwqe_purge_ddcb()
646 spin_lock_irqsave(&queue->ddcb_lock, flags); __genwqe_purge_ddcb()
669 spin_unlock_irqrestore(&queue->ddcb_lock, flags); __genwqe_purge_ddcb()
685 queue->ddcbs_in_flight--; __genwqe_purge_ddcb()
686 queue->ddcb_req[req->num] = NULL; /* delete from array */ __genwqe_purge_ddcb()
693 * DDCB in the queue. To do that, we must update __genwqe_purge_ddcb()
701 (queue->ddcb_act == req->num)) { __genwqe_purge_ddcb()
702 queue->ddcb_act = ((queue->ddcb_act + 1) % __genwqe_purge_ddcb()
703 queue->ddcb_max); __genwqe_purge_ddcb()
706 spin_unlock_irqrestore(&queue->ddcb_lock, flags); __genwqe_purge_ddcb()
711 * If the card is dead and the queue is forced to stop, we __genwqe_purge_ddcb()
712 * might see this in the queue status register. __genwqe_purge_ddcb()
714 queue_status = __genwqe_readq(cd, queue->IO_QUEUE_STATUS); __genwqe_purge_ddcb()
724 print_ddcb_info(cd, req->queue); __genwqe_purge_ddcb()
763 struct ddcb_queue *queue; __genwqe_enqueue_ddcb() local
776 queue = req->queue = &cd->queue; __genwqe_enqueue_ddcb()
782 genwqe_check_ddcb_queue(cd, queue); __genwqe_enqueue_ddcb()
789 spin_lock_irqsave(&queue->ddcb_lock, flags); __genwqe_enqueue_ddcb()
791 pddcb = get_next_ddcb(cd, queue, &req->num); /* get ptr and num */ __genwqe_enqueue_ddcb()
795 spin_unlock_irqrestore(&queue->ddcb_lock, flags); __genwqe_enqueue_ddcb()
798 queue->return_on_busy++; __genwqe_enqueue_ddcb()
802 queue->wait_on_busy++; __genwqe_enqueue_ddcb()
803 rc = wait_event_interruptible(queue->busy_waitq, __genwqe_enqueue_ddcb()
804 queue_free_ddcbs(queue) != 0); __genwqe_enqueue_ddcb()
813 if (queue->ddcb_req[req->num] != NULL) { __genwqe_enqueue_ddcb()
814 spin_unlock_irqrestore(&queue->ddcb_lock, flags); __genwqe_enqueue_ddcb()
822 queue->ddcb_req[req->num] = req; __genwqe_enqueue_ddcb()
830 * stop the queue in those cases for this command. XDIR = 1 __genwqe_enqueue_ddcb()
896 enqueue_ddcb(cd, queue, pddcb, req->num); __genwqe_enqueue_ddcb()
897 queue->ddcbs_in_flight++; __genwqe_enqueue_ddcb()
899 if (queue->ddcbs_in_flight > queue->ddcbs_max_in_flight) __genwqe_enqueue_ddcb()
900 queue->ddcbs_max_in_flight = queue->ddcbs_in_flight; __genwqe_enqueue_ddcb()
903 spin_unlock_irqrestore(&queue->ddcb_lock, flags); __genwqe_enqueue_ddcb()
978 * We use this as a condition for our wait-queue code.
984 struct ddcb_queue *queue = &cd->queue; genwqe_next_ddcb_ready() local
986 spin_lock_irqsave(&queue->ddcb_lock, flags); genwqe_next_ddcb_ready()
988 if (queue_empty(queue)) { /* empty queue */ genwqe_next_ddcb_ready()
989 spin_unlock_irqrestore(&queue->ddcb_lock, flags); genwqe_next_ddcb_ready()
993 pddcb = &queue->ddcb_vaddr[queue->ddcb_act]; genwqe_next_ddcb_ready()
995 spin_unlock_irqrestore(&queue->ddcb_lock, flags); genwqe_next_ddcb_ready()
999 spin_unlock_irqrestore(&queue->ddcb_lock, flags); genwqe_next_ddcb_ready()
1007 * queue. This is needed for statistics as well as a condition if we want
1014 struct ddcb_queue *queue = &cd->queue; genwqe_ddcbs_in_flight() local
1016 spin_lock_irqsave(&queue->ddcb_lock, flags); genwqe_ddcbs_in_flight()
1017 ddcbs_in_flight += queue->ddcbs_in_flight; genwqe_ddcbs_in_flight()
1018 spin_unlock_irqrestore(&queue->ddcb_lock, flags); genwqe_ddcbs_in_flight()
1023 static int setup_ddcb_queue(struct genwqe_dev *cd, struct ddcb_queue *queue) setup_ddcb_queue() argument
1036 queue->ddcbs_in_flight = 0; /* statistics */ setup_ddcb_queue()
1037 queue->ddcbs_max_in_flight = 0; setup_ddcb_queue()
1038 queue->ddcbs_completed = 0; setup_ddcb_queue()
1039 queue->return_on_busy = 0; setup_ddcb_queue()
1040 queue->wait_on_busy = 0; setup_ddcb_queue()
1042 queue->ddcb_seq = 0x100; /* start sequence number */ setup_ddcb_queue()
1043 queue->ddcb_max = genwqe_ddcb_max; /* module parameter */ setup_ddcb_queue()
1044 queue->ddcb_vaddr = __genwqe_alloc_consistent(cd, queue_size, setup_ddcb_queue()
1045 &queue->ddcb_daddr); setup_ddcb_queue()
1046 if (queue->ddcb_vaddr == NULL) { setup_ddcb_queue()
1051 memset(queue->ddcb_vaddr, 0, queue_size); setup_ddcb_queue()
1053 queue->ddcb_req = kzalloc(sizeof(struct ddcb_requ *) * setup_ddcb_queue()
1054 queue->ddcb_max, GFP_KERNEL); setup_ddcb_queue()
1055 if (!queue->ddcb_req) { setup_ddcb_queue()
1060 queue->ddcb_waitqs = kzalloc(sizeof(wait_queue_head_t) * setup_ddcb_queue()
1061 queue->ddcb_max, GFP_KERNEL); setup_ddcb_queue()
1062 if (!queue->ddcb_waitqs) { setup_ddcb_queue()
1067 for (i = 0; i < queue->ddcb_max; i++) { setup_ddcb_queue()
1068 pddcb = &queue->ddcb_vaddr[i]; /* DDCBs */ setup_ddcb_queue()
1072 queue->ddcb_req[i] = NULL; /* requests */ setup_ddcb_queue()
1073 init_waitqueue_head(&queue->ddcb_waitqs[i]); /* waitqueues */ setup_ddcb_queue()
1076 queue->ddcb_act = 0; setup_ddcb_queue()
1077 queue->ddcb_next = 0; /* queue is empty */ setup_ddcb_queue()
1079 spin_lock_init(&queue->ddcb_lock); setup_ddcb_queue()
1080 init_waitqueue_head(&queue->busy_waitq); setup_ddcb_queue()
1082 val64 = ((u64)(queue->ddcb_max - 1) << 8); /* lastptr */ setup_ddcb_queue()
1083 __genwqe_writeq(cd, queue->IO_QUEUE_CONFIG, 0x07); /* iCRC/vCRC */ setup_ddcb_queue()
1084 __genwqe_writeq(cd, queue->IO_QUEUE_SEGMENT, queue->ddcb_daddr); setup_ddcb_queue()
1085 __genwqe_writeq(cd, queue->IO_QUEUE_INITSQN, queue->ddcb_seq); setup_ddcb_queue()
1086 __genwqe_writeq(cd, queue->IO_QUEUE_WRAP, val64); setup_ddcb_queue()
1090 kfree(queue->ddcb_req); setup_ddcb_queue()
1091 queue->ddcb_req = NULL; setup_ddcb_queue()
1093 __genwqe_free_consistent(cd, queue_size, queue->ddcb_vaddr, setup_ddcb_queue()
1094 queue->ddcb_daddr); setup_ddcb_queue()
1095 queue->ddcb_vaddr = NULL; setup_ddcb_queue()
1096 queue->ddcb_daddr = 0ull; setup_ddcb_queue()
1101 static int ddcb_queue_initialized(struct ddcb_queue *queue) ddcb_queue_initialized() argument
1103 return queue->ddcb_vaddr != NULL; ddcb_queue_initialized()
1106 static void free_ddcb_queue(struct genwqe_dev *cd, struct ddcb_queue *queue) free_ddcb_queue() argument
1110 queue_size = roundup(queue->ddcb_max * sizeof(struct ddcb), PAGE_SIZE); free_ddcb_queue()
1112 kfree(queue->ddcb_req); free_ddcb_queue()
1113 queue->ddcb_req = NULL; free_ddcb_queue()
1115 if (queue->ddcb_vaddr) { free_ddcb_queue()
1116 __genwqe_free_consistent(cd, queue_size, queue->ddcb_vaddr, free_ddcb_queue()
1117 queue->ddcb_daddr); free_ddcb_queue()
1118 queue->ddcb_vaddr = NULL; free_ddcb_queue()
1119 queue->ddcb_daddr = 0ull; free_ddcb_queue()
1130 * In case of fatal FIR error the queue is stopped, such that genwqe_pf_isr()
1137 * Checking for errors before kicking the queue might be genwqe_pf_isr()
1183 * genwqe_card_thread() - Work thread for the DDCB queue
1197 genwqe_check_ddcb_queue(cd, &cd->queue); genwqe_card_thread()
1223 * genwqe_setup_service_layer() - Setup DDCB queue
1233 struct ddcb_queue *queue; genwqe_setup_service_layer() local
1246 queue = &cd->queue; genwqe_setup_service_layer()
1247 queue->IO_QUEUE_CONFIG = IO_SLC_QUEUE_CONFIG; genwqe_setup_service_layer()
1248 queue->IO_QUEUE_STATUS = IO_SLC_QUEUE_STATUS; genwqe_setup_service_layer()
1249 queue->IO_QUEUE_SEGMENT = IO_SLC_QUEUE_SEGMENT; genwqe_setup_service_layer()
1250 queue->IO_QUEUE_INITSQN = IO_SLC_QUEUE_INITSQN; genwqe_setup_service_layer()
1251 queue->IO_QUEUE_OFFSET = IO_SLC_QUEUE_OFFSET; genwqe_setup_service_layer()
1252 queue->IO_QUEUE_WRAP = IO_SLC_QUEUE_WRAP; genwqe_setup_service_layer()
1253 queue->IO_QUEUE_WTIME = IO_SLC_QUEUE_WTIME; genwqe_setup_service_layer()
1254 queue->IO_QUEUE_ERRCNTS = IO_SLC_QUEUE_ERRCNTS; genwqe_setup_service_layer()
1255 queue->IO_QUEUE_LRW = IO_SLC_QUEUE_LRW; genwqe_setup_service_layer()
1257 rc = setup_ddcb_queue(cd, queue); genwqe_setup_service_layer()
1305 free_ddcb_queue(cd, queue); genwqe_setup_service_layer()
1321 struct ddcb_queue *queue = &cd->queue; queue_wake_up_all() local
1323 spin_lock_irqsave(&queue->ddcb_lock, flags); queue_wake_up_all()
1325 for (i = 0; i < queue->ddcb_max; i++) queue_wake_up_all()
1326 wake_up_interruptible(&queue->ddcb_waitqs[queue->ddcb_act]); queue_wake_up_all()
1328 wake_up_interruptible(&queue->busy_waitq); queue_wake_up_all()
1329 spin_unlock_irqrestore(&queue->ddcb_lock, flags); queue_wake_up_all()
1347 struct ddcb_queue *queue = &cd->queue; genwqe_finish_queue() local
1349 if (!ddcb_queue_initialized(queue)) genwqe_finish_queue()
1356 /* Wake up all requests in the DDCB queue such that they genwqe_finish_queue()
1368 " DEBUG [%d/%d] waiting for queue to get empty: %d requests!\n", genwqe_finish_queue()
1373 * 16 DDCB queues, each queue has e.g. 32 entries, genwqe_finish_queue()
1381 dev_err(&pci_dev->dev, " [%s] err: queue is not empty!!\n", genwqe_finish_queue()
1389 * genwqe_release_service_layer() - Shutdown DDCB queue
1398 if (!ddcb_queue_initialized(&cd->queue)) genwqe_release_service_layer()
1409 free_ddcb_queue(cd, &cd->queue); genwqe_release_service_layer()
371 genwqe_check_ddcb_queue(struct genwqe_dev *cd, struct ddcb_queue *queue) genwqe_check_ddcb_queue() argument
571 get_next_ddcb(struct genwqe_dev *cd, struct ddcb_queue *queue, int *num) get_next_ddcb() argument
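
The small helpers at the top of card_ddcb.c treat ddcb_act and ddcb_next as head and tail of a ring of ddcb_max entries, keeping one slot unused so that next == act can only ever mean "empty". A standalone sketch of that occupancy arithmetic, reusing the field names from the hits but touching no hardware:

#include <assert.h>

struct ddcb_ring {
    int ddcb_act;   /* oldest enqueued entry (head) */
    int ddcb_next;  /* next free slot (tail) */
    int ddcb_max;   /* ring size */
};

static int ring_empty(const struct ddcb_ring *q)
{
    return q->ddcb_next == q->ddcb_act;
}

static int ring_enqueued(const struct ddcb_ring *q)
{
    if (q->ddcb_next >= q->ddcb_act)
        return q->ddcb_next - q->ddcb_act;
    return q->ddcb_max - (q->ddcb_act - q->ddcb_next);   /* wrapped case */
}

/* One slot is kept free so a full ring never looks identical to an empty one. */
static int ring_free(const struct ddcb_ring *q)
{
    return q->ddcb_max - ring_enqueued(q) - 1;
}

int main(void)
{
    struct ddcb_ring q = { .ddcb_act = 30, .ddcb_next = 2, .ddcb_max = 32 };

    assert(!ring_empty(&q));
    assert(ring_enqueued(&q) == 4);   /* wrapped: slots 30, 31, 0, 1 */
    assert(ring_free(&q) == 27);
    return 0;
}
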
H A Dcard_debugfs.c236 struct ddcb_queue *queue; genwqe_ddcb_info_show() local
239 queue = &cd->queue; genwqe_ddcb_info_show()
250 queue->ddcb_max, (long long)queue->ddcb_daddr, genwqe_ddcb_info_show()
251 (long long)queue->ddcb_daddr + genwqe_ddcb_info_show()
252 (queue->ddcb_max * DDCB_LENGTH), genwqe_ddcb_info_show()
253 (long long)queue->ddcb_vaddr, queue->ddcbs_in_flight, genwqe_ddcb_info_show()
254 queue->ddcbs_max_in_flight, queue->ddcbs_completed, genwqe_ddcb_info_show()
255 queue->return_on_busy, queue->wait_on_busy, genwqe_ddcb_info_show()
268 queue->IO_QUEUE_CONFIG, genwqe_ddcb_info_show()
269 __genwqe_readq(cd, queue->IO_QUEUE_CONFIG), genwqe_ddcb_info_show()
270 queue->IO_QUEUE_STATUS, genwqe_ddcb_info_show()
271 __genwqe_readq(cd, queue->IO_QUEUE_STATUS), genwqe_ddcb_info_show()
272 queue->IO_QUEUE_SEGMENT, genwqe_ddcb_info_show()
273 __genwqe_readq(cd, queue->IO_QUEUE_SEGMENT), genwqe_ddcb_info_show()
274 queue->IO_QUEUE_INITSQN, genwqe_ddcb_info_show()
275 __genwqe_readq(cd, queue->IO_QUEUE_INITSQN), genwqe_ddcb_info_show()
276 queue->IO_QUEUE_WRAP, genwqe_ddcb_info_show()
277 __genwqe_readq(cd, queue->IO_QUEUE_WRAP), genwqe_ddcb_info_show()
278 queue->IO_QUEUE_OFFSET, genwqe_ddcb_info_show()
279 __genwqe_readq(cd, queue->IO_QUEUE_OFFSET), genwqe_ddcb_info_show()
280 queue->IO_QUEUE_WTIME, genwqe_ddcb_info_show()
281 __genwqe_readq(cd, queue->IO_QUEUE_WTIME), genwqe_ddcb_info_show()
282 queue->IO_QUEUE_ERRCNTS, genwqe_ddcb_info_show()
283 __genwqe_readq(cd, queue->IO_QUEUE_ERRCNTS), genwqe_ddcb_info_show()
284 queue->IO_QUEUE_LRW, genwqe_ddcb_info_show()
285 __genwqe_readq(cd, queue->IO_QUEUE_LRW)); genwqe_ddcb_info_show()
288 queue->ddcb_act, queue->ddcb_next); genwqe_ddcb_info_show()
290 pddcb = queue->ddcb_vaddr; genwqe_ddcb_info_show()
291 for (i = 0; i < queue->ddcb_max; i++) { genwqe_ddcb_info_show()
/linux-4.1.27/drivers/net/ethernet/ibm/ehea/
H A Dehea_qmr.h210 static inline void *hw_qeit_calc(struct hw_queue *queue, u64 q_offset) hw_qeit_calc() argument
214 if (q_offset >= queue->queue_length) hw_qeit_calc()
215 q_offset -= queue->queue_length; hw_qeit_calc()
216 current_page = (queue->queue_pages)[q_offset >> EHEA_PAGESHIFT]; hw_qeit_calc()
220 static inline void *hw_qeit_get(struct hw_queue *queue) hw_qeit_get() argument
222 return hw_qeit_calc(queue, queue->current_q_offset); hw_qeit_get()
225 static inline void hw_qeit_inc(struct hw_queue *queue) hw_qeit_inc() argument
227 queue->current_q_offset += queue->qe_size; hw_qeit_inc()
228 if (queue->current_q_offset >= queue->queue_length) { hw_qeit_inc()
229 queue->current_q_offset = 0; hw_qeit_inc()
231 queue->toggle_state = (~queue->toggle_state) & 1; hw_qeit_inc()
235 static inline void *hw_qeit_get_inc(struct hw_queue *queue) hw_qeit_get_inc() argument
237 void *retvalue = hw_qeit_get(queue); hw_qeit_get_inc()
238 hw_qeit_inc(queue); hw_qeit_get_inc()
242 static inline void *hw_qeit_get_inc_valid(struct hw_queue *queue) hw_qeit_get_inc_valid() argument
244 struct ehea_cqe *retvalue = hw_qeit_get(queue); hw_qeit_get_inc_valid()
248 if ((valid >> 7) == (queue->toggle_state & 1)) { hw_qeit_get_inc_valid()
250 hw_qeit_inc(queue); hw_qeit_get_inc_valid()
251 pref = hw_qeit_calc(queue, queue->current_q_offset); hw_qeit_get_inc_valid()
259 static inline void *hw_qeit_get_valid(struct hw_queue *queue) hw_qeit_get_valid() argument
261 struct ehea_cqe *retvalue = hw_qeit_get(queue); hw_qeit_get_valid()
265 pref = hw_qeit_calc(queue, queue->current_q_offset); hw_qeit_get_valid()
270 if (!((valid >> 7) == (queue->toggle_state & 1))) hw_qeit_get_valid()
275 static inline void *hw_qeit_reset(struct hw_queue *queue) hw_qeit_reset() argument
277 queue->current_q_offset = 0; hw_qeit_reset()
278 return hw_qeit_get(queue); hw_qeit_reset()
281 static inline void *hw_qeit_eq_get_inc(struct hw_queue *queue) hw_qeit_eq_get_inc() argument
283 u64 last_entry_in_q = queue->queue_length - queue->qe_size; hw_qeit_eq_get_inc()
286 retvalue = hw_qeit_get(queue); hw_qeit_eq_get_inc()
287 queue->current_q_offset += queue->qe_size; hw_qeit_eq_get_inc()
288 if (queue->current_q_offset > last_entry_in_q) { hw_qeit_eq_get_inc()
289 queue->current_q_offset = 0; hw_qeit_eq_get_inc()
290 queue->toggle_state = (~queue->toggle_state) & 1; hw_qeit_eq_get_inc()
295 static inline void *hw_eqit_eq_get_inc_valid(struct hw_queue *queue) hw_eqit_eq_get_inc_valid() argument
297 void *retvalue = hw_qeit_get(queue); hw_eqit_eq_get_inc_valid()
299 if ((qe >> 7) == (queue->toggle_state & 1)) hw_eqit_eq_get_inc_valid()
300 hw_qeit_eq_get_inc(queue); hw_eqit_eq_get_inc_valid()
309 struct hw_queue *queue; ehea_get_next_rwqe() local
312 queue = &qp->hw_rqueue1; ehea_get_next_rwqe()
314 queue = &qp->hw_rqueue2; ehea_get_next_rwqe()
316 queue = &qp->hw_rqueue3; ehea_get_next_rwqe()
318 return hw_qeit_get_inc(queue); ehea_get_next_rwqe()
324 struct hw_queue *queue = &my_qp->hw_squeue; ehea_get_swqe() local
327 *wqe_index = (queue->current_q_offset) >> (7 + EHEA_SG_SQ); ehea_get_swqe()
341 struct hw_queue *queue = &qp->hw_rqueue1; ehea_poll_rq1() local
343 *wqe_index = (queue->current_q_offset) >> (7 + EHEA_SG_RQ1); ehea_poll_rq1()
344 return hw_qeit_get_valid(queue); ehea_poll_rq1()
366 EHEA_EQ = 0, /* event queue */
367 EHEA_NEQ /* notification event queue */
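
ehea_qmr.h shows a toggle-bit validity scheme: hw_qeit_inc() flips queue->toggle_state each time the offset wraps, and hw_qeit_get_inc_valid() only accepts an entry whose valid bit (bit 7 of the status byte) matches the current toggle state, so stale entries from the previous pass around the ring are skipped without ever being cleared. A minimal software-only sketch of that check, with the bit-7 convention assumed from the hits:

#include <stdint.h>
#include <stdio.h>

struct toggle_queue {
    uint8_t *entries;          /* one status byte per entry, bit 7 = valid */
    unsigned int qlen;         /* number of entries */
    unsigned int offset;       /* current index */
    unsigned int toggle_state; /* flipped on every wrap, starts at 1 */
};

/* Return the current entry if its valid bit matches toggle_state, else NULL.
 * On a hit, advance and flip toggle_state when wrapping, as hw_qeit_inc() does. */
static uint8_t *get_inc_valid(struct toggle_queue *q)
{
    uint8_t *e = &q->entries[q->offset];

    if ((*e >> 7) != (q->toggle_state & 1))
        return NULL;                        /* stale entry from previous pass */

    if (++q->offset >= q->qlen) {
        q->offset = 0;
        q->toggle_state = (~q->toggle_state) & 1;
    }
    return e;
}

int main(void)
{
    uint8_t ring[4] = { 0x80, 0x80, 0x00, 0x00 }; /* two entries valid this pass */
    struct toggle_queue q = { ring, 4, 0, 1 };

    while (get_inc_valid(&q))
        printf("consumed entry, now at offset %u\n", q.offset);
    return 0;
}
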
H A Dehea_qmr.c39 static void *hw_qpageit_get_inc(struct hw_queue *queue) hw_qpageit_get_inc() argument
41 void *retvalue = hw_qeit_get(queue); hw_qpageit_get_inc()
43 queue->current_q_offset += queue->pagesize; hw_qpageit_get_inc()
44 if (queue->current_q_offset > queue->queue_length) { hw_qpageit_get_inc()
45 queue->current_q_offset -= queue->pagesize; hw_qpageit_get_inc()
54 static int hw_queue_ctor(struct hw_queue *queue, const u32 nr_of_pages, hw_queue_ctor() argument
66 queue->queue_length = nr_of_pages * pagesize; hw_queue_ctor()
67 queue->queue_pages = kmalloc_array(nr_of_pages, sizeof(void *), hw_queue_ctor()
69 if (!queue->queue_pages) hw_queue_ctor()
73 * allocate pages for queue: hw_queue_ctor()
75 * inner loop divides a kernel page into smaller hea queue pages hw_queue_ctor()
83 (queue->queue_pages)[i] = (struct ehea_page *)kpage; hw_queue_ctor()
89 queue->current_q_offset = 0; hw_queue_ctor()
90 queue->qe_size = qe_size; hw_queue_ctor()
91 queue->pagesize = pagesize; hw_queue_ctor()
92 queue->toggle_state = 1; hw_queue_ctor()
97 if (!(queue->queue_pages)[i]) hw_queue_ctor()
99 free_page((unsigned long)(queue->queue_pages)[i]); hw_queue_ctor()
104 static void hw_queue_dtor(struct hw_queue *queue) hw_queue_dtor() argument
109 if (!queue || !queue->queue_pages) hw_queue_dtor()
112 pages_per_kpage = PAGE_SIZE / queue->pagesize; hw_queue_dtor()
114 nr_pages = queue->queue_length / queue->pagesize; hw_queue_dtor()
117 free_page((unsigned long)(queue->queue_pages)[i]); hw_queue_dtor()
119 kfree(queue->queue_pages); hw_queue_dtor()
376 /* allocates memory for a queue and registers pages in phyp */ ehea_qp_alloc_register()
/linux-4.1.27/net/sunrpc/
H A Dsched.c61 * queue->lock and bh_disabled in order to avoid races within
65 __rpc_disable_timer(struct rpc_wait_queue *queue, struct rpc_task *task) __rpc_disable_timer() argument
72 if (list_empty(&queue->timer_list.list)) __rpc_disable_timer()
73 del_timer(&queue->timer_list.timer); __rpc_disable_timer()
77 rpc_set_queue_timer(struct rpc_wait_queue *queue, unsigned long expires) rpc_set_queue_timer() argument
79 queue->timer_list.expires = expires; rpc_set_queue_timer()
80 mod_timer(&queue->timer_list.timer, expires); rpc_set_queue_timer()
87 __rpc_add_timer(struct rpc_wait_queue *queue, struct rpc_task *task) __rpc_add_timer() argument
96 if (list_empty(&queue->timer_list.list) || time_before(task->u.tk_wait.expires, queue->timer_list.expires)) __rpc_add_timer()
97 rpc_set_queue_timer(queue, task->u.tk_wait.expires); __rpc_add_timer()
98 list_add(&task->u.tk_wait.timer_list, &queue->timer_list.list); __rpc_add_timer()
101 static void rpc_rotate_queue_owner(struct rpc_wait_queue *queue) rpc_rotate_queue_owner() argument
103 struct list_head *q = &queue->tasks[queue->priority]; rpc_rotate_queue_owner()
108 if (task->tk_owner == queue->owner) rpc_rotate_queue_owner()
113 static void rpc_set_waitqueue_priority(struct rpc_wait_queue *queue, int priority) rpc_set_waitqueue_priority() argument
115 if (queue->priority != priority) { rpc_set_waitqueue_priority()
117 rpc_rotate_queue_owner(queue); rpc_set_waitqueue_priority()
118 queue->priority = priority; rpc_set_waitqueue_priority()
122 static void rpc_set_waitqueue_owner(struct rpc_wait_queue *queue, pid_t pid) rpc_set_waitqueue_owner() argument
124 queue->owner = pid; rpc_set_waitqueue_owner()
125 queue->nr = RPC_BATCH_COUNT; rpc_set_waitqueue_owner()
128 static void rpc_reset_waitqueue_priority(struct rpc_wait_queue *queue) rpc_reset_waitqueue_priority() argument
130 rpc_set_waitqueue_priority(queue, queue->maxpriority); rpc_reset_waitqueue_priority()
131 rpc_set_waitqueue_owner(queue, 0); rpc_reset_waitqueue_priority()
135 * Add new request to a priority queue.
137 static void __rpc_add_wait_queue_priority(struct rpc_wait_queue *queue, __rpc_add_wait_queue_priority() argument
145 if (unlikely(queue_priority > queue->maxpriority)) __rpc_add_wait_queue_priority()
146 queue_priority = queue->maxpriority; __rpc_add_wait_queue_priority()
147 if (queue_priority > queue->priority) __rpc_add_wait_queue_priority()
148 rpc_set_waitqueue_priority(queue, queue_priority); __rpc_add_wait_queue_priority()
149 q = &queue->tasks[queue_priority]; list_for_each_entry()
160 * Add new request to wait queue.
162 * Swapper tasks always get inserted at the head of the queue.
165 * Everyone else gets appended to the queue to ensure proper FIFO behavior.
167 static void __rpc_add_wait_queue(struct rpc_wait_queue *queue, __rpc_add_wait_queue() argument
175 if (RPC_IS_PRIORITY(queue)) __rpc_add_wait_queue()
176 __rpc_add_wait_queue_priority(queue, task, queue_priority); __rpc_add_wait_queue()
178 list_add(&task->u.tk_wait.list, &queue->tasks[0]); __rpc_add_wait_queue()
180 list_add_tail(&task->u.tk_wait.list, &queue->tasks[0]); __rpc_add_wait_queue()
181 task->tk_waitqueue = queue; __rpc_add_wait_queue()
182 queue->qlen++; __rpc_add_wait_queue()
187 dprintk("RPC: %5u added to queue %p \"%s\"\n", __rpc_add_wait_queue()
188 task->tk_pid, queue, rpc_qname(queue)); __rpc_add_wait_queue()
192 * Remove request from a priority queue.
206 * Remove request from queue.
209 static void __rpc_remove_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task) __rpc_remove_wait_queue() argument
211 __rpc_disable_timer(queue, task); __rpc_remove_wait_queue()
212 if (RPC_IS_PRIORITY(queue)) __rpc_remove_wait_queue()
215 queue->qlen--; __rpc_remove_wait_queue()
216 dprintk("RPC: %5u removed from queue %p \"%s\"\n", __rpc_remove_wait_queue()
217 task->tk_pid, queue, rpc_qname(queue)); __rpc_remove_wait_queue()
220 static void __rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname, unsigned char nr_queues) __rpc_init_priority_wait_queue() argument
224 spin_lock_init(&queue->lock); __rpc_init_priority_wait_queue()
225 for (i = 0; i < ARRAY_SIZE(queue->tasks); i++) __rpc_init_priority_wait_queue()
226 INIT_LIST_HEAD(&queue->tasks[i]); __rpc_init_priority_wait_queue()
227 queue->maxpriority = nr_queues - 1; __rpc_init_priority_wait_queue()
228 rpc_reset_waitqueue_priority(queue); __rpc_init_priority_wait_queue()
229 queue->qlen = 0; __rpc_init_priority_wait_queue()
230 setup_timer(&queue->timer_list.timer, __rpc_queue_timer_fn, (unsigned long)queue); __rpc_init_priority_wait_queue()
231 INIT_LIST_HEAD(&queue->timer_list.list); __rpc_init_priority_wait_queue()
232 rpc_assign_waitqueue_name(queue, qname); __rpc_init_priority_wait_queue()
235 void rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname) rpc_init_priority_wait_queue() argument
237 __rpc_init_priority_wait_queue(queue, qname, RPC_NR_PRIORITY); rpc_init_priority_wait_queue()
241 void rpc_init_wait_queue(struct rpc_wait_queue *queue, const char *qname) rpc_init_wait_queue() argument
243 __rpc_init_priority_wait_queue(queue, qname, 1); rpc_init_wait_queue()
247 void rpc_destroy_wait_queue(struct rpc_wait_queue *queue) rpc_destroy_wait_queue() argument
249 del_timer_sync(&queue->timer_list.timer); rpc_destroy_wait_queue()
325 * rpc_wait_queue, this must be called with the queue spinlock held to protect
326 * the wait queue operation.
347 * Prepare for sleeping on a wait queue.
350 * as it's on a wait queue.
357 dprintk("RPC: %5u sleep_on(queue \"%s\" time %lu)\n", __rpc_sleep_on_priority()
381 * Protect the queue operations. rpc_sleep_on()
401 * Protect the queue operations. rpc_sleep_on_priority()
411 * @queue: wait queue
414 * Caller must hold queue->lock, and have cleared the task queued flag.
416 static void __rpc_do_wake_up_task(struct rpc_wait_queue *queue, struct rpc_task *task) __rpc_do_wake_up_task() argument
427 trace_rpc_task_wakeup(task->tk_client, task, queue); __rpc_do_wake_up_task()
429 __rpc_remove_wait_queue(queue, task); __rpc_do_wake_up_task()
437 * Wake up a queued task while the queue lock is being held
439 static void rpc_wake_up_task_queue_locked(struct rpc_wait_queue *queue, struct rpc_task *task) rpc_wake_up_task_queue_locked() argument
443 if (task->tk_waitqueue == queue) rpc_wake_up_task_queue_locked()
444 __rpc_do_wake_up_task(queue, task); rpc_wake_up_task_queue_locked()
449 * Wake up a task on a specific queue
451 void rpc_wake_up_queued_task(struct rpc_wait_queue *queue, struct rpc_task *task) rpc_wake_up_queued_task() argument
453 spin_lock_bh(&queue->lock); rpc_wake_up_queued_task()
454 rpc_wake_up_task_queue_locked(queue, task); rpc_wake_up_queued_task()
455 spin_unlock_bh(&queue->lock); rpc_wake_up_queued_task()
460 * Wake up the next task on a priority queue.
462 static struct rpc_task *__rpc_find_next_queued_priority(struct rpc_wait_queue *queue) __rpc_find_next_queued_priority() argument
470 q = &queue->tasks[queue->priority]; __rpc_find_next_queued_priority()
473 if (queue->owner == task->tk_owner) { __rpc_find_next_queued_priority()
474 if (--queue->nr) __rpc_find_next_queued_priority()
485 * Service the next queue. __rpc_find_next_queued_priority()
488 if (q == &queue->tasks[0]) __rpc_find_next_queued_priority()
489 q = &queue->tasks[queue->maxpriority]; __rpc_find_next_queued_priority()
496 } while (q != &queue->tasks[queue->priority]); __rpc_find_next_queued_priority()
498 rpc_reset_waitqueue_priority(queue); __rpc_find_next_queued_priority()
502 rpc_set_waitqueue_priority(queue, (unsigned int)(q - &queue->tasks[0])); __rpc_find_next_queued_priority()
504 rpc_set_waitqueue_owner(queue, task->tk_owner); __rpc_find_next_queued_priority()
509 static struct rpc_task *__rpc_find_next_queued(struct rpc_wait_queue *queue) __rpc_find_next_queued() argument
511 if (RPC_IS_PRIORITY(queue)) __rpc_find_next_queued()
512 return __rpc_find_next_queued_priority(queue); __rpc_find_next_queued()
513 if (!list_empty(&queue->tasks[0])) __rpc_find_next_queued()
514 return list_first_entry(&queue->tasks[0], struct rpc_task, u.tk_wait.list); __rpc_find_next_queued()
519 * Wake up the first task on the wait queue.
521 struct rpc_task *rpc_wake_up_first(struct rpc_wait_queue *queue, rpc_wake_up_first() argument
527 queue, rpc_qname(queue)); rpc_wake_up_first()
528 spin_lock_bh(&queue->lock); rpc_wake_up_first()
529 task = __rpc_find_next_queued(queue); rpc_wake_up_first()
532 rpc_wake_up_task_queue_locked(queue, task); rpc_wake_up_first()
536 spin_unlock_bh(&queue->lock); rpc_wake_up_first()
548 * Wake up the next task on the wait queue.
550 struct rpc_task *rpc_wake_up_next(struct rpc_wait_queue *queue) rpc_wake_up_next() argument
552 return rpc_wake_up_first(queue, rpc_wake_up_next_func, NULL); rpc_wake_up_next()
558 * @queue: rpc_wait_queue on which the tasks are sleeping
560 * Grabs queue->lock
562 void rpc_wake_up(struct rpc_wait_queue *queue) rpc_wake_up() argument
566 spin_lock_bh(&queue->lock); rpc_wake_up()
567 head = &queue->tasks[queue->maxpriority]; rpc_wake_up()
574 rpc_wake_up_task_queue_locked(queue, task); rpc_wake_up()
576 if (head == &queue->tasks[0]) rpc_wake_up()
580 spin_unlock_bh(&queue->lock); rpc_wake_up()
586 * @queue: rpc_wait_queue on which the tasks are sleeping
589 * Grabs queue->lock
591 void rpc_wake_up_status(struct rpc_wait_queue *queue, int status) rpc_wake_up_status() argument
595 spin_lock_bh(&queue->lock); rpc_wake_up_status()
596 head = &queue->tasks[queue->maxpriority]; rpc_wake_up_status()
604 rpc_wake_up_task_queue_locked(queue, task); rpc_wake_up_status()
606 if (head == &queue->tasks[0]) rpc_wake_up_status()
610 spin_unlock_bh(&queue->lock); rpc_wake_up_status()
616 struct rpc_wait_queue *queue = (struct rpc_wait_queue *)ptr; __rpc_queue_timer_fn() local
620 spin_lock(&queue->lock); __rpc_queue_timer_fn()
622 list_for_each_entry_safe(task, n, &queue->timer_list.list, u.tk_wait.timer_list) { __rpc_queue_timer_fn()
627 rpc_wake_up_task_queue_locked(queue, task); __rpc_queue_timer_fn()
633 if (!list_empty(&queue->timer_list.list)) __rpc_queue_timer_fn()
634 rpc_set_queue_timer(queue, expires); __rpc_queue_timer_fn()
635 spin_unlock(&queue->lock); __rpc_queue_timer_fn()
720 struct rpc_wait_queue *queue; __rpc_execute() local
759 * The queue->lock protects against races with __rpc_execute()
767 queue = task->tk_waitqueue; __rpc_execute()
768 spin_lock_bh(&queue->lock); __rpc_execute()
770 spin_unlock_bh(&queue->lock); __rpc_execute()
774 spin_unlock_bh(&queue->lock); __rpc_execute()
787 * clean up after sleeping on some queue, we don't __rpc_execute()
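
The sched.c hits outline the priority wait queue layout: one list per level in queue->tasks[], queue->priority remembering the level currently being served, and __rpc_find_next_queued_priority() scanning from that level and wrapping through the array until it finds a queued task. A simplified illustration of that scan only; the owner batching, FIFO rotation and locking of the real code are deliberately left out:

#include <stdio.h>

#define NR_LEVELS 4

struct prio_waitq {
    int tasks[NR_LEVELS][8];   /* per-level FIFO of task ids */
    int count[NR_LEVELS];      /* how many are queued at each level */
    int priority;              /* level currently being served */
};

/* Pick the next task: start at the current level and scan the others,
 * wrapping around the array, roughly in the spirit of
 * __rpc_find_next_queued_priority(). */
static int find_next_queued(struct prio_waitq *q, int *task)
{
    int level = q->priority;

    for (int scanned = 0; scanned < NR_LEVELS; scanned++) {
        if (q->count[level] > 0) {
            *task = q->tasks[level][0];
            for (int i = 1; i < q->count[level]; i++)   /* pop the front */
                q->tasks[level][i - 1] = q->tasks[level][i];
            q->count[level]--;
            q->priority = level;        /* remember which level was served */
            return 1;
        }
        level = (level == 0) ? NR_LEVELS - 1 : level - 1;
    }
    return 0;
}

int main(void)
{
    struct prio_waitq q = { .priority = 2 };
    int task;

    q.tasks[1][0] = 42;
    q.count[1] = 1;
    if (find_next_queued(&q, &task))
        printf("woke task %d from level %d\n", task, q.priority);
    return 0;
}
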
/linux-4.1.27/arch/arm/mach-ixp4xx/
H A Dixp4xx_qmgr.c28 void qmgr_set_irq(unsigned int queue, int src, qmgr_set_irq() argument
34 if (queue < HALF_QUEUES) { qmgr_set_irq()
38 reg = &qmgr_regs->irqsrc[queue >> 3]; /* 8 queues per u32 */ qmgr_set_irq()
39 bit = (queue % 8) * 4; /* 3 bits + 1 reserved bit per queue */ qmgr_set_irq()
46 irq_handlers[queue] = handler; qmgr_set_irq()
47 irq_pdevs[queue] = pdev; qmgr_set_irq()
62 i = __fls(en_bitmap); /* number of the last "low" queue */ qmgr_irq1_a0()
87 i = __fls(req_bitmap); /* number of the last "high" queue */ qmgr_irq2_a0()
106 i = __fls(req_bitmap); /* number of the last queue */ qmgr_irq()
115 void qmgr_enable_irq(unsigned int queue) qmgr_enable_irq() argument
118 int half = queue / 32; qmgr_enable_irq()
119 u32 mask = 1 << (queue & (HALF_QUEUES - 1)); qmgr_enable_irq()
127 void qmgr_disable_irq(unsigned int queue) qmgr_disable_irq() argument
130 int half = queue / 32; qmgr_disable_irq()
131 u32 mask = 1 << (queue & (HALF_QUEUES - 1)); qmgr_disable_irq()
149 int qmgr_request_queue(unsigned int queue, unsigned int len /* dwords */, qmgr_request_queue() argument
154 int __qmgr_request_queue(unsigned int queue, unsigned int len /* dwords */, qmgr_request_queue()
162 BUG_ON(queue >= QUEUES); qmgr_request_queue()
197 if (__raw_readl(&qmgr_regs->sram[queue])) { qmgr_request_queue()
213 " queue %i\n", queue); qmgr_request_queue()
223 __raw_writel(cfg | (addr << 14), &qmgr_regs->sram[queue]); qmgr_request_queue()
225 snprintf(qmgr_queue_descs[queue], sizeof(qmgr_queue_descs[0]), qmgr_request_queue()
227 printk(KERN_DEBUG "qmgr: requested queue %s(%i) addr = 0x%02X\n", qmgr_request_queue()
228 qmgr_queue_descs[queue], queue, addr); qmgr_request_queue()
239 void qmgr_release_queue(unsigned int queue) qmgr_release_queue() argument
243 BUG_ON(queue >= QUEUES); /* not in valid range */ qmgr_release_queue()
246 cfg = __raw_readl(&qmgr_regs->sram[queue]); qmgr_release_queue()
264 printk(KERN_DEBUG "qmgr: releasing queue %s(%i)\n", qmgr_release_queue()
265 qmgr_queue_descs[queue], queue); qmgr_release_queue()
266 qmgr_queue_descs[queue][0] = '\x0'; qmgr_release_queue()
269 while ((addr = qmgr_get_entry(queue))) qmgr_release_queue()
270 printk(KERN_ERR "qmgr: released queue %i not empty: 0x%08X\n", qmgr_release_queue()
271 queue, addr); qmgr_release_queue()
273 __raw_writel(0, &qmgr_regs->sram[queue]); qmgr_release_queue()
279 irq_handlers[queue] = NULL; /* catch IRQ bugs */ qmgr_release_queue()
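
Both qmgr_set_irq() and qmgr_enable_irq() above reduce a flat queue number to a register/bit pair: the interrupt-source registers pack eight queues per 32-bit word at four bits each, while the enable registers use one bit per queue across two banks of HALF_QUEUES. A tiny sketch of just that index math, assuming the same 64-queue, two-bank layout:

#include <stdio.h>

#define HALF_QUEUES 32   /* assumed: 64 queues split into two banks */

/* Interrupt-source selection: 8 queues per 32-bit register, 4 bits each
 * (3 source bits + 1 reserved bit per queue). */
static void irqsrc_slot(unsigned int queue, unsigned int *reg, unsigned int *bit)
{
    *reg = queue >> 3;          /* which irqsrc word */
    *bit = (queue % 8) * 4;     /* starting bit within that word */
}

/* Interrupt enable: one bit per queue, two banks of HALF_QUEUES each. */
static void irqen_slot(unsigned int queue, unsigned int *half, unsigned int *mask)
{
    *half = queue / HALF_QUEUES;
    *mask = 1u << (queue & (HALF_QUEUES - 1));
}

int main(void)
{
    unsigned int reg, bit, half, mask;

    irqsrc_slot(13, &reg, &bit);   /* queue 13 -> word 1, bits 20..23 */
    irqen_slot(13, &half, &mask);  /* queue 13 -> bank 0, mask 0x2000 */
    printf("irqsrc word %u bit %u, enable bank %u mask 0x%x\n",
           reg, bit, half, mask);
    return 0;
}
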
/linux-4.1.27/drivers/net/wireless/ath/ath5k/
H A Dqcu.c35 * basically we have 10 queues to play with. Each queue has a matching
36 * QCU that controls when the queue will get triggered and multiple QCUs
39 * and DCUs allowing us to have different DFS settings for each queue.
41 * When a frame goes into a TX queue, QCU decides when it'll trigger a
43 * its buffer or -if it's a beacon queue- if it's time to fire up the queue
58 * ath5k_hw_num_tx_pending() - Get number of pending frames for a given queue
60 * @queue: One of enum ath5k_tx_queue_id
63 ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue) ath5k_hw_num_tx_pending() argument
66 AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num); ath5k_hw_num_tx_pending()
68 /* Return if queue is declared inactive */ ath5k_hw_num_tx_pending()
69 if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE) ath5k_hw_num_tx_pending()
76 pending = ath5k_hw_reg_read(ah, AR5K_QUEUE_STATUS(queue)); ath5k_hw_num_tx_pending()
82 if (!pending && AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, queue)) ath5k_hw_num_tx_pending()
89 * ath5k_hw_release_tx_queue() - Set a transmit queue inactive
91 * @queue: One of enum ath5k_tx_queue_id
94 ath5k_hw_release_tx_queue(struct ath5k_hw *ah, unsigned int queue) ath5k_hw_release_tx_queue() argument
96 if (WARN_ON(queue >= ah->ah_capabilities.cap_queues.q_tx_num)) ath5k_hw_release_tx_queue()
99 /* This queue will be skipped in further operations */ ath5k_hw_release_tx_queue()
100 ah->ah_txq[queue].tqi_type = AR5K_TX_QUEUE_INACTIVE; ath5k_hw_release_tx_queue()
102 AR5K_Q_DISABLE_BITS(ah->ah_txq_status, queue); ath5k_hw_release_tx_queue()
132 * ath5k_hw_get_tx_queueprops() - Get properties for a transmit queue
134 * @queue: One of enum ath5k_tx_queue_id
138 ath5k_hw_get_tx_queueprops(struct ath5k_hw *ah, int queue, ath5k_hw_get_tx_queueprops() argument
141 memcpy(queue_info, &ah->ah_txq[queue], sizeof(struct ath5k_txq_info)); ath5k_hw_get_tx_queueprops()
146 * ath5k_hw_set_tx_queueprops() - Set properties for a transmit queue
148 * @queue: One of enum ath5k_tx_queue_id
151 * Returns 0 on success or -EIO if queue is inactive
154 ath5k_hw_set_tx_queueprops(struct ath5k_hw *ah, int queue, ath5k_hw_set_tx_queueprops() argument
159 AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num); ath5k_hw_set_tx_queueprops()
161 qi = &ah->ah_txq[queue]; ath5k_hw_set_tx_queueprops()
195 * ath5k_hw_setup_tx_queue() - Initialize a transmit queue
206 unsigned int queue; ath5k_hw_setup_tx_queue() local
210 * Get queue by type ath5k_hw_setup_tx_queue()
216 queue = AR5K_TX_QUEUE_ID_NOQCU_DATA; ath5k_hw_setup_tx_queue()
220 queue = AR5K_TX_QUEUE_ID_NOQCU_BEACON; ath5k_hw_setup_tx_queue()
228 queue = queue_info->tqi_subtype; ath5k_hw_setup_tx_queue()
231 queue = AR5K_TX_QUEUE_ID_UAPSD; ath5k_hw_setup_tx_queue()
234 queue = AR5K_TX_QUEUE_ID_BEACON; ath5k_hw_setup_tx_queue()
237 queue = AR5K_TX_QUEUE_ID_CAB; ath5k_hw_setup_tx_queue()
245 * Setup internal queue structure ath5k_hw_setup_tx_queue()
247 memset(&ah->ah_txq[queue], 0, sizeof(struct ath5k_txq_info)); ath5k_hw_setup_tx_queue()
248 ah->ah_txq[queue].tqi_type = queue_type; ath5k_hw_setup_tx_queue()
252 ret = ath5k_hw_set_tx_queueprops(ah, queue, queue_info); ath5k_hw_setup_tx_queue()
262 AR5K_Q_ENABLE_BITS(ah->ah_txq_status, queue); ath5k_hw_setup_tx_queue()
264 return queue; ath5k_hw_setup_tx_queue()
275 * @queue: One of enum ath5k_tx_queue_id
277 * This function is used when initializing a queue, to set
282 unsigned int queue) ath5k_hw_set_tx_retry_limits()
284 /* Single data queue on AR5210 */ ath5k_hw_set_tx_retry_limits()
286 struct ath5k_txq_info *tq = &ah->ah_txq[queue]; ath5k_hw_set_tx_retry_limits()
288 if (queue > 0) ath5k_hw_set_tx_retry_limits()
311 AR5K_QUEUE_DFS_RETRY_LIMIT(queue)); ath5k_hw_set_tx_retry_limits()
316 * ath5k_hw_reset_tx_queue() - Initialize a single hw queue
318 * @queue: One of enum ath5k_tx_queue_id
320 * Set DCF properties for the given transmit queue on DCU
321 * and configures all queue-specific parameters.
324 ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue) ath5k_hw_reset_tx_queue() argument
326 struct ath5k_txq_info *tq = &ah->ah_txq[queue]; ath5k_hw_reset_tx_queue()
328 AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num); ath5k_hw_reset_tx_queue()
330 tq = &ah->ah_txq[queue]; ath5k_hw_reset_tx_queue()
332 /* Skip if queue inactive or if we are on AR5210 ath5k_hw_reset_tx_queue()
346 AR5K_QUEUE_DFS_LOCAL_IFS(queue)); ath5k_hw_reset_tx_queue()
349 * Set tx retry limits for this queue ath5k_hw_reset_tx_queue()
351 ath5k_hw_set_tx_retry_limits(ah, queue); ath5k_hw_reset_tx_queue()
359 AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue), ath5k_hw_reset_tx_queue()
364 AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue), ath5k_hw_reset_tx_queue()
373 AR5K_QUEUE_CBRCFG(queue)); ath5k_hw_reset_tx_queue()
375 AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue), ath5k_hw_reset_tx_queue()
379 AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue), ath5k_hw_reset_tx_queue()
388 AR5K_QUEUE_RDYTIMECFG(queue)); ath5k_hw_reset_tx_queue()
394 AR5K_QUEUE_DFS_CHANNEL_TIME(queue)); ath5k_hw_reset_tx_queue()
397 AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue), ath5k_hw_reset_tx_queue()
404 AR5K_QUEUE_DFS_MISC(queue)); ath5k_hw_reset_tx_queue()
409 AR5K_QUEUE_DFS_MISC(queue)); ath5k_hw_reset_tx_queue()
412 * Set registers by queue type ath5k_hw_reset_tx_queue()
416 AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue), ath5k_hw_reset_tx_queue()
421 AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue), ath5k_hw_reset_tx_queue()
431 AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue), ath5k_hw_reset_tx_queue()
441 AR5K_QUEUE_RDYTIMECFG(queue)); ath5k_hw_reset_tx_queue()
443 AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue), ath5k_hw_reset_tx_queue()
449 AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue), ath5k_hw_reset_tx_queue()
461 * Enable interrupts for this tx queue ath5k_hw_reset_tx_queue()
465 AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txok, queue); ath5k_hw_reset_tx_queue()
468 AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txerr, queue); ath5k_hw_reset_tx_queue()
471 AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txurn, queue); ath5k_hw_reset_tx_queue()
474 AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txdesc, queue); ath5k_hw_reset_tx_queue()
477 AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txeol, queue); ath5k_hw_reset_tx_queue()
480 AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_cbrorn, queue); ath5k_hw_reset_tx_queue()
483 AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_cbrurn, queue); ath5k_hw_reset_tx_queue()
486 AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_qtrig, queue); ath5k_hw_reset_tx_queue()
489 AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_nofrm, queue); ath5k_hw_reset_tx_queue()
535 /* No queue has TXNOFRM enabled, disable the interrupt ath5k_hw_reset_tx_queue()
541 AR5K_REG_WRITE_Q(ah, AR5K_QUEUE_QCUMASK(queue), queue); ath5k_hw_reset_tx_queue()
707 "failed to reset TX queue #%d\n", i); ath5k_hw_init_queues()
281 ath5k_hw_set_tx_retry_limits(struct ath5k_hw *ah, unsigned int queue) ath5k_hw_set_tx_retry_limits() argument
H A Ddma.c27 * handle queue setup for 5210 chipset (rest are handled on qcu.c).
116 * ath5k_hw_start_tx_dma() - Start DMA transmit for a specific queue
118 * @queue: The hw queue number
120 * Start DMA transmit for a specific queue and since 5210 doesn't have
121 * QCU/DCU, set up queue parameters for 5210 here based on queue type (one
122 * queue for normal data and one queue for beacons). For queue setup
123 * on newer chips check out qcu.c. Returns -EINVAL if queue number is out
124 * of range or if queue is already disabled.
127 * queue (see below).
130 ath5k_hw_start_tx_dma(struct ath5k_hw *ah, unsigned int queue) ath5k_hw_start_tx_dma() argument
134 AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num); ath5k_hw_start_tx_dma()
136 /* Return if queue is declared inactive */ ath5k_hw_start_tx_dma()
137 if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE) ath5k_hw_start_tx_dma()
144 * Set the queue by type on 5210 ath5k_hw_start_tx_dma()
146 switch (ah->ah_txq[queue].tqi_type) { ath5k_hw_start_tx_dma()
163 /* Start queue */ ath5k_hw_start_tx_dma()
167 /* Return if queue is disabled */ ath5k_hw_start_tx_dma()
168 if (AR5K_REG_READ_Q(ah, AR5K_QCU_TXD, queue)) ath5k_hw_start_tx_dma()
171 /* Start queue */ ath5k_hw_start_tx_dma()
172 AR5K_REG_WRITE_Q(ah, AR5K_QCU_TXE, queue); ath5k_hw_start_tx_dma()
179 * ath5k_hw_stop_tx_dma() - Stop DMA transmit on a specific queue
181 * @queue: The hw queue number
183 * Stop DMA transmit on a specific hw queue and drain the queue so we don't
185 * -EINVAL if queue number is out of range or inactive.
188 ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue) ath5k_hw_stop_tx_dma() argument
193 AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num); ath5k_hw_stop_tx_dma()
195 /* Return if queue is declared inactive */ ath5k_hw_stop_tx_dma()
196 if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE) ath5k_hw_stop_tx_dma()
203 * Set by queue type ath5k_hw_stop_tx_dma()
205 switch (ah->ah_txq[queue].tqi_type) { ath5k_hw_stop_tx_dma()
219 /* Stop queue */ ath5k_hw_stop_tx_dma()
228 AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue), ath5k_hw_stop_tx_dma()
232 * Schedule TX disable and wait until queue is empty ath5k_hw_stop_tx_dma()
234 AR5K_REG_WRITE_Q(ah, AR5K_QCU_TXD, queue); ath5k_hw_stop_tx_dma()
236 /* Wait for queue to stop */ ath5k_hw_stop_tx_dma()
238 (AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, queue) != 0); ath5k_hw_stop_tx_dma()
242 if (AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, queue)) ath5k_hw_stop_tx_dma()
244 "queue %i didn't stop !\n", queue); ath5k_hw_stop_tx_dma()
250 AR5K_QUEUE_STATUS(queue)) & ath5k_hw_stop_tx_dma()
286 AR5K_QUEUE_STATUS(queue)) & ath5k_hw_stop_tx_dma()
297 queue); ath5k_hw_stop_tx_dma()
303 AR5K_REG_DISABLE_BITS(ah, AR5K_QUEUE_MISC(queue), ath5k_hw_stop_tx_dma()
311 queue, pending); ath5k_hw_stop_tx_dma()
321 * ath5k_hw_stop_beacon_queue() - Stop beacon queue
323 * @queue: The queue number
325 * Returns -EIO if queue didn't stop
328 ath5k_hw_stop_beacon_queue(struct ath5k_hw *ah, unsigned int queue) ath5k_hw_stop_beacon_queue() argument
331 ret = ath5k_hw_stop_tx_dma(ah, queue); ath5k_hw_stop_beacon_queue()
334 "beacon queue didn't stop !\n"); ath5k_hw_stop_beacon_queue()
341 * ath5k_hw_get_txdp() - Get TX Descriptor's address for a specific queue
343 * @queue: The hw queue number
345 * Get TX descriptor's address for a specific queue. For 5210 we ignore
346 * the queue number and use tx queue type since we only have 2 queues.
347 * We use TXDP0 for normal data queue and TXDP1 for beacon queue.
353 ath5k_hw_get_txdp(struct ath5k_hw *ah, unsigned int queue) ath5k_hw_get_txdp() argument
357 AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num); ath5k_hw_get_txdp()
360 * Get the transmit queue descriptor pointer from the selected queue ath5k_hw_get_txdp()
364 switch (ah->ah_txq[queue].tqi_type) { ath5k_hw_get_txdp()
376 tx_reg = AR5K_QUEUE_TXDP(queue); ath5k_hw_get_txdp()
383 * ath5k_hw_set_txdp() - Set TX Descriptor's address for a specific queue
385 * @queue: The hw queue number
388 * Set TX descriptor's address for a specific queue. For 5210 we ignore
389 * the queue number and we use tx queue type since we only have 2 queues
390 * so as above we use TXDP0 for normal data queue and TXDP1 for beacon queue.
392 * Returns -EINVAL if queue type is invalid for 5210 and -EIO if queue is still
396 ath5k_hw_set_txdp(struct ath5k_hw *ah, unsigned int queue, u32 phys_addr) ath5k_hw_set_txdp() argument
400 AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num); ath5k_hw_set_txdp()
403 * Set the transmit queue descriptor pointer register by type ath5k_hw_set_txdp()
407 switch (ah->ah_txq[queue].tqi_type) { ath5k_hw_set_txdp()
420 * Set the transmit queue descriptor pointer for ath5k_hw_set_txdp()
421 * the selected queue on QCU for 5211+ ath5k_hw_set_txdp()
422 * (this won't work if the queue is still active) ath5k_hw_set_txdp()
424 if (AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, queue)) ath5k_hw_set_txdp()
427 tx_reg = AR5K_QUEUE_TXDP(queue); ath5k_hw_set_txdp()
592 * per-queue bits on SISR0 ath5k_hw_get_isr()
595 * per-queue bits on SISR1 ath5k_hw_get_isr()
597 * TXURN -> Logical OR of TXURN per-queue bits on SISR2 ath5k_hw_get_isr()
606 * QCBRURN per-queue bits on SISR3 ath5k_hw_get_isr()
607 * QTRIG -> Logical OR of QTRIG per-queue bits on SISR4 ath5k_hw_get_isr()
612 * interrupt got fired for another queue while we were reading ath5k_hw_get_isr()
656 * so we track them all together per queue */ ath5k_hw_get_isr()
711 /* A queue got CBR overrun */ ath5k_hw_get_isr()
718 /* A queue got CBR underrun */ ath5k_hw_get_isr()
725 /* A queue got triggered */ ath5k_hw_get_isr()
778 /* Preserve per queue TXURN interrupt mask */ ath5k_hw_set_imr()
808 /* Note: Per queue interrupt masks ath5k_hw_set_imr()
922 /* -EINVAL -> queue inactive */ ath5k_hw_dma_stop()
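
ath5k_hw_stop_tx_dma() uses a common MMIO shutdown pattern: request the stop by writing the queue's bit to AR5K_QCU_TXD, then poll the enable register with a bounded retry count until the hardware reports the queue stopped. A generic, hedged sketch of that request-then-poll loop against a fake register; nothing here touches real ath5k registers, and the retry budget is an arbitrary assumption:

#include <stdbool.h>
#include <stdio.h>

/* Fake device register standing in for AR5K_QCU_TXE; a real driver would
 * use MMIO accessors on mapped I/O memory instead. */
static unsigned int fake_txe = 1u << 3;   /* queue 3 currently enabled */

static bool queue_active(unsigned int queue)
{
    static int polls;

    /* Pretend the hardware drops the bit after a few polls. */
    if (++polls > 4)
        fake_txe &= ~(1u << queue);
    return fake_txe & (1u << queue);
}

/* Request a stop, then wait for the queue to drain with a bounded retry
 * count. Returns 0 on success, -1 if it never stopped (the -EIO path). */
static int stop_tx_queue(unsigned int queue)
{
    int retries = 1000;

    /* the disable request (setting bit `queue` in a TXD-style register)
     * would be written here */
    while (queue_active(queue) && --retries)
        ;   /* a real driver would delay briefly between reads */

    return retries ? 0 : -1;
}

int main(void)
{
    printf("stop_tx_queue(3) = %d\n", stop_tx_queue(3));
    return 0;
}
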
/linux-4.1.27/include/linux/soc/ti/
H A Dknav_qmss.h29 /* queue types */
30 #define KNAV_QUEUE_QPEND ((unsigned)-2) /* interruptible qpend queue */
31 #define KNAV_QUEUE_ACC ((unsigned)-3) /* Accumulated queue */
32 #define KNAV_QUEUE_GP ((unsigned)-4) /* General purpose queue */
34 /* queue flags */
38 * enum knav_queue_ctrl_cmd - queue operations.
39 * @KNAV_QUEUE_GET_ID: Get the ID number for an open queue
40 * @KNAV_QUEUE_FLUSH: forcibly empty a queue if possible
41 * @KNAV_QUEUE_SET_NOTIFIER: Set a notifier callback to a queue handle.
42 * @KNAV_QUEUE_ENABLE_NOTIFY: Enable notifier callback for a queue handle.
43 * @KNAV_QUEUE_DISABLE_NOTIFY: Disable notifier callback for a queue handle.
/linux-4.1.27/sound/core/seq/
H A Dseq_queue.c2 * ALSA sequencer Timing queue handling
23 * - Owner of unlocked queue is kept unmodified even if it is
32 * - The queue is locked when struct snd_seq_queue pointer is returned via
62 /* assign queue id and insert to list */ queue_list_add()
72 q->queue = i; queue_list_add()
108 /* create new queue (constructor) */ queue_new()
121 q->queue = -1; queue_new()
141 /* delete queue (destructor) */ queue_delete()
184 /* allocate a new queue -
185 * return queue index value or negative value for error
199 snd_seq_queue_use(q->queue, client, 1); /* use this queue */ snd_seq_queue_alloc()
200 return q->queue; snd_seq_queue_alloc()
203 /* delete a queue - queue must be owned by the client */ snd_seq_queue_delete()
219 /* return pointer to queue structure for specified id */ queueptr()
235 /* return the (first) queue matching with the specified name */ snd_seq_queue_find_name()
273 /* Process tick queue... */ snd_seq_check_queue()
281 /* event remains in the queue */ snd_seq_check_queue()
287 /* Process time queue... */ snd_seq_check_queue()
295 /* event remains in the queue */ snd_seq_check_queue()
312 /* enqueue an event to a single queue */ snd_seq_enqueue_event()
320 dest = cell->event.queue; /* destination queue */ snd_seq_enqueue_event()
339 /* enqueue event in the real-time or midi queue */ snd_seq_enqueue_event()
372 /* check if the client has permission to modify queue parameters.
373 * if it does, lock the queue
388 /* unlock the queue */ queue_access_unlock()
417 * change queue's owner and permission
449 struct snd_seq_queue *queue; snd_seq_queue_timer_open() local
452 queue = queueptr(queueid); snd_seq_queue_timer_open()
453 if (queue == NULL) snd_seq_queue_timer_open()
455 tmr = queue->timer; snd_seq_queue_timer_open()
456 if ((result = snd_seq_timer_open(queue)) < 0) { snd_seq_queue_timer_open()
458 result = snd_seq_timer_open(queue); snd_seq_queue_timer_open()
460 queuefree(queue); snd_seq_queue_timer_open()
469 struct snd_seq_queue *queue; snd_seq_queue_timer_close() local
472 queue = queueptr(queueid); snd_seq_queue_timer_close()
473 if (queue == NULL) snd_seq_queue_timer_close()
475 snd_seq_timer_close(queue); snd_seq_queue_timer_close()
476 queuefree(queue); snd_seq_queue_timer_close()
480 /* change queue tempo and ppq */ snd_seq_queue_timer_set_tempo()
506 /* use or unuse this queue -
512 struct snd_seq_queue *queue; snd_seq_queue_use() local
514 queue = queueptr(queueid); snd_seq_queue_use()
515 if (queue == NULL) snd_seq_queue_use()
517 mutex_lock(&queue->timer_mutex); snd_seq_queue_use()
519 if (!test_and_set_bit(client, queue->clients_bitmap)) snd_seq_queue_use()
520 queue->clients++; snd_seq_queue_use()
522 if (test_and_clear_bit(client, queue->clients_bitmap)) snd_seq_queue_use()
523 queue->clients--; snd_seq_queue_use()
525 if (queue->clients) { snd_seq_queue_use()
526 if (use && queue->clients == 1) snd_seq_queue_use()
527 snd_seq_timer_defaults(queue->timer); snd_seq_queue_use()
528 snd_seq_timer_open(queue); snd_seq_queue_use()
530 snd_seq_timer_close(queue); snd_seq_queue_use()
532 mutex_unlock(&queue->timer_mutex); snd_seq_queue_use()
533 queuefree(queue); snd_seq_queue_use()
538 * check if queue is used by the client
539 * return negative value if the queue is invalid.
549 return -EINVAL; /* invalid queue */ snd_seq_queue_is_used()
584 * remove cells for a client that no longer exists (for non-owned queue)
585 * or delete this queue (for owned queue)
592 /* delete own queues from queue list */ snd_seq_queue_client_leave()
607 snd_seq_queue_use(q->queue, client, 0); snd_seq_queue_client_leave()
643 q->queue == info->queue)) { snd_seq_queue_remove_cells()
665 sev.queue = q->queue; queue_broadcast_event()
666 sev.data.queue.queue = q->queue; queue_broadcast_event()
676 * process a received queue-control event.
702 snd_seq_timer_set_tempo(q->timer, ev->data.queue.param.value); snd_seq_queue_process_event()
707 if (snd_seq_timer_set_position_tick(q->timer, ev->data.queue.param.time.tick) == 0) { snd_seq_queue_process_event()
713 if (snd_seq_timer_set_position_time(q->timer, ev->data.queue.param.time.time) == 0) { snd_seq_queue_process_event()
719 ev->data.queue.param.skew.value, snd_seq_queue_process_event()
720 ev->data.queue.param.skew.base) == 0) { snd_seq_queue_process_event()
738 q = queueptr(ev->data.queue.queue); snd_seq_control_queue()
777 snd_iprintf(buffer, "queue %d: [%s]\n", q->queue, q->name); snd_seq_info_queues_read()
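
snd_seq_queue_use() tracks users of a queue with a per-queue client bitmap plus a counter: test_and_set_bit()/test_and_clear_bit() make sure each client is counted at most once, and the queue timer is kept open while there are users (with defaults applied for the first one) and closed when the count reaches zero. A small sketch of the counting part only, with plain C bit operations standing in for the kernel bitmap helpers:

#include <stdbool.h>
#include <stdio.h>

struct seq_queue_use {
    unsigned long long clients_bitmap;  /* one bit per client number */
    int clients;                        /* number of distinct users */
};

/* Set the bit for `client` and report whether it was already set,
 * standing in for the kernel's atomic test_and_set_bit(). */
static bool test_and_set(unsigned long long *map, int client)
{
    bool was = *map & (1ULL << client);
    *map |= 1ULL << client;
    return was;
}

static bool test_and_clear(unsigned long long *map, int client)
{
    bool was = *map & (1ULL << client);
    *map &= ~(1ULL << client);
    return was;
}

/* Mirror the counting in snd_seq_queue_use(): each client counts once;
 * the caller would open or close the queue timer based on `clients`. */
static void queue_use(struct seq_queue_use *q, int client, bool use)
{
    if (use) {
        if (!test_and_set(&q->clients_bitmap, client))
            q->clients++;
    } else {
        if (test_and_clear(&q->clients_bitmap, client))
            q->clients--;
    }
    printf("client %d %s -> %d user(s)\n",
           client, use ? "use" : "unuse", q->clients);
}

int main(void)
{
    struct seq_queue_use q = { 0, 0 };

    queue_use(&q, 5, true);
    queue_use(&q, 5, true);    /* double use: still one user */
    queue_use(&q, 5, false);
    return 0;
}
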
H A Dseq_queue.h34 int queue; /* queue number */ member in struct:snd_seq_queue
36 char name[64]; /* name of this queue */
38 struct snd_seq_prioq *tickq; /* midi tick event queue */
39 struct snd_seq_prioq *timeq; /* real-time event queue */
41 struct snd_seq_timer *timer; /* time keeper for this queue */
54 /* clients which uses this queue (bitmap) */
56 unsigned int clients; /* users of this queue */
73 /* create new queue (constructor) */
76 /* delete queue (destructor) */
92 /* return pointer to queue structure for specified id */
97 /* return the (first) queue matching with the specified name */
100 /* check single queue and dispatch events */
103 /* access to queue's parameters */
H A Dseq_system.c35 * queue's timer. The queue address is specified in
36 * data.queue.queue.
39 * value is stored on data.queue.value.
51 * queue.
53 * NOTE: the queue to be started, stopped, etc. must be specified
54 * in the data.queue.addr.queue field. queue is used only for
55 * scheduling, and is no longer referred to as the affected queue.
85 /*ev->data.addr.queue = SNDRV_SEQ_ADDRESS_UNKNOWN;*/ setheader()
140 port->capability = SNDRV_SEQ_PORT_CAP_WRITE; /* accept queue control */ snd_seq_system_client_init()
H A Dseq_clientmgr.c430 /* while data available in queue */ snd_seq_read()
545 bounce_ev.queue = SNDRV_SEQ_QUEUE_DIRECT; bounce_error_event()
565 * of the given queue.
569 int queue, int real_time) update_timestamp_of_queue()
573 q = queueptr(queue); update_timestamp_of_queue()
576 event->queue = queue; update_timestamp_of_queue()
687 update_timestamp_of_queue(event, subs->info.queue, deliver_to_subscribers()
818 if (event->queue == SNDRV_SEQ_ADDRESS_SUBSCRIBERS || snd_seq_deliver_event()
822 else if (event->queue == SNDRV_SEQ_ADDRESS_BROADCAST || snd_seq_deliver_event()
838 * This function is called only from queue check routines in timer
896 /* Now queue this cell as the note off event */ snd_seq_dispatch_event()
914 /* Allocate a cell from client pool and enqueue it to queue:
926 /* special queue values - force direct passing */ snd_seq_client_enqueue_event()
927 if (event->queue == SNDRV_SEQ_ADDRESS_SUBSCRIBERS) { snd_seq_client_enqueue_event()
929 event->queue = SNDRV_SEQ_QUEUE_DIRECT; snd_seq_client_enqueue_event()
932 if (event->queue == SNDRV_SEQ_ADDRESS_BROADCAST) { snd_seq_client_enqueue_event()
934 event->queue = SNDRV_SEQ_QUEUE_DIRECT; snd_seq_client_enqueue_event()
953 if (snd_seq_queue_is_used(event->queue, client->number) <= 0) snd_seq_client_enqueue_event()
954 return -EINVAL; /* invalid queue */ snd_seq_client_enqueue_event()
956 return -ENXIO; /* queue is not allocated */ snd_seq_client_enqueue_event()
1547 info.queue = q->queue; snd_seq_ioctl_create_queue()
1551 /* set queue name */ snd_seq_ioctl_create_queue()
1553 snprintf(info.name, sizeof(info.name), "Queue-%d", q->queue); snd_seq_ioctl_create_queue()
1572 return snd_seq_queue_delete(client->number, info.queue); snd_seq_ioctl_delete_queue()
1585 q = queueptr(info.queue); snd_seq_ioctl_get_queue_info()
1590 info.queue = q->queue; snd_seq_ioctl_get_queue_info()
1616 if (snd_seq_queue_check_access(info.queue, client->number)) { snd_seq_ioctl_set_queue_info()
1617 if (snd_seq_queue_set_owner(info.queue, client->number, info.locked) < 0) snd_seq_ioctl_set_queue_info()
1620 snd_seq_queue_use(info.queue, client->number, 1); snd_seq_ioctl_set_queue_info()
1625 q = queueptr(info.queue); snd_seq_ioctl_set_queue_info()
1650 info.queue = q->queue; snd_seq_ioctl_get_named_queue()
1666 struct snd_seq_queue *queue; snd_seq_ioctl_get_queue_status() local
1672 queue = queueptr(status.queue); snd_seq_ioctl_get_queue_status()
1673 if (queue == NULL) snd_seq_ioctl_get_queue_status()
1676 status.queue = queue->queue; snd_seq_ioctl_get_queue_status()
1678 tmr = queue->timer; snd_seq_ioctl_get_queue_status()
1679 status.events = queue->tickq->cells + queue->timeq->cells; snd_seq_ioctl_get_queue_status()
1686 status.flags = queue->flags; snd_seq_ioctl_get_queue_status()
1687 queuefree(queue); snd_seq_ioctl_get_queue_status()
1700 struct snd_seq_queue *queue; snd_seq_ioctl_get_queue_tempo() local
1706 queue = queueptr(tempo.queue); snd_seq_ioctl_get_queue_tempo()
1707 if (queue == NULL) snd_seq_ioctl_get_queue_tempo()
1710 tempo.queue = queue->queue; snd_seq_ioctl_get_queue_tempo()
1712 tmr = queue->timer; snd_seq_ioctl_get_queue_tempo()
1718 queuefree(queue); snd_seq_ioctl_get_queue_tempo()
1729 if (!snd_seq_queue_check_access(tempo->queue, client)) snd_seq_set_queue_tempo()
1731 return snd_seq_queue_timer_set_tempo(tempo->queue, client, tempo); snd_seq_set_queue_tempo()
1755 struct snd_seq_queue *queue; snd_seq_ioctl_get_queue_timer() local
1761 queue = queueptr(timer.queue); snd_seq_ioctl_get_queue_timer()
1762 if (queue == NULL) snd_seq_ioctl_get_queue_timer()
1765 if (mutex_lock_interruptible(&queue->timer_mutex)) { snd_seq_ioctl_get_queue_timer()
1766 queuefree(queue); snd_seq_ioctl_get_queue_timer()
1769 tmr = queue->timer; snd_seq_ioctl_get_queue_timer()
1771 timer.queue = queue->queue; snd_seq_ioctl_get_queue_timer()
1778 mutex_unlock(&queue->timer_mutex); snd_seq_ioctl_get_queue_timer()
1779 queuefree(queue); snd_seq_ioctl_get_queue_timer()
1800 if (snd_seq_queue_check_access(timer.queue, client->number)) { snd_seq_ioctl_set_queue_timer()
1804 q = queueptr(timer.queue); snd_seq_ioctl_set_queue_timer()
1812 snd_seq_queue_timer_close(timer.queue); snd_seq_ioctl_set_queue_timer()
1818 result = snd_seq_queue_timer_open(timer.queue); snd_seq_ioctl_set_queue_timer()
1839 used = snd_seq_queue_is_used(info.queue, client->number); snd_seq_ioctl_get_queue_client()
1862 err = snd_seq_queue_use(info.queue, client->number, info.used); snd_seq_ioctl_set_queue_client()
2069 subs.queue = s->info.queue; snd_seq_ioctl_query_subs()
2264 /* empty write queue as default */ snd_seq_create_kernel_client()
2391 ev->queue = SNDRV_SEQ_QUEUE_DIRECT; snd_seq_kernel_client_dispatch()
2482 snd_iprintf(buffer, "[%c:%d]", ((s->info.flags & SNDRV_SEQ_PORT_SUBS_TIME_REAL) ? 'r' : 't'), s->info.queue); snd_seq_info_dump_subscribers()
568 update_timestamp_of_queue(struct snd_seq_event *event, int queue, int real_time) update_timestamp_of_queue() argument
/linux-4.1.27/drivers/gpu/drm/amd/amdkfd/
H A Dkfd_kernel_queue.c47 pr_debug("amdkfd: In func %s initializing queue type %d size %d\n", initialize()
124 if (init_queue(&kq->queue, prop) != 0) initialize()
127 kq->queue->device = dev; initialize()
128 kq->queue->process = kfd_get_process(current); initialize()
130 retval = kq->mqd->init_mqd(kq->mqd, &kq->queue->mqd, initialize()
131 &kq->queue->mqd_mem_obj, initialize()
132 &kq->queue->gart_mqd_addr, initialize()
133 &kq->queue->properties); initialize()
140 kq->queue->pipe = KFD_CIK_HIQ_PIPE; initialize()
141 kq->queue->queue = KFD_CIK_HIQ_QUEUE; initialize()
142 kq->mqd->load_mqd(kq->mqd, kq->queue->mqd, kq->queue->pipe, initialize()
143 kq->queue->queue, NULL); initialize()
157 print_queue(kq->queue); initialize()
162 uninit_queue(kq->queue); initialize()
182 if (kq->queue->properties.type == KFD_QUEUE_TYPE_HIQ) uninitialize()
187 kq->queue->pipe, uninitialize()
188 kq->queue->queue); uninitialize()
189 else if (kq->queue->properties.type == KFD_QUEUE_TYPE_DIQ) uninitialize()
192 kq->mqd->uninit_mqd(kq->mqd, kq->queue->mqd, kq->queue->mqd_mem_obj); uninitialize()
199 kq->queue->properties.doorbell_ptr); uninitialize()
200 uninit_queue(kq->queue); uninitialize()
216 queue_size_dwords = kq->queue->properties.queue_size / sizeof(uint32_t); acquire_packet_buffer()
265 write_kernel_doorbell(kq->queue->properties.doorbell_ptr, submit_packet()
272 kq->pending_wptr = *kq->queue->properties.write_ptr; rollback_packet()
303 pr_err("amdkfd: failed to init kernel queue\n"); kernel_queue_init()
326 pr_err("amdkfd: starting kernel queue test\n"); test_kq()
337 pr_err("amdkfd: ending kernel queue test\n"); test_kq()
H A Dkfd_kernel_queue.h34 * @initialize: Initialize a kernel queue, including allocations of GART memory
35 * needed for the queue.
37 * @uninitialize: Uninitialize a kernel queue and free all its memory usages.
40 * queue ring buffer where the calling function can write its packet. It is
45 * @submit_packet: Update the write pointer and doorbell of a kernel queue.
48 * queue are equal, which means the CP has read all the submitted packets.
74 struct queue *queue; member in struct:kernel_queue
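The kernel_queue ops documented above describe a classic submission ring: acquire_packet_buffer() reserves room in the ring buffer for the caller to write its packet, and submit_packet() publishes it by advancing the write pointer and writing the doorbell; the queue has drained once the read and write pointers are equal. The stand-alone sketch below illustrates that acquire/submit split; the struct, sizes and the printf standing in for write_kernel_doorbell() are illustrative assumptions, not the amdkfd implementation.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define RING_DWORDS 64                  /* ring size in 32-bit words */

struct kq_sketch {
        uint32_t ring[RING_DWORDS];
        uint32_t rptr;                  /* read pointer, advanced by the consumer */
        uint32_t wptr;                  /* published write pointer */
        uint32_t pending_wptr;          /* write pointer staged by acquire */
};

/* Reserve 'size' dwords; returns where the caller writes the packet, or
 * NULL if there is no room (simplified: no wrap within one packet). */
static uint32_t *acquire_packet_buffer(struct kq_sketch *kq, uint32_t size)
{
        uint32_t used = (kq->wptr - kq->rptr) % RING_DWORDS;

        if (used + size >= RING_DWORDS)         /* keep one slot free */
                return NULL;
        if (kq->wptr + size > RING_DWORDS)      /* packet would cross the end */
                return NULL;
        kq->pending_wptr = (kq->wptr + size) % RING_DWORDS;
        return &kq->ring[kq->wptr];
}

/* Publish the staged packet: advance wptr, then "ring the doorbell". */
static void submit_packet(struct kq_sketch *kq)
{
        kq->wptr = kq->pending_wptr;
        printf("doorbell: wptr=%u rptr=%u\n", kq->wptr, kq->rptr);
}

int main(void)
{
        struct kq_sketch kq = { {0}, 0, 0, 0 };
        uint32_t *pkt = acquire_packet_buffer(&kq, 4);

        if (pkt) {
                memset(pkt, 0xab, 4 * sizeof(*pkt));    /* build the packet */
                submit_packet(&kq);
        }
        return 0;
}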
H A Dkfd_queue.c32 pr_debug("Printing queue properties:\n"); print_queue_properties()
45 void print_queue(struct queue *q) print_queue()
49 pr_debug("Printing queue:\n"); print_queue()
66 int init_queue(struct queue **q, struct queue_properties properties) init_queue()
68 struct queue *tmp; init_queue()
72 tmp = kzalloc(sizeof(struct queue), GFP_KERNEL); init_queue()
82 void uninit_queue(struct queue *q) uninit_queue()
H A Dkfd_priv.h80 * the HIQ queue is used as a special queue that dispatches the configuration
82 * the DIQ queue is a debugging queue that dispatches debugging commands to the
146 * page used by kernel queue
192 * @KFD_PREEMPT_TYPE_FILTER_SINGLE_QUEUE: Preempts single queue.
215 * @KFD_QUEUE_TYPE_COMPUTE: Regular user mode queue type.
217 * @KFD_QUEUE_TYPE_SDMA: Sdma user mode queue type.
219 * @KFD_QUEUE_TYPE_HIQ: HIQ queue type.
221 * @KFD_QUEUE_TYPE_DIQ: DIQ queue type.
238 * @type: The queue type.
246 * @priority: Defines the queue priority relative to other queues in the
254 * this field defines that the queue is not active.
262 * the queue ring buffer. This field should be similar to write_ptr and the user
267 * @is_interop: Defines if this is an interop queue. An interop queue means that the
268 * queue can access both graphics and compute resources.
270 * @is_active: Defines if the queue is active or not.
273 * of the queue.
275 * This structure represents the queue properties for each queue, whether
276 * it is a user mode or a kernel mode queue.
307 * struct queue
311 * @mqd: The queue MQD.
317 * @properties: The queue properties.
320 * that the queue should be executed on.
322 * @pipe: Used only in no cp scheduling mode and identifies the queue's pipe id.
324 * @queue: Used only in no cp scheduling mode and identifies the queue's slot.
326 * @process: The kfd process that created this queue.
328 * @device: The kfd device that created this queue.
335 struct queue { struct
344 uint32_t queue; member in struct:queue
573 int init_queue(struct queue **q, struct queue_properties properties);
574 void uninit_queue(struct queue *q);
576 void print_queue(struct queue *q);
592 struct queue *q;
H A Dkfd_process_queue_manager.c40 if (pqn->kq && pqn->kq->queue->properties.queue_id == qid) get_queue_by_qid()
102 pqn->kq->queue->properties.queue_id); pqm_uninit()
105 pr_err("kfd: failed to destroy queue\n"); pqm_uninit()
114 struct kfd_dev *dev, struct queue **q, create_cp_queue()
139 pr_debug("kfd: PQM After init queue"); create_cp_queue()
158 struct queue *q; pqm_create_queue()
218 kq->queue->properties.queue_id = *qid; pqm_create_queue()
230 pr_debug("Error dqm create queue\n"); pqm_create_queue()
234 pr_debug("kfd: PQM After DQM create queue\n"); pqm_create_queue()
240 pr_debug("kfd: PQM done creating queue\n"); pqm_create_queue()
273 pr_err("kfd: queue id does not match any known queue\n"); pqm_destroy_queue()
291 /* destroy kernel queue (DIQ) */ pqm_destroy_queue()
326 pr_debug("amdkfd: No queue %d exists for update operation\n", pqm_update_queue()
H A Dkfd_device_queue_manager.h71 * @stop: This routine stops execution of all the active queues running on the
74 * @uninitialize: Destroys all the device queue manager resources allocated in
77 * @create_kernel_queue: Creates kernel queue. Used for debug queue.
79 * @destroy_kernel_queue: Destroys kernel queue. Used for debug queue.
88 struct queue *q,
93 struct queue *q);
95 struct queue *q);
128 * for queue creation and queue destruction. This base class hides the
H A Dkfd_device_queue_manager.c36 /* Size of the per-pipe EOP queue */
44 struct queue *q,
51 struct queue *q,
95 struct queue *q) allocate_vmid()
119 struct queue *q) deallocate_vmid()
132 struct queue *q, create_queue_nocpsch()
146 pr_warn("amdkfd: Can't create new usermode queue because %d queues were already created\n", create_queue_nocpsch()
184 * Unconditionally increment this counter, regardless of the queue's create_queue_nocpsch()
185 * type or whether the queue is active. create_queue_nocpsch()
195 static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q) allocate_hqd()
212 q->queue = bit; allocate_hqd()
221 pr_debug("kfd: DQM %s hqd slot - pipe (%d) queue(%d)\n", allocate_hqd()
222 __func__, q->pipe, q->queue); allocate_hqd()
230 struct queue *q) deallocate_hqd()
232 set_bit(q->queue, (unsigned long *)&dqm->allocated_queues[q->pipe]); deallocate_hqd()
236 struct queue *q, create_compute_queue_nocpsch()
259 pr_debug("kfd: loading mqd to hqd on pipe (%d) queue (%d)\n", create_compute_queue_nocpsch()
261 q->queue); create_compute_queue_nocpsch()
264 q->queue, (uint32_t __user *) q->properties.write_ptr); create_compute_queue_nocpsch()
276 struct queue *q) destroy_queue_nocpsch()
314 q->pipe, q->queue); destroy_queue_nocpsch()
328 * Unconditionally decrement this counter, regardless of the queue's destroy_queue_nocpsch()
340 static int update_queue(struct device_queue_manager *dqm, struct queue *q) update_queue()
618 static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q, init_sdma_vm()
632 struct queue *q, create_sdma_queue_nocpsch()
650 pr_debug(" sdma queue id: %d\n", q->properties.sdma_queue_id); create_sdma_queue_nocpsch()
695 " queue mask: 0x%8llX\n", set_sched_resources()
798 pr_warn("amdkfd: Can't create new kernel queue because %d queues were already created\n", create_kernel_queue_cpsch()
805 * Unconditionally increment this counter, regardless of the queue's create_kernel_queue_cpsch()
806 * type or whether the queue is active. create_kernel_queue_cpsch()
836 * Unconditionally decrement this counter, regardless of the queue's destroy_kernel_queue_cpsch()
845 static void select_sdma_engine_id(struct queue *q) select_sdma_engine_id()
853 static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q, create_queue_cpsch()
869 pr_warn("amdkfd: Can't create new usermode queue because %d queues were already created\n", create_queue_cpsch()
902 * Unconditionally increment this counter, regardless of the queue's create_queue_cpsch()
903 * type or whether the queue is active. create_queue_cpsch()
954 pr_debug("kfd: Before destroying queues, sdma queue count is : %u\n", destroy_queues_cpsch()
1022 struct queue *q) destroy_queue_cpsch()
1031 /* remove queue from list to prevent rescheduling after preemption */ destroy_queue_cpsch()
1052 * Unconditionally decrement this counter, regardless of the queue's destroy_queue_cpsch()
1149 pr_debug("kfd: loading device queue manager\n"); device_queue_manager_init()
/linux-4.1.27/include/net/
H A Drequest_sock.h166 /** struct request_sock_queue - queue of request_socks
193 int reqsk_queue_alloc(struct request_sock_queue *queue,
196 void __reqsk_queue_destroy(struct request_sock_queue *queue);
197 void reqsk_queue_destroy(struct request_sock_queue *queue);
202 reqsk_queue_yank_acceptq(struct request_sock_queue *queue) reqsk_queue_yank_acceptq() argument
204 struct request_sock *req = queue->rskq_accept_head; reqsk_queue_yank_acceptq()
206 queue->rskq_accept_head = NULL; reqsk_queue_yank_acceptq()
210 static inline int reqsk_queue_empty(struct request_sock_queue *queue) reqsk_queue_empty() argument
212 return queue->rskq_accept_head == NULL; reqsk_queue_empty()
215 static inline void reqsk_queue_add(struct request_sock_queue *queue, reqsk_queue_add() argument
223 if (queue->rskq_accept_head == NULL) reqsk_queue_add()
224 queue->rskq_accept_head = req; reqsk_queue_add()
226 queue->rskq_accept_tail->dl_next = req; reqsk_queue_add()
228 queue->rskq_accept_tail = req; reqsk_queue_add()
232 static inline struct request_sock *reqsk_queue_remove(struct request_sock_queue *queue) reqsk_queue_remove() argument
234 struct request_sock *req = queue->rskq_accept_head; reqsk_queue_remove()
238 queue->rskq_accept_head = req->dl_next; reqsk_queue_remove()
239 if (queue->rskq_accept_head == NULL) reqsk_queue_remove()
240 queue->rskq_accept_tail = NULL; reqsk_queue_remove()
245 static inline void reqsk_queue_removed(struct request_sock_queue *queue, reqsk_queue_removed() argument
248 struct listen_sock *lopt = queue->listen_opt; reqsk_queue_removed()
255 static inline void reqsk_queue_added(struct request_sock_queue *queue) reqsk_queue_added() argument
257 struct listen_sock *lopt = queue->listen_opt; reqsk_queue_added()
273 static inline int reqsk_queue_len(const struct request_sock_queue *queue) reqsk_queue_len() argument
275 const struct listen_sock *lopt = queue->listen_opt; reqsk_queue_len()
280 static inline int reqsk_queue_len_young(const struct request_sock_queue *queue) reqsk_queue_len_young() argument
282 return listen_sock_young(queue->listen_opt); reqsk_queue_len_young()
285 static inline int reqsk_queue_is_full(const struct request_sock_queue *queue) reqsk_queue_is_full() argument
287 return reqsk_queue_len(queue) >> queue->listen_opt->max_qlen_log; reqsk_queue_is_full()
290 void reqsk_queue_hash_req(struct request_sock_queue *queue,
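The reqsk_queue_add()/reqsk_queue_remove() inlines above are a plain singly-linked FIFO: insertion links the new request behind the tail via dl_next, and removal pops the head, clearing the tail when the list empties. The same shape in a self-contained sketch (the struct and field names here are stand-ins, not the networking types):

#include <stdio.h>

struct req {
        int id;
        struct req *next;               /* plays the role of dl_next */
};

struct req_fifo {
        struct req *head;               /* cf. rskq_accept_head */
        struct req *tail;               /* cf. rskq_accept_tail */
};

static void fifo_add(struct req_fifo *q, struct req *r)
{
        r->next = NULL;
        if (q->head == NULL)            /* empty: new element is both ends */
                q->head = r;
        else
                q->tail->next = r;
        q->tail = r;
}

static struct req *fifo_remove(struct req_fifo *q)
{
        struct req *r = q->head;

        if (r) {
                q->head = r->next;
                if (q->head == NULL)    /* removed the last element */
                        q->tail = NULL;
        }
        return r;
}

int main(void)
{
        struct req_fifo q = { NULL, NULL };
        struct req a = { 1, NULL }, b = { 2, NULL };
        struct req *first, *second;

        fifo_add(&q, &a);
        fifo_add(&q, &b);
        first = fifo_remove(&q);
        second = fifo_remove(&q);
        printf("%d %d\n", first->id, second->id);       /* prints: 1 2 */
        return 0;
}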
H A Dinet_frag.h19 * fragment queue flags
23 * @INET_FRAG_COMPLETE: frag queue has been processed and is due for destruction
24 * @INET_FRAG_EVICTED: frag queue is being evicted
34 * struct inet_frag_queue - fragment queue
36 * @lock: spinlock protecting the queue
37 * @timer: queue expiration timer
39 * @refcnt: reference count of the queue
45 * @flags: fragment queue flags
/linux-4.1.27/drivers/soc/ti/
H A Dknav_qmss_acc.c2 * Keystone accumulator queue manager
45 int range_base, queue; __knav_acc_notify() local
50 for (queue = 0; queue < range->num_queues; queue++) { __knav_acc_notify()
52 queue); __knav_acc_notify()
56 range_base + queue); __knav_acc_notify()
61 queue = acc->channel - range->acc_info.start_channel; __knav_acc_notify()
62 inst = knav_range_offset_to_inst(kdev, range, queue); __knav_acc_notify()
64 range_base + queue); __knav_acc_notify()
104 int range_base, channel, queue = 0; knav_acc_int_handler() local
115 for (queue = 0; queue < range->num_irqs; queue++) knav_acc_int_handler()
116 if (range->irqs[queue].irq == irq) knav_acc_int_handler()
118 kq = knav_range_offset_to_inst(kdev, range, queue); knav_acc_int_handler()
119 acc += queue; knav_acc_int_handler()
165 queue = list[ACC_LIST_ENTRY_QUEUE_IDX] >> 16; knav_acc_int_handler()
166 if (queue < range_base || knav_acc_int_handler()
167 queue >= range_base + range->num_queues) { knav_acc_int_handler()
169 "bad queue %d, expecting %d-%d\n", knav_acc_int_handler()
170 queue, range_base, knav_acc_int_handler()
174 queue -= range_base; knav_acc_int_handler()
176 queue); knav_acc_int_handler()
182 "acc-irq: queue %d full, entry dropped\n", knav_acc_int_handler()
183 queue + range_base); knav_acc_int_handler()
190 dev_dbg(kdev->dev, "acc-irq: enqueue %08x at %d, queue %d\n", knav_acc_int_handler()
191 val, idx, queue + range_base); knav_acc_int_handler()
213 int queue, bool enabled) knav_range_setup_acc_irq()
226 acc = range->acc + queue; knav_range_setup_acc_irq()
227 irq = range->irqs[queue].irq; knav_range_setup_acc_irq()
228 cpu_map = range->irqs[queue].cpu_map; knav_range_setup_acc_irq()
233 new = old | BIT(queue); knav_range_setup_acc_irq()
235 new = old & ~BIT(queue); knav_range_setup_acc_irq()
279 [ACC_RET_INVALID_QUEUE] = "invalid queue", knav_acc_result_str()
316 int queue) knav_acc_setup_cmd()
328 acc = range->acc + queue; knav_acc_setup_cmd()
329 queue_base = range->queue_base + queue; knav_acc_setup_cmd()
349 int queue) knav_acc_stop()
355 acc = range->acc + queue; knav_acc_stop()
357 knav_acc_setup_cmd(kdev, range, &cmd, queue); knav_acc_stop()
367 int queue) knav_acc_start()
373 acc = range->acc + queue; knav_acc_start()
375 knav_acc_setup_cmd(kdev, range, &cmd, queue); knav_acc_start()
390 int queue; knav_acc_init_range() local
392 for (queue = 0; queue < range->num_queues; queue++) { knav_acc_init_range()
393 acc = range->acc + queue; knav_acc_init_range()
395 knav_acc_stop(kdev, range, queue); knav_acc_init_range()
397 result = knav_acc_start(kdev, range, queue); knav_acc_init_range()
531 if (of_get_property(node, "multi-queue", NULL)) { knav_init_acc_range()
536 "misaligned multi-queue accumulator range %s\n", knav_init_acc_range()
212 knav_range_setup_acc_irq(struct knav_range_info *range, int queue, bool enabled) knav_range_setup_acc_irq() argument
313 knav_acc_setup_cmd(struct knav_device *kdev, struct knav_range_info *range, struct knav_reg_acc_command *cmd, int queue) knav_acc_setup_cmd() argument
347 knav_acc_stop(struct knav_device *kdev, struct knav_range_info *range, int queue) knav_acc_stop() argument
365 knav_acc_start(struct knav_device *kdev, struct knav_range_info *range, int queue) knav_acc_start() argument
H A Dknav_qmss.h156 * struct knav_queue_stats: queue statistics
172 * struct knav_reg_queue: queue registers
173 * @entry_count: valid entries in the queue
174 * @byte_count: total byte count in the queue
175 * @packet_size: packet size for the queue
215 * @queue: queue registers
228 struct knav_queue *queue; member in struct:knav_pool
240 * struct knav_queue_inst: qmss queue instance properties
246 * @qmgr: queue manager info
247 * @id: queue instance id
249 * @notify_needed: notifier needed based on queue type
252 * @name: queue instance name
272 * struct knav_queue: qmss queue properties
273 * @reg_push, reg_pop, reg_peek: push, pop and peek queue registers
274 * @inst: qmss queue instance properties
277 * @notifier_enabled: notifier enabled for a given queue
279 * @flags: queue flags
H A Dknav_qmss_queue.c72 * knav_queue_notify: qmss queue notifier call
74 * @inst: qmss queue instance like accumulator
107 unsigned queue = inst->id - range->queue_base; knav_queue_setup_irq() local
112 irq = range->irqs[queue].irq; knav_queue_setup_irq()
113 cpu_map = range->irqs[queue].cpu_map; knav_queue_setup_irq()
134 unsigned queue = inst->id - inst->range->queue_base; knav_queue_free_irq() local
138 irq = range->irqs[queue].irq; knav_queue_free_irq()
371 unsigned queue; knav_gp_set_notify() local
374 queue = inst->id - range->queue_base; knav_gp_set_notify()
376 enable_irq(range->irqs[queue].irq); knav_gp_set_notify()
378 disable_irq_nosync(range->irqs[queue].irq); knav_gp_set_notify()
498 * knav_queue_open() - open a hardware queue
499 * @name - name to give the queue handle
500 * @id - desired queue number if any or specifies the type
501 * of queue
503 * KNAV_QUEUE_SHARED - allow the queue to be shared. Queues are
505 * Subsequent attempts to open a shared queue should
508 * Returns a handle to the open hardware queue if successful. Use IS_ERR()
532 * knav_queue_close() - close a hardware queue handle
558 * knav_queue_device_control() - Perform control operations on a queue
559 * @qh - queue handle
609 * knav_queue_push() - push data (or descriptor) to the tail of a queue
610 * @qh - hardware queue handle
632 * knav_queue_pop() - pop data (or descriptor) from the head of a queue
633 * @qh - hardware queue handle
669 /* carve out descriptors and push into queue */ kdesc_fill_pool()
685 knav_queue_push(pool->queue, dma_addr, dma_size, 0); kdesc_fill_pool()
689 /* pop out descriptors and close the queue */ kdesc_empty_pool()
697 if (!pool->queue) kdesc_empty_pool()
701 dma = knav_queue_pop(pool->queue, &size); kdesc_empty_pool()
712 knav_queue_close(pool->queue); kdesc_empty_pool()
773 pool->queue = knav_queue_open(name, KNAV_QUEUE_GP, 0);
774 if (IS_ERR_OR_NULL(pool->queue)) {
776 "failed to open queue for pool(%s), error %ld\n",
777 name, PTR_ERR(pool->queue));
778 ret = PTR_ERR(pool->queue);
879 dma = knav_queue_pop(pool->queue, &size); knav_pool_desc_get()
896 knav_queue_push(pool->queue, dma, pool->region->desc_size, 0); knav_pool_desc_put()
958 return knav_queue_get_count(pool->queue); knav_pool_count()
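Taken together, the kdesc_fill_pool()/kdesc_empty_pool() and knav_pool_* fragments above show the intended lifecycle for these hardware queues: open a queue, push each descriptor (DMA address plus size) onto it, later pop descriptors until the queue runs dry, then close the handle. The mock below reproduces that lifecycle with simplified stand-in functions; it is not the knav_qmss API, only its shape.

#include <stdio.h>

#define QDEPTH 8

/* Simplified stand-in for a hardware descriptor queue. */
struct mock_queue {
        unsigned long dma[QDEPTH];
        unsigned int  size[QDEPTH];
        unsigned int  head, count;
};

static int mock_queue_push(struct mock_queue *q, unsigned long dma, unsigned int size)
{
        unsigned int idx;

        if (q->count == QDEPTH)
                return -1;                      /* queue full */
        idx = (q->head + q->count) % QDEPTH;
        q->dma[idx] = dma;
        q->size[idx] = size;
        q->count++;
        return 0;
}

/* Returns 0 when the queue is empty, mirroring a NULL descriptor pop. */
static unsigned long mock_queue_pop(struct mock_queue *q, unsigned int *size)
{
        unsigned long dma;

        if (!q->count)
                return 0;
        dma = q->dma[q->head];
        *size = q->size[q->head];
        q->head = (q->head + 1) % QDEPTH;
        q->count--;
        return dma;
}

int main(void)
{
        struct mock_queue q = { {0}, {0}, 0, 0 };
        unsigned long base = 0x1000, dma;
        unsigned int i, size, desc_size = 128;

        /* "fill pool": carve a region into descriptors and push them */
        for (i = 0; i < 4; i++)
                mock_queue_push(&q, base + i * desc_size, desc_size);

        /* "empty pool": pop until the queue runs dry, then it can be closed */
        while ((dma = mock_queue_pop(&q, &size)) != 0)
                printf("popped descriptor @0x%lx, size %u\n", dma, size);
        return 0;
}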
1208 dev_err(dev, "invalid queue range %s\n", range->name); knav_setup_queue_range()
1288 dev_err(kdev->dev, "no valid queue range found\n");
1379 dev_err(dev, "invalid qmgr queue range\n"); for_each_child_of_node()
1384 dev_info(dev, "qmgr start queue %d, number of queues %d\n", for_each_child_of_node()
1427 dev_info(dev, "added qmgr start queue %d, num of queues %d, reg_peek %p, reg_status %p, reg_config %p, reg_region %p, reg_push %p, reg_pop %p\n", for_each_child_of_node()
1705 if (of_property_read_u32_array(node, "queue-range", temp, 2)) { knav_queue_probe()
1706 dev_err(dev, "queue-range not specified\n"); knav_queue_probe()
1713 /* Initialize queue managers using device tree configuration */ knav_queue_probe()
1716 dev_err(dev, "queue manager info not specified\n"); knav_queue_probe()
1738 /* get usable queue range values from device tree */ knav_queue_probe()
1739 queue_pools = of_get_child_by_name(node, "queue-pools"); knav_queue_probe()
1741 dev_err(dev, "queue-pools not specified\n"); knav_queue_probe()
/linux-4.1.27/drivers/staging/rtl8723au/os_dep/
H A Dxmit_linux.c63 u16 queue; rtw_os_pkt_complete23a() local
65 queue = skb_get_queue_mapping(pkt); rtw_os_pkt_complete23a()
67 if (__netif_subqueue_stopped(padapter->pnetdev, queue) && rtw_os_pkt_complete23a()
68 (pxmitpriv->hwxmits[queue].accnt < WMM_XMIT_THRESHOLD)) rtw_os_pkt_complete23a()
69 netif_wake_subqueue(padapter->pnetdev, queue); rtw_os_pkt_complete23a()
71 if (__netif_subqueue_stopped(padapter->pnetdev, queue)) rtw_os_pkt_complete23a()
72 netif_wake_subqueue(padapter->pnetdev, queue); rtw_os_pkt_complete23a()
105 u16 queue; rtw_check_xmit_resource() local
107 queue = skb_get_queue_mapping(pkt); rtw_check_xmit_resource()
110 if (pxmitpriv->hwxmits[queue].accnt > WMM_XMIT_THRESHOLD) rtw_check_xmit_resource()
111 netif_stop_subqueue(padapter->pnetdev, queue); rtw_check_xmit_resource()
114 if (!netif_tx_queue_stopped(netdev_get_tx_queue(padapter->pnetdev, queue))) rtw_check_xmit_resource()
115 netif_stop_subqueue(padapter->pnetdev, queue); rtw_check_xmit_resource()
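The two xmit_linux.c fragments above implement simple per-subqueue flow control: rtw_check_xmit_resource() stops a TX subqueue once its outstanding count climbs above WMM_XMIT_THRESHOLD, and rtw_os_pkt_complete23a() wakes it again when a completed packet drops the count back under the threshold. A compact sketch of that stop/wake pattern (the threshold value and helper names below are placeholders):

#include <stdbool.h>
#include <stdio.h>

#define XMIT_THRESHOLD 8                /* placeholder for WMM_XMIT_THRESHOLD */

struct subqueue {
        unsigned int accnt;             /* packets in flight on this subqueue */
        bool stopped;
};

/* Transmit path: called before queueing another packet. */
static void check_xmit_resource(struct subqueue *sq)
{
        if (sq->accnt > XMIT_THRESHOLD && !sq->stopped) {
                sq->stopped = true;             /* cf. netif_stop_subqueue() */
                printf("subqueue stopped at accnt=%u\n", sq->accnt);
        }
}

/* Completion path: called after a packet has been sent. */
static void pkt_complete(struct subqueue *sq)
{
        if (sq->accnt)
                sq->accnt--;
        if (sq->stopped && sq->accnt < XMIT_THRESHOLD) {
                sq->stopped = false;            /* cf. netif_wake_subqueue() */
                printf("subqueue woken at accnt=%u\n", sq->accnt);
        }
}

int main(void)
{
        struct subqueue sq = { 0, false };
        unsigned int i;

        for (i = 0; i < 12; i++) {              /* queue a burst of packets */
                sq.accnt++;
                check_xmit_resource(&sq);
        }
        while (sq.accnt)                        /* drain the completions */
                pkt_complete(&sq);
        return 0;
}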
/linux-4.1.27/drivers/misc/vmw_vmci/
H A Dvmci_queue_pair.c70 * In more detail. When a VMCI queue pair is first created, it will be in the
79 * vmci_qp_broker_set_page_store to specify the UVAs of the queue pair at
84 * - VMCIQPB_CREATED_MEM: this state is the result when the queue pair
85 * is created by a VMX using the queue pair device backend that
86 * sets the UVAs of the queue pair immediately and stores the
90 * Once the queue pair is in one of the created states (with the exception of
92 * queue pair. Again we have two new states possible:
97 * - from VMCIQPB_CREATED_NO_MEM when a new-style VMX allocates a queue
98 * pair, and attaches to a queue pair previously created by the host side.
100 * - from VMCIQPB_CREATED_MEM when the host side attaches to a queue pair
106 * - VMCIQPB_ATTACHED_NO_MEM: If the queue pair already was in the
108 * bring the queue pair into this state. Once vmci_qp_broker_set_page_store
112 * From the attached queue pair, the queue pair can enter the shutdown states
113 * when either side of the queue pair detaches. If the guest side detaches
114 * first, the queue pair will enter the VMCIQPB_SHUTDOWN_NO_MEM state, where
115 * the content of the queue pair will no longer be available. If the host
116 * side detaches first, the queue pair will either enter the
123 * memory will no longer be available, and the queue pair will transition from
125 * in which case the queue pair will transition from the *_NO_MEM state at that
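The long comment above describes a small state machine for a queue pair in the broker. A condensed restatement as an annotated enum may help; the spellings and annotations below follow the comment as excerpted and should be read as a summary sketch, not the driver's actual definitions.

/* Summary of the broker states described above (sketch only). */
enum qp_broker_state_sketch {
        QPB_CREATED_NO_MEM,     /* created, but the guest memory / page store
                                 * has not been supplied yet */
        QPB_CREATED_MEM,        /* created by a VMX that set the UVAs and
                                 * page store immediately */
        QPB_ATTACHED_NO_MEM,    /* both sides attached, backing memory still
                                 * missing (waits for set_page_store) */
        QPB_ATTACHED_MEM,       /* both sides attached and memory mapped:
                                 * the pair is usable */
        QPB_SHUTDOWN_NO_MEM,    /* guest side detached first; queue content
                                 * is no longer available */
        QPB_SHUTDOWN_MEM,       /* host side detached first while the guest
                                 * memory is still registered */
};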
142 typedef int vmci_memcpy_to_queue_func(struct vmci_queue *queue,
146 const struct vmci_queue *queue,
151 struct mutex __mutex; /* Protects the queue. */
201 * In the queue pair broker, we always use the guest point of view for
202 * the produce and consume queue values and references, e.g., the
203 * produce queue size stored is the guests produce queue size. The
205 * the local queue pairs on the host, in which case the host endpoint
206 * that creates the queue pair will have the right orientation, and
234 void *local_mem; /* Kernel memory for local queue pair */
248 struct mutex mutex; /* Protect queue list. */
268 * Frees kernel VA space for a given queue and its queue header, and
273 struct vmci_queue *queue = q; qp_free_queue() local
275 if (queue) { qp_free_queue()
281 queue->kernel_if->u.g.vas[i], qp_free_queue()
282 queue->kernel_if->u.g.pas[i]); qp_free_queue()
285 vfree(queue); qp_free_queue()
290 * Allocates kernel queue pages of specified size with IOMMU mappings,
291 * plus space for the queue structure/kernel interface and the queue
297 struct vmci_queue *queue; qp_alloc_queue() local
300 size_t queue_size = sizeof(*queue) + sizeof(*queue->kernel_if); qp_alloc_queue()
305 (sizeof(*queue->kernel_if->u.g.pas) + qp_alloc_queue()
306 sizeof(*queue->kernel_if->u.g.vas))) qp_alloc_queue()
309 pas_size = num_pages * sizeof(*queue->kernel_if->u.g.pas); qp_alloc_queue()
310 vas_size = num_pages * sizeof(*queue->kernel_if->u.g.vas); qp_alloc_queue()
313 queue = vmalloc(queue_size); qp_alloc_queue()
314 if (!queue) qp_alloc_queue()
317 queue->q_header = NULL; qp_alloc_queue()
318 queue->saved_header = NULL; qp_alloc_queue()
319 queue->kernel_if = (struct vmci_queue_kern_if *)(queue + 1); qp_alloc_queue()
320 queue->kernel_if->mutex = NULL; qp_alloc_queue()
321 queue->kernel_if->num_pages = num_pages; qp_alloc_queue()
322 queue->kernel_if->u.g.pas = (dma_addr_t *)(queue->kernel_if + 1); qp_alloc_queue()
323 queue->kernel_if->u.g.vas = qp_alloc_queue()
324 (void **)((u8 *)queue->kernel_if->u.g.pas + pas_size); qp_alloc_queue()
325 queue->kernel_if->host = false; qp_alloc_queue()
328 queue->kernel_if->u.g.vas[i] = qp_alloc_queue()
330 &queue->kernel_if->u.g.pas[i], qp_alloc_queue()
332 if (!queue->kernel_if->u.g.vas[i]) { qp_alloc_queue()
334 qp_free_queue(queue, i * PAGE_SIZE); qp_alloc_queue()
340 queue->q_header = queue->kernel_if->u.g.vas[0]; qp_alloc_queue()
342 return queue; qp_alloc_queue()
347 * kmap()/kunmap() to dynamically map/unmap required portions of the queue
348 * by traversing the offset -> page translation structure for the queue.
349 * Assumes that offset + size does not wrap around in the queue.
351 static int __qp_memcpy_to_queue(struct vmci_queue *queue, __qp_memcpy_to_queue() argument
357 struct vmci_queue_kern_if *kernel_if = queue->kernel_if; __qp_memcpy_to_queue()
407 * kmap()/kunmap() to dynamically map/unmap required portions of the queue
408 * by traversing the offset -> page translation structure for the queue.
409 * Assumes that offset + size does not wrap around in the queue.
412 const struct vmci_queue *queue, __qp_memcpy_from_queue()
417 struct vmci_queue_kern_if *kernel_if = queue->kernel_if; __qp_memcpy_from_queue()
466 * Allocates two lists of PPNs --- one for the pages in the produce queue,
467 * and the other for the pages in the consume queue. Initializes the list
469 * the queue headers).
541 * Frees the two list of PPNs for a queue pair.
555 * of the produce queue and the consume queue.
569 static int qp_memcpy_to_queue(struct vmci_queue *queue, qp_memcpy_to_queue() argument
573 return __qp_memcpy_to_queue(queue, queue_offset, qp_memcpy_to_queue()
579 const struct vmci_queue *queue, qp_memcpy_from_queue()
583 queue, queue_offset, size, false); qp_memcpy_from_queue()
589 static int qp_memcpy_to_queue_iov(struct vmci_queue *queue, qp_memcpy_to_queue_iov() argument
599 return __qp_memcpy_to_queue(queue, queue_offset, msg, size, true); qp_memcpy_to_queue_iov()
607 const struct vmci_queue *queue, qp_memcpy_from_queue_iov()
614 return __qp_memcpy_from_queue(dest, queue, queue_offset, size, true); qp_memcpy_from_queue_iov()
618 * Allocates kernel VA space of specified size plus space for the queue
619 * and kernel interface. This is different from the guest queue allocator,
620 * because we do not allocate our own queue header/data pages here but
625 struct vmci_queue *queue; qp_host_alloc_queue() local
628 const size_t queue_size = sizeof(*queue) + sizeof(*(queue->kernel_if)); qp_host_alloc_queue()
631 sizeof(*queue->kernel_if->u.h.page)) qp_host_alloc_queue()
634 queue_page_size = num_pages * sizeof(*queue->kernel_if->u.h.page); qp_host_alloc_queue()
636 queue = kzalloc(queue_size + queue_page_size, GFP_KERNEL); qp_host_alloc_queue()
637 if (queue) { qp_host_alloc_queue()
638 queue->q_header = NULL; qp_host_alloc_queue()
639 queue->saved_header = NULL; qp_host_alloc_queue()
640 queue->kernel_if = (struct vmci_queue_kern_if *)(queue + 1); qp_host_alloc_queue()
641 queue->kernel_if->host = true; qp_host_alloc_queue()
642 queue->kernel_if->mutex = NULL; qp_host_alloc_queue()
643 queue->kernel_if->num_pages = num_pages; qp_host_alloc_queue()
644 queue->kernel_if->u.h.header_page = qp_host_alloc_queue()
645 (struct page **)((u8 *)queue + queue_size); qp_host_alloc_queue()
646 queue->kernel_if->u.h.page = qp_host_alloc_queue()
647 &queue->kernel_if->u.h.header_page[1]; qp_host_alloc_queue()
650 return queue; qp_host_alloc_queue()
654 * Frees kernel memory for a given queue (header plus translation
657 static void qp_host_free_queue(struct vmci_queue *queue, u64 queue_size) qp_host_free_queue() argument
659 kfree(queue); qp_host_free_queue()
665 * users of either queue. Of course, it's only any good if the mutexes
673 * Only the host queue has shared state - the guest queues do not qp_init_queue_mutex()
674 * need to synchronize access using a queue mutex. qp_init_queue_mutex()
697 * Acquire the mutex for the queue. Note that the produce_q and
701 static void qp_acquire_queue_mutex(struct vmci_queue *queue) qp_acquire_queue_mutex() argument
703 if (queue->kernel_if->host) qp_acquire_queue_mutex()
704 mutex_lock(queue->kernel_if->mutex); qp_acquire_queue_mutex()
708 * Release the mutex for the queue. Note that the produce_q and
712 static void qp_release_queue_mutex(struct vmci_queue *queue) qp_release_queue_mutex() argument
714 if (queue->kernel_if->host) qp_release_queue_mutex()
715 mutex_unlock(queue->kernel_if->mutex); qp_release_queue_mutex()
779 * Registers the specification of the user pages used for backing a queue
824 * queue, the queue pair headers can be mapped into the
867 * Unmaps previously mapped queue pair headers from the kernel.
936 * Dispatches a queue pair event message directly into the local event
937 * queue.
974 /* One page each for the queue headers. */ qp_guest_endpoint_create()
1105 * pages for the queue pair.
1173 * This functions handles the actual allocation of a VMCI queue
1174 * pair guest endpoint. Allocates physical pages for the queue
1215 pr_devel("Error mismatched queue pair in local attach\n"); qp_alloc_guest_work()
1240 pr_warn("Error allocating pages for produce queue\n"); qp_alloc_guest_work()
1247 pr_warn("Error allocating pages for consume queue\n"); qp_alloc_guest_work()
1270 * It's only necessary to notify the host if this queue pair will be qp_alloc_guest_work()
1278 * Enforce similar checks on local queue pairs as we qp_alloc_guest_work()
1317 * We should initialize the queue pair header pages on a local qp_alloc_guest_work()
1318 * queue pair create. For non-local queue pairs, the qp_alloc_guest_work()
1349 * The first endpoint issuing a queue pair allocation will create the state
1350 * of the queue pair in the queue pair broker.
1353 * with the queue pair as specified by the page_store. For compatibility with
1360 * since the host is not able to supply a page store for the queue pair.
1362 * For older VMX and host callers, the queue pair will be created in the
1399 * Creator's context ID for local queue pairs should match the qp_broker_create()
1411 * The queue pair broker entry stores values from the guest qp_broker_create()
1413 * produce and consume values -- unless it is a local queue qp_broker_create()
1472 * The VMX already initialized the queue pair headers, so no qp_broker_create()
1487 * queue pair create (in which case we will expect a qp_broker_create()
1531 * the given queue pair handle about attach/detach event by the
1574 * The second endpoint issuing a queue pair allocation will attach to
1575 * the queue pair registered with the queue pair broker.
1578 * range with the queue pair as specified by the page_store. At this
1579 * point, the already attach host endpoint may start using the queue
1590 * For new VMX and host callers, the queue pair will be moved to the
1654 * and a host created this queue pair. qp_broker_attach()
1665 * Do not attach a host to a user created queue pair if that qp_broker_attach()
1666 * user doesn't support host queue pair end points. qp_broker_attach()
1682 * The queue pair broker entry stores values from the guest qp_broker_attach()
1698 * If a guest attached to a queue pair, it will supply qp_broker_attach()
1706 * must the initially created queue pair not have any qp_broker_attach()
1717 * initialized the queue pair headers, so no qp_broker_attach()
1733 * The host side is attempting to attach to a queue qp_broker_attach()
1741 /* The host side has successfully attached to a queue pair. */ qp_broker_attach()
1750 pr_warn("Failed to notify peer (ID=0x%x) of attach to queue pair (handle=0x%x:0x%x)\n", qp_broker_attach()
1763 * When attaching to local queue pairs, the context already has qp_broker_attach()
1764 * an entry tracking the queue pair, so don't add another one. qp_broker_attach()
1776 * queue_pair_Alloc for use when setting up queue pair endpoints
1811 * are not allowed to create local queue pairs. qp_broker_alloc()
1817 pr_devel("Context (ID=0x%x) already attached to queue pair (handle=0x%x:0x%x)\n", qp_broker_alloc()
1850 * This function implements the kernel API for allocating a queue
1885 * If this is a local queue pair, the attacher qp_alloc_host_work()
1900 pr_devel("queue pair broker failed to alloc (result=%d)\n", qp_alloc_host_work()
1943 * a queue pair.
2007 * Requests that a queue pair be allocated with the VMCI queue
2008 * pair broker. Allocates a queue pair entry if one does not
2010 * files backing that queue_pair. Assumes that the queue pair
2029 * step to add the UVAs of the VMX mapping of the queue pair. This function
2031 * registering the page store for a queue pair previously allocated by the
2032 * VMX during create or attach. This function will move the queue pair state
2035 * attached state with memory, the queue pair is ready to be used by the
2038 * Assumes that the queue pair broker lock is held.
2057 * We only support guest to host queue pairs, so the VMX must vmci_qp_broker_set_page_store()
2067 pr_warn("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n", vmci_qp_broker_set_page_store()
2121 pr_warn("Failed to notify peer (ID=0x%x) of attach to queue pair (handle=0x%x:0x%x)\n", vmci_qp_broker_set_page_store()
2134 * Resets saved queue headers for the given QP broker
2145 * The main entry point for detaching from a queue pair registered with the
2146 * queue pair broker. If more than one endpoint is attached to the queue
2149 * the queue pair state registered with the broker.
2152 * memory backing the queue pair. If the host is still attached, it will
2153 * no longer be able to access the queue pair content.
2155 * If the queue pair is already in a state where there is no memory
2156 * registered for the queue pair (any *_NO_MEM state), it will transition to
2159 * the first out of two to detach, the queue pair will move to the
2178 pr_devel("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n", vmci_qp_broker_detach()
2186 pr_devel("Context (ID=0x%x) reports being attached to queue pair(handle=0x%x:0x%x) that isn't present in broker\n", vmci_qp_broker_detach()
2212 * Pre NOVMVM vmx'en may detach from a queue pair vmci_qp_broker_detach()
2215 * recent VMX'en may detach from a queue pair in the vmci_qp_broker_detach()
2228 pr_warn("Failed to unmap queue headers for queue pair (handle=0x%x:0x%x,result=%d)\n", vmci_qp_broker_detach()
2293 * Establishes the necessary mappings for a queue pair given a
2294 * reference to the queue pair guest memory. This is usually
2314 pr_devel("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n", vmci_qp_broker_map()
2322 pr_devel("Context (ID=0x%x) reports being attached to queue pair (handle=0x%x:0x%x) that isn't present in broker\n", vmci_qp_broker_map()
2365 * Saves a snapshot of the queue headers for the given QP broker
2404 * Removes all references to the guest memory of a given queue pair, and
2405 * will move the queue pair from state *_MEM to *_NO_MEM. It is usually
2425 pr_devel("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n", vmci_qp_broker_unmap()
2433 pr_devel("Context (ID=0x%x) reports being attached to queue pair (handle=0x%x:0x%x) that isn't present in broker\n", vmci_qp_broker_unmap()
2450 pr_warn("Failed to save queue headers for queue pair (handle=0x%x:0x%x,result=%d)\n", vmci_qp_broker_unmap()
2456 * On hosted, when we unmap queue pairs, the VMX will also vmci_qp_broker_unmap()
2458 * registered memory. If the queue pair is mapped again at a vmci_qp_broker_unmap()
2481 * Destroys all guest queue pair endpoints. If active guest queue
2483 * queue pairs will be made. Any failure to detach is silently
2511 * Helper routine that will lock the queue pair before subsequent
2524 * Helper routine that unlocks the queue pair after calling
2533 * The queue headers may not be mapped at all times. If a queue is
2555 * headers of a given queue pair. If the guest memory of the
2556 * queue pair is currently not available, the saved queue headers
2580 * Callback from VMCI queue pair broker indicating that a queue
2600 * Makes the calling thread wait for the queue pair to become
2602 * woken up after queue pair state change, false otherwise.
2618 * Enqueues a given buffer to the produce queue using the provided
2619 * function. As many bytes as possible (space available in the queue)
2620 * are enqueued. Assumes the queue->mutex has been acquired. Returns
2622 * data, VMCI_ERROR_INVALID_SIZE, if any queue pointer is outside the
2623 * queue (as defined by the queue size), VMCI_ERROR_INVALID_ARGS, if
2625 * VMCI_ERROR_QUEUEPAIR_NOTATTACHED, if the queue pair pages aren't
2626 * available. Otherwise, the number of bytes written to the queue is
2627 * returned. Updates the tail pointer of the produce queue.
2678 * Dequeues data (if available) from the given consume queue. Writes data
2680 * Assumes the queue->mutex has been acquired.
2683 * VMCI_ERROR_INVALID_SIZE, if any queue pointer is outside the queue
2684 * (as defined by the queue size).
2688 * Updates the head pointer of the consume queue.
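The qp_enqueue_locked()/qp_dequeue_locked() descriptions above capture the data path: the producer appends as many bytes as fit and advances the tail of the produce queue, while the consumer copies out the available bytes and advances the head of the consume queue. Below is a generic byte ring with the same head/tail discipline, as a self-contained sketch; the sizes and names are illustrative, not the VMCI layout.

#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 16                    /* small size keeps the demo readable */

struct byte_ring {
        uint8_t  buf[RING_SIZE];
        uint64_t head;                  /* consumer position (free-running) */
        uint64_t tail;                  /* producer position (free-running) */
};

static size_t ring_free_space(const struct byte_ring *r)
{
        return RING_SIZE - (size_t)(r->tail - r->head);
}

/* Enqueue as many bytes as fit, advance the tail, return bytes written. */
static size_t ring_enqueue(struct byte_ring *r, const void *src, size_t len)
{
        size_t free = ring_free_space(r);
        size_t n = len < free ? len : free;
        size_t i;

        for (i = 0; i < n; i++)
                r->buf[(r->tail + i) % RING_SIZE] = ((const uint8_t *)src)[i];
        r->tail += n;
        return n;
}

/* Dequeue up to len bytes, advance the head, return bytes read. */
static size_t ring_dequeue(struct byte_ring *r, void *dst, size_t len)
{
        size_t avail = (size_t)(r->tail - r->head);
        size_t n = len < avail ? len : avail;
        size_t i;

        for (i = 0; i < n; i++)
                ((uint8_t *)dst)[i] = r->buf[(r->head + i) % RING_SIZE];
        r->head += n;
        return n;
}

int main(void)
{
        struct byte_ring r = { {0}, 0, 0 };
        char out[32] = { 0 };
        size_t wrote = ring_enqueue(&r, "hello queue pair", 16);
        size_t got = ring_dequeue(&r, out, sizeof(out) - 1);

        printf("wrote %zu, read %zu: %s\n", wrote, got, out);
        return 0;
}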
2743 * vmci_qpair_alloc() - Allocates a queue pair.
2746 * @produce_qsize: Desired size of the producer queue.
2747 * @consume_qsize: Desired size of the consumer queue.
2754 * queue. If an error occurs allocating the memory for the
2855 * vmci_qpair_detach() - Detaches the client from a queue pair.
2895 * @qpair: Pointer to the queue pair struct.
2932 * @qpair: Pointer to the queue pair struct.
2968 * vmci_qpair_produce_free_space() - Retrieves free space in producer queue.
2969 * @qpair: Pointer to the queue pair struct.
3002 * vmci_qpair_consume_free_space() - Retrieves free space in consumer queue.
3003 * @qpair: Pointer to the queue pair struct.
3037 * producer queue.
3038 * @qpair: Pointer to the queue pair struct.
3072 * consumer queue.
3073 * @qpair: Pointer to the queue pair struct.
3106 * vmci_qpair_enqueue() - Throw data on the queue.
3107 * @qpair: Pointer to the queue pair struct.
3112 * This is the client interface for enqueueing data into the queue.
3147 * vmci_qpair_dequeue() - Get data from the queue.
3148 * @qpair: Pointer to the queue pair struct.
3153 * This is the client interface for dequeueing data from the queue.
3188 * vmci_qpair_peek() - Peek at the data in the queue.
3189 * @qpair: Pointer to the queue pair struct.
3194 * This is the client interface for peeking into a queue. (I.e.,
3195 * copy data from the queue without updating the head pointer.)
3230 * vmci_qpair_enquev() - Throw data on the queue using iov.
3231 * @qpair: Pointer to the queue pair struct.
3236 * This is the client interface for enqueueing data into the queue.
3272 * vmci_qpair_dequev() - Get data from the queue using iov.
3273 * @qpair: Pointer to the queue pair struct.
3278 * This is the client interface for dequeueing data from the queue.
3315 * vmci_qpair_peekv() - Peek at the data in the queue using iov.
3316 * @qpair: Pointer to the queue pair struct.
3321 * This is the client interface for peeking into a queue. (I.e.,
3322 * copy data from the queue without updating the head pointer.)
411 __qp_memcpy_from_queue(void *dest, const struct vmci_queue *queue, u64 queue_offset, size_t size, bool is_iovec) __qp_memcpy_from_queue() argument
577 qp_memcpy_from_queue(void *dest, size_t dest_offset, const struct vmci_queue *queue, u64 queue_offset, size_t size) qp_memcpy_from_queue() argument
605 qp_memcpy_from_queue_iov(void *dest, size_t dest_offset, const struct vmci_queue *queue, u64 queue_offset, size_t size) qp_memcpy_from_queue_iov() argument
H A Dvmci_queue_pair.h43 u64 ppn_va; /* Start VA of queue pair PPNs. */
52 u64 va; /* Start VA of queue pair PPNs. */
77 * These UVA's are of the mmap()'d queue contents backing files.
106 * struct vmci_qp_page_store describes how the memory of a given queue pair
107 * is backed. When the queue pair is between the host and a guest, the
110 * queue pair is mapped into the VMX address space.
113 /* Reference to pages backing the queue pair. */
120 * This data type contains the information about a queue.
121 * There are two queues (hence, queue pairs) per transaction model between a
122 * pair of end points, A & B. One queue is used by end point A to transmit
123 * commands and responses to B. The other queue is used by B to transmit
/linux-4.1.27/block/
H A Dblk-mq-cpumap.c2 * CPU <-> hardware queue mapping helpers
36 unsigned int i, nr_cpus, nr_uniq_cpus, queue, first_sibling; blk_mq_update_queue_map() local
52 queue = 0; for_each_possible_cpu()
65 map[i] = cpu_to_queue_index(nr_cpus, nr_queues, queue); for_each_possible_cpu()
66 queue++; for_each_possible_cpu()
73 * queue. for_each_possible_cpu()
78 queue); for_each_possible_cpu()
79 queue++; for_each_possible_cpu()
107 * queue init time, so runtime isn't important.
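blk_mq_update_queue_map() above spreads CPUs over the available hardware queues once, at queue init time; when there are more CPUs than queues, several CPUs share a queue index. A stripped-down version of that idea is shown below, assigning contiguous blocks of CPUs to each queue and ignoring the hyperthread-sibling grouping the real mapper also performs:

#include <stdio.h>

/* Assign contiguous blocks of CPUs to hardware queues (illustrative only). */
static void build_queue_map(unsigned int *map, unsigned int nr_cpus,
                            unsigned int nr_queues)
{
        unsigned int cpu;

        for (cpu = 0; cpu < nr_cpus; cpu++)
                map[cpu] = cpu * nr_queues / nr_cpus;
}

int main(void)
{
        unsigned int map[8], cpu;

        build_queue_map(map, 8, 3);
        for (cpu = 0; cpu < 8; cpu++)
                printf("cpu %u -> hw queue %u\n", cpu, map[cpu]);
        return 0;
}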
H A Dblk-exec.c2 * Functions related to setting various queue properties from drivers
37 * blk_execute_rq_nowait - insert a request into queue for execution
38 * @q: queue to insert the request in
41 * @at_head: insert request at head or tail of queue
45 * Insert a fully prepared request at the back of the I/O scheduler queue
49 * This function will invoke @done directly if the queue is dead.
91 /* the queue is stopped so it won't be run */ blk_execute_rq_nowait()
99 * blk_execute_rq - insert a request into queue for execution
100 * @q: queue to insert the request in
103 * @at_head: insert request at head or tail of queue
106 * Insert a fully prepared request at the back of the I/O scheduler queue
H A Dnoop-iosched.c12 struct list_head queue; member in struct:noop_data
25 if (!list_empty(&nd->queue)) { noop_dispatch()
27 rq = list_entry(nd->queue.next, struct request, queuelist); noop_dispatch()
39 list_add_tail(&rq->queuelist, &nd->queue); noop_add_request()
47 if (rq->queuelist.prev == &nd->queue) noop_former_request()
57 if (rq->queuelist.next == &nd->queue) noop_latter_request()
78 INIT_LIST_HEAD(&nd->queue); noop_init_queue()
90 BUG_ON(!list_empty(&nd->queue)); noop_exit_queue()
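The noop elevator above keeps nothing but a FIFO list: add_request appends to the tail, dispatch pops from the head, and former/latter simply walk to the list neighbours. No extra example is needed for the mechanics, but note the design point: when the underlying device (or blk-mq layer) already reorders and merges efficiently, the cheapest scheduler is the one that preserves submission order and does nothing else.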
H A Dblk-tag.c13 * blk_queue_find_tag - find a request by its tag and queue
14 * @q: The request queue for the device
55 * @q: the request queue for the device
76 * @q: the request queue for the device
80 * queue in function.
144 * @depth: the maximum queue depth supported
154 * blk_queue_init_tags - initialize the queue tag info
155 * @q: the request queue for the device
156 * @depth: the maximum queue depth supported
197 * @q: the request queue for the device
201 * Must be called with the queue lock held.
253 * @q: the request queue for the device
263 * queue lock must be held.
297 * @q: the request queue for the device
302 * assigned as the queue &prep_rq_fn (in which case &struct request
307 * the request queue, so it's the drivers responsibility to readd
311 * queue lock must be held.
385 * @q: the request queue for the device
389 * In this case, we will safely clear the block side of the tag queue and
390 * readd all requests to the request queue in the right order.
393 * queue lock must be held.
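The blk-tag fragments above describe tagged command queueing: a request receives a small integer tag while it is outstanding, the tag is looked up on completion, and it returns to the pool when the request ends. One common way to implement such a pool is a bitmap of in-use tags; the sketch below shows that idea in isolation and is not the block layer's actual tag code.

#include <stdio.h>

struct tag_pool {
        unsigned int depth;     /* maximum queue depth (<= bits in in_use) */
        unsigned long in_use;   /* bit i set => tag i is taken */
};

/* Allocate the lowest free tag, or -1 if the queue depth is exhausted. */
static int tag_alloc(struct tag_pool *tp)
{
        unsigned int tag;

        for (tag = 0; tag < tp->depth; tag++) {
                if (!(tp->in_use & (1UL << tag))) {
                        tp->in_use |= 1UL << tag;
                        return (int)tag;
                }
        }
        return -1;              /* "queue full": caller must wait for an end_tag */
}

/* Return a tag to the pool once its request has completed. */
static void tag_free(struct tag_pool *tp, unsigned int tag)
{
        if (tag < tp->depth)
                tp->in_use &= ~(1UL << tag);
}

int main(void)
{
        struct tag_pool tp = { 4, 0 };
        int a = tag_alloc(&tp), b = tag_alloc(&tp);

        printf("tags: %d %d\n", a, b);                  /* 0 1 */
        tag_free(&tp, (unsigned int)a);
        printf("reused: %d\n", tag_alloc(&tp));         /* 0 again */
        return 0;
}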
H A Dblk-settings.c2 * Functions related to setting various queue properties from drivers
23 * blk_queue_prep_rq - set a prepare_request function for queue
24 * @q: queue
27 * It's possible for a queue to register a prepare_request callback which
40 * blk_queue_unprep_rq - set an unprepare_request function for queue
41 * @q: queue
44 * It's possible for a queue to register an unprepare_request callback
57 * blk_queue_merge_bvec - set a merge_bvec function for queue
58 * @q: queue
63 * are dynamic, and thus we have to query the queue whether it is ok to
69 * no merge_bvec_fn is defined for a queue, and only the fixed limits are
156 * @q: the request queue for the device to be affected
162 * queue, and then to allow the device driver to select requests
163 * off that queue when it is ready. This works well for many block
166 * request queue, and are served best by having the requests passed
198 * blk_queue_bounce_limit - set bounce buffer limit for queue
199 * @q: the request queue for the device
238 * @limits: the queue limits
249 * per-device basis in /sys/block/<device>/queue/max_sectors_kb.
265 * blk_queue_max_hw_sectors - set max sectors for a request for this queue
266 * @q: the request queue for the device
279 * blk_queue_chunk_sectors - set size of the chunk for this queue
280 * @q: the request queue for the device
300 * @q: the request queue for the device
312 * @q: the request queue for the device
323 * blk_queue_max_segments - set max hw segments for a request for this queue
324 * @q: the request queue for the device
345 * @q: the request queue for the device
365 * blk_queue_logical_block_size - set logical block size for the queue
366 * @q: the request queue for the device
387 * blk_queue_physical_block_size - set physical block size for the queue
388 * @q: the request queue for the device
410 * @q: the request queue for the device
429 * @limits: the queue limits
451 * blk_queue_io_min - set minimum request size for the queue
452 * @q: the request queue for the device
472 * @limits: the queue limits
490 * blk_queue_io_opt - set optimal request size for the queue
491 * @q: the request queue for the device
509 * blk_queue_stack_limits - inherit underlying queue limits for stacked drivers
522 * @b: the underlying queue limits (bottom, component device)
655 * bdev_stack_limits - adjust queue limits for stacked drivers
661 * Merges queue limits for a top device and a block_device. Returns
677 * disk_stack_limits - adjust queue limits for stacked drivers
689 struct request_queue *t = disk->queue; disk_stack_limits()
705 * @q: the request queue for the device
721 * @q: the request queue for the device
738 * @q: the request queue for the device
749 * does is adjust the queue so that the buf is always appended
775 * @q: the request queue for the device
792 * @q: the request queue for the device
797 * this is used when building direct io requests for the queue.
808 * @q: the request queue for the device
814 * the current queue alignment is updated to the new value, otherwise it
830 * blk_queue_flush - configure queue's cache flush capability
831 * @q: the request queue for the device
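The blk_stack_limits()/bdev_stack_limits()/disk_stack_limits() descriptions above are about folding each component device's limits into the limits of the stacked top device. The sketch below illustrates the usual merging rules only approximately and should not be read as the block layer's exact policy: throughput limits take the minimum, granularity limits take the maximum, and the optimal I/O size takes the least common multiple.

#include <stdio.h>

struct limits_sketch {
        unsigned int max_sectors;       /* largest request we may issue */
        unsigned int logical_block;     /* smallest addressable unit */
        unsigned int io_opt;            /* optimal/preferred I/O size */
};

static unsigned int gcd_u(unsigned int a, unsigned int b)
{
        while (b) {
                unsigned int t = a % b;
                a = b;
                b = t;
        }
        return a;
}

static unsigned int lcm_u(unsigned int a, unsigned int b)
{
        if (!a || !b)
                return a ? a : b;
        return a / gcd_u(a, b) * b;
}

/* Fold a component device's limits into the top device's limits. */
static void stack_limits(struct limits_sketch *top, const struct limits_sketch *bottom)
{
        if (bottom->max_sectors < top->max_sectors)
                top->max_sectors = bottom->max_sectors;
        if (bottom->logical_block > top->logical_block)
                top->logical_block = bottom->logical_block;
        top->io_opt = lcm_u(top->io_opt, bottom->io_opt);
}

int main(void)
{
        struct limits_sketch top = { 2048, 512, 0 };
        struct limits_sketch disk_a = { 1024, 512, 65536 };
        struct limits_sketch disk_b = { 2048, 4096, 49152 };

        stack_limits(&top, &disk_a);
        stack_limits(&top, &disk_b);
        printf("max_sectors=%u logical_block=%u io_opt=%u\n",
               top.max_sectors, top.logical_block, top.io_opt);
        return 0;
}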
H A Dblk-core.c57 * For queue allocation
82 * blk_get_backing_dev_info - get the address of a queue's backing_dev_info
85 * Locates the passed device's request queue and returns the address of its
187 * blk_start_queue - restart a previously stopped queue
191 * blk_start_queue() will clear the stop flag on the queue, and call
192 * the request_fn for the queue if it was in a stopped state when
205 * blk_stop_queue - stop a queue
210 * entries on the request queue when the request_fn strategy is called.
211 * Often this will not happen, because of hardware limitations (queue
212 * depth settings). If a device driver gets a 'queue full' response,
213 * or if it simply chooses not to queue more I/O at one point, it can
216 * blk_start_queue() to restart queue operations. Queue lock must be held.
226 * blk_sync_queue - cancel any pending callbacks on a queue
227 * @q: the queue
231 * on a queue, such as calling the unplug function after a timeout.
240 * and blkcg_exit_queue() to be called with queue lock initialized.
262 * __blk_run_queue_uncond - run a queue whether or not it has been stopped
263 * @q: The queue to run
266 * Invoke request handling on a queue if there are any pending requests.
268 * This variant runs the queue whether or not the queue has been
269 * stopped. Must be called with the queue lock held and interrupts
279 * the queue lock internally. As a result multiple threads may be __blk_run_queue_uncond()
290 * __blk_run_queue - run a single device queue
291 * @q: The queue to run
294 * See @blk_run_queue. This variant must be called with the queue lock
307 * blk_run_queue_async - run a single device queue in workqueue context
308 * @q: The queue to run
312 * of us. The caller must hold the queue lock.
322 * blk_run_queue - run a single device queue
323 * @q: The queue to run
326 * Invoke request handling on this queue, if it has pending work to do.
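blk_start_queue()/blk_stop_queue()/blk_run_queue() above revolve around one flag: stopping a queue marks it so the request_fn is not invoked even though requests keep arriving (for example after a 'queue full' response from the device), and starting it clears the flag and kicks request handling again. The toy dispatcher below captures that gating; the flag, counters and callback are placeholders and locking is omitted.

#include <stdbool.h>
#include <stdio.h>

struct toy_queue {
        bool stopped;                   /* cf. the queue's stopped flag */
        unsigned int pending;           /* queued requests awaiting dispatch */
        void (*request_fn)(struct toy_queue *q);
};

/* Run the queue unless it has been stopped or has nothing to do. */
static void run_queue(struct toy_queue *q)
{
        if (q->stopped || !q->pending)
                return;
        q->request_fn(q);
}

static void stop_queue(struct toy_queue *q)
{
        q->stopped = true;              /* driver cannot take more work now */
}

static void start_queue(struct toy_queue *q)
{
        q->stopped = false;
        run_queue(q);                   /* resume processing what piled up */
}

static void toy_request_fn(struct toy_queue *q)
{
        while (q->pending) {
                q->pending--;
                printf("dispatched one request, %u left\n", q->pending);
        }
}

int main(void)
{
        struct toy_queue q = { false, 0, toy_request_fn };

        stop_queue(&q);                 /* pretend the device reported "full" */
        q.pending = 3;
        run_queue(&q);                  /* no-op while stopped */
        start_queue(&q);                /* drains the backlog */
        return 0;
}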
347 * @q: queue to drain
375 * This function might be called on a queue which failed
376 * driver init after queue creation or is not yet fully
378 * in such cases. Kick queue iff dispatch queue has
414 * With queue marked dead, any woken up waiter will fail the
428 * blk_queue_bypass_start - enter queue bypass mode
429 * @q: queue of interest
431 * In bypass mode, only the dispatch FIFO queue of @q is used. This
435 * inside queue or RCU read lock.
446 * complete. This avoids lengthy delays during queue init which blk_queue_bypass_start()
461 * blk_queue_bypass_end - leave queue bypass mode
462 * @q: queue of interest
496 * blk_cleanup_queue - shutdown a request queue
497 * @q: request queue to shutdown
512 * A dying queue is permanently in bypass mode till released. Note blk_cleanup_queue()
517 * called only after the queue refcnt drops to zero and nothing, blk_cleanup_queue()
518 * RCU or not, would be traversing the queue by then. blk_cleanup_queue()
562 /* Allocate memory local to the request queue */ alloc_request_struct()
656 * A queue starts its life with bypass turned on to avoid blk_alloc_queue_node()
658 * init. The initial bypass will be finished when the queue is blk_alloc_queue_node()
682 * blk_init_queue - prepare a request queue for use with a block device
684 * placed on the queue.
685 * @lock: Request queue spin lock
691 * are requests on the queue that need to be processed. If the device
693 * are available on the queue, but may be called at some time later instead.
695 * of the requests on the queue is needed, or due to memory pressure.
698 * queue, but only as many as it can handle at a time. If it does leave
699 * requests on the queue, it is responsible for arranging that the requests
702 * The queue spin lock must be held while manipulating the requests on the
703 * request queue; this lock will be taken also from interrupt context, so irq
706 * Function returns a pointer to the initialized request queue, or %NULL if
758 /* Override internal queue lock with supplied lock pointer */ blk_init_allocated_queue()
995 * The queue will fill after this allocation, so set __get_request()
1007 * The queue is full and the allocating __get_request()
1085 * OK, if the queue is under the request limit then requests need __get_request()
1119 * queue, but this is pretty rare. __get_request()
1222 * @q: target request queue
1293 * blk_requeue_request - put a request back on queue
1294 * @q: request queue where request should be inserted
1300 * on the queue. Must be called with queue lock held.
1346 * The average IO queue length and utilisation statistics are maintained
1347 * by observing the current state of the queue length and the amount of
1354 * /proc/diskstats. This accounts immediately for all queue usage up to
1378 * queue lock must be held blk_pm_put_request()
1522 * reliable access to the elevator outside queue lock. Only check basic
1648 * Returns with the queue unlocked. blk_queue_bio()
2004 * blk_rq_check_limits - Helper function to check a request for the queue limit
2005 * @q: the queue
2017 * Request stacking drivers like request-based dm may change the queue
2018 * limits while requests are in the queue (e.g. dm's table swapping).
2020 * the new queue limits again when they dispatch those requests,
2021 * although such checkings are also done against the old queue limits
2035 * queue's settings related to segment counting like q->bounce_pfn blk_rq_check_limits()
2037 * Recalculate it to check the request correctly on this queue's blk_rq_check_limits()
2052 * @q: the queue to submit the request
2184 * Don't process normal requests when queue is suspended
2241 * blk_peek_request - peek at the top of a request queue
2242 * @q: request queue to peek at
2397 * blk_fetch_request - fetch a request from a request queue
2398 * @q: request queue to fetch a request from
2584 * that were allocated to the request in the prep_rq_fn. The queue
2598 * queue lock must be held
2662 * __blk_end_bidi_request - Complete a bidi request with queue lock held
2669 * Identical to blk_end_bidi_request() except that queue lock is
2772 * Must be called with queue lock held unlike blk_end_request().
2790 * Completely finish @rq. Must be called with queue lock held.
2812 * be called with queue lock held.
2831 * with queue lock held.
2881 * @q : the queue of the device being checked
2890 * devices are busy. This behavior helps achieve more I/O merging on the queue
3150 * queue lock we have to take. blk_flush_plug_list()
3159 * This drops the queue lock blk_flush_plug_list()
3188 * This drops the queue lock blk_flush_plug_list()
3208 * @q: the queue of the device
3209 * @dev: the device the queue belongs to
3215 * request queue @q has been allocated, and runtime PM for it can not happen
3238 * @q: the queue of the device
3242 * by examining if there are any requests pending in the queue. If there
3244 * the queue's status will be updated to SUSPENDING and the driver can
3275 * @q: the queue of the device
3279 * Update the queue's runtime status according to the return value of the
3301 * @q: the queue of the device
3304 * Update the queue's runtime status to RESUMING in preparation for the
3320 * @q: the queue of the device
3324 * Update the queue's runtime status according to the return value of the
3326 * the requests that are queued into the device's queue when it is resuming
/linux-4.1.27/arch/mips/cavium-octeon/executive/
H A Dcvmx-pko.c162 int queue; cvmx_pko_shutdown() local
166 for (queue = 0; queue < CVMX_PKO_MAX_OUTPUT_QUEUES; queue++) { cvmx_pko_shutdown()
171 config.s.queue = queue & 0x7f; cvmx_pko_shutdown()
177 config1.s.qid7 = queue >> 7; cvmx_pko_shutdown()
181 cvmx_cmd_queue_shutdown(CVMX_CMD_QUEUE_PKO(queue)); cvmx_pko_shutdown()
191 * @base_queue: First queue number to associate with this port.
193 * @priority: Array of priority levels for each queue. Values are
209 uint64_t queue; cvmx_pko_config_port() local
224 ("ERROR: cvmx_pko_config_port: Invalid queue range %llu\n", cvmx_pko_config_port()
231 * Validate the static queue priority setup and set cvmx_pko_config_port()
235 for (queue = 0; queue < num_queues; queue++) { cvmx_pko_config_port()
236 /* Find first queue of static priority */ cvmx_pko_config_port()
238 && priority[queue] == cvmx_pko_config_port()
240 static_priority_base = queue; cvmx_pko_config_port()
241 /* Find last queue of static priority */ cvmx_pko_config_port()
244 && priority[queue] != CVMX_PKO_QUEUE_STATIC_PRIORITY cvmx_pko_config_port()
245 && queue) cvmx_pko_config_port()
246 static_priority_end = queue - 1; cvmx_pko_config_port()
249 && queue == num_queues - 1) cvmx_pko_config_port()
251 static_priority_end = queue; cvmx_pko_config_port()
256 * queue 0. cvmx_pko_config_port()
259 && (int)queue > static_priority_end cvmx_pko_config_port()
260 && priority[queue] == cvmx_pko_config_port()
265 "base queue. q: %d, eq: %d\n", cvmx_pko_config_port()
266 (int)queue, static_priority_end); cvmx_pko_config_port()
273 "queue. sq: %d\n", cvmx_pko_config_port()
278 cvmx_dprintf("Port %d: Static priority queue base: %d, " cvmx_pko_config_port()
285 * are either both -1, or are valid start/end queue cvmx_pko_config_port()
297 for (queue = 0; queue < num_queues; queue++) { cvmx_pko_config_port()
301 config1.s.idx3 = queue >> 3; cvmx_pko_config_port()
302 config1.s.qid7 = (base_queue + queue) >> 7; cvmx_pko_config_port()
305 config.s.tail = queue == (num_queues - 1); cvmx_pko_config_port()
306 config.s.index = queue; cvmx_pko_config_port()
308 config.s.queue = base_queue + queue; cvmx_pko_config_port()
312 config.s.static_q = (int)queue <= static_priority_end; cvmx_pko_config_port()
313 config.s.s_tail = (int)queue == static_priority_end; cvmx_pko_config_port()
320 switch ((int)priority[queue]) { cvmx_pko_config_port()
357 (unsigned long long)priority[queue]); cvmx_pko_config_port()
366 (base_queue + queue), cvmx_pko_config_port()
388 ("ERROR: cvmx_pko_config_port: Command queue initialization failed.\n"); cvmx_pko_config_port()
396 (base_queue + queue)); cvmx_pko_config_port()
H A Dcvmx-cmd-queue.c39 #include <asm/octeon/cvmx-cmd-queue.h>
46 * This application uses this pointer to access the global queue
53 * Initialize the Global queue state pointer.
101 * Initialize a command queue for use. The initial FPA buffer is
103 * new command queue.
105 * @queue_id: Hardware command queue to initialize.
140 /* See if someone else has already initialized the queue */ cvmx_cmd_queue_initialize()
199 * Shut down a queue and free its command buffers to the FPA. The
200 * hardware connected to the queue must be stopped before this
212 "get queue information.\n"); cvmx_cmd_queue_shutdown()
235 * Return the number of command words pending in the queue. This
238 * @queue_id: Hardware command queue to query
258 * called with the queue lock, so that is a SLIGHT cvmx_cmd_queue_length()
296 * @queue_id: Command queue to query
H A Dcvmx-helper-util.c86 * @work: Work queue entry containing the packet to dump
171 * Setup Random Early Drop on a specific input queue
173 * @queue: Input queue to setup RED on (0-7)
182 int cvmx_helper_setup_red_queue(int queue, int pass_thresh, int drop_thresh) cvmx_helper_setup_red_queue() argument
193 cvmx_write_csr(CVMX_IPD_QOSX_RED_MARKS(queue), red_marks.u64); cvmx_helper_setup_red_queue()
195 /* Use the actual queue 0 counter, not the average */ cvmx_helper_setup_red_queue()
202 cvmx_write_csr(CVMX_IPD_RED_QUEX_PARAM(queue), red_param.u64); cvmx_helper_setup_red_queue()
222 int queue; cvmx_helper_setup_red() local
237 for (queue = 0; queue < 8; queue++) cvmx_helper_setup_red()
238 cvmx_helper_setup_red_queue(queue, pass_thresh, drop_thresh); cvmx_helper_setup_red()
/linux-4.1.27/arch/powerpc/include/uapi/asm/
H A Dmsgbuf.h24 unsigned long msg_cbytes; /* current number of bytes on queue */
25 unsigned long msg_qnum; /* number of messages in queue */
26 unsigned long msg_qbytes; /* max number of bytes on queue */
/linux-4.1.27/arch/ia64/include/uapi/asm/
H A Dmsgbuf.h18 unsigned long msg_cbytes; /* current number of bytes on queue */
19 unsigned long msg_qnum; /* number of messages in queue */
20 unsigned long msg_qbytes; /* max number of bytes on queue */
/linux-4.1.27/arch/alpha/include/uapi/asm/
H A Dmsgbuf.h18 unsigned long msg_cbytes; /* current number of bytes on queue */
19 unsigned long msg_qnum; /* number of messages in queue */
20 unsigned long msg_qbytes; /* max number of bytes on queue */
/linux-4.1.27/drivers/isdn/i4l/
H A Disdn_net.h85 lp = nd->queue; /* get lp on top of queue */ isdn_net_get_locked_lp()
86 while (isdn_net_lp_busy(nd->queue)) { isdn_net_get_locked_lp()
87 nd->queue = nd->queue->next; isdn_net_get_locked_lp()
88 if (nd->queue == lp) { /* not found -- should never happen */ isdn_net_get_locked_lp()
93 lp = nd->queue; isdn_net_get_locked_lp()
94 nd->queue = nd->queue->next; isdn_net_get_locked_lp()
114 lp = nd->queue; isdn_net_add_to_bundle()
121 nd->queue = nlp; isdn_net_add_to_bundle()
137 // __func__, lp->name, lp, master_lp->name, master_lp, lp->last, lp->next, master_lp->netdev->queue); isdn_net_rm_from_bundle()
141 if (master_lp->netdev->queue == lp) { isdn_net_rm_from_bundle()
142 master_lp->netdev->queue = lp->next; isdn_net_rm_from_bundle()
143 if (lp->next == lp) { /* last in queue */ isdn_net_rm_from_bundle()
144 master_lp->netdev->queue = master_lp->netdev->local; isdn_net_rm_from_bundle()
149 // __func__, master_lp->netdev->queue); isdn_net_rm_from_bundle()
/linux-4.1.27/net/netfilter/
H A Dnfnetlink_queue_core.c65 u_int16_t queue_num; /* number of this queue */
75 struct list_head queue_list; /* packets in queue */
158 static void nfqnl_flush(struct nfqnl_instance *queue, nfqnl_cmpfn cmpfn,
188 __enqueue_entry(struct nfqnl_instance *queue, struct nf_queue_entry *entry) __enqueue_entry() argument
190 list_add_tail(&entry->list, &queue->queue_list); __enqueue_entry()
191 queue->queue_total++; __enqueue_entry()
195 __dequeue_entry(struct nfqnl_instance *queue, struct nf_queue_entry *entry) __dequeue_entry() argument
198 queue->queue_total--; __dequeue_entry()
202 find_dequeue_entry(struct nfqnl_instance *queue, unsigned int id) find_dequeue_entry() argument
206 spin_lock_bh(&queue->lock); find_dequeue_entry()
208 list_for_each_entry(i, &queue->queue_list, list) { find_dequeue_entry()
216 __dequeue_entry(queue, entry); find_dequeue_entry()
218 spin_unlock_bh(&queue->lock); find_dequeue_entry()
224 nfqnl_flush(struct nfqnl_instance *queue, nfqnl_cmpfn cmpfn, unsigned long data) nfqnl_flush() argument
228 spin_lock_bh(&queue->lock); nfqnl_flush()
229 list_for_each_entry_safe(entry, next, &queue->queue_list, list) { nfqnl_flush()
232 queue->queue_total--; nfqnl_flush()
236 spin_unlock_bh(&queue->lock); nfqnl_flush()
282 nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue, nfqnl_build_packet_message() argument
325 switch ((enum nfqnl_config_mode)ACCESS_ONCE(queue->copy_mode)) { nfqnl_build_packet_message()
331 if (!(queue->flags & NFQA_CFG_F_GSO) && nfqnl_build_packet_message()
336 data_len = ACCESS_ONCE(queue->copy_range); nfqnl_build_packet_message()
347 if (queue->flags & NFQA_CFG_F_CONNTRACK) nfqnl_build_packet_message()
350 if (queue->flags & NFQA_CFG_F_UID_GID) { nfqnl_build_packet_message()
355 skb = nfnetlink_alloc_skb(net, size, queue->peer_portid, nfqnl_build_packet_message()
373 nfmsg->res_id = htons(queue->queue_num); nfqnl_build_packet_message()
478 if ((queue->flags & NFQA_CFG_F_UID_GID) && entskb->sk && nfqnl_build_packet_message()
517 __nfqnl_enqueue_packet(struct net *net, struct nfqnl_instance *queue, __nfqnl_enqueue_packet() argument
525 nskb = nfqnl_build_packet_message(net, queue, entry, &packet_id_ptr); __nfqnl_enqueue_packet()
530 spin_lock_bh(&queue->lock); __nfqnl_enqueue_packet()
532 if (queue->queue_total >= queue->queue_maxlen) { __nfqnl_enqueue_packet()
533 if (queue->flags & NFQA_CFG_F_FAIL_OPEN) { __nfqnl_enqueue_packet()
537 queue->queue_dropped++; __nfqnl_enqueue_packet()
539 queue->queue_total); __nfqnl_enqueue_packet()
543 entry->id = ++queue->id_sequence; __nfqnl_enqueue_packet()
547 err = nfnetlink_unicast(nskb, net, queue->peer_portid, MSG_DONTWAIT); __nfqnl_enqueue_packet()
549 queue->queue_user_dropped++; __nfqnl_enqueue_packet()
553 __enqueue_entry(queue, entry); __nfqnl_enqueue_packet()
555 spin_unlock_bh(&queue->lock); __nfqnl_enqueue_packet()
561 spin_unlock_bh(&queue->lock); __nfqnl_enqueue_packet()
608 __nfqnl_enqueue_packet_gso(struct net *net, struct nfqnl_instance *queue, __nfqnl_enqueue_packet_gso() argument
619 ret = __nfqnl_enqueue_packet(net, queue, entry); __nfqnl_enqueue_packet_gso()
630 ret = __nfqnl_enqueue_packet(net, queue, entry_seg); __nfqnl_enqueue_packet_gso()
641 struct nfqnl_instance *queue; nfqnl_enqueue_packet() local
649 queue = instance_lookup(q, queuenum); nfqnl_enqueue_packet()
650 if (!queue) nfqnl_enqueue_packet()
653 if (queue->copy_mode == NFQNL_COPY_NONE) nfqnl_enqueue_packet()
667 if ((queue->flags & NFQA_CFG_F_GSO) || !skb_is_gso(skb)) nfqnl_enqueue_packet()
668 return __nfqnl_enqueue_packet(net, queue, entry); nfqnl_enqueue_packet()
683 err = __nfqnl_enqueue_packet_gso(net, queue, nfqnl_enqueue_packet()
735 nfqnl_set_mode(struct nfqnl_instance *queue, nfqnl_set_mode() argument
740 spin_lock_bh(&queue->lock); nfqnl_set_mode()
744 queue->copy_mode = mode; nfqnl_set_mode()
745 queue->copy_range = 0; nfqnl_set_mode()
749 queue->copy_mode = mode; nfqnl_set_mode()
751 queue->copy_range = NFQNL_MAX_COPY_RANGE; nfqnl_set_mode()
753 queue->copy_range = range; nfqnl_set_mode()
760 spin_unlock_bh(&queue->lock); nfqnl_set_mode()
788 /* drop all packets with either indev or outdev == ifindex from all queue
895 struct nfqnl_instance *queue; verdict_instance_lookup() local
897 queue = instance_lookup(q, queue_num); verdict_instance_lookup()
898 if (!queue) verdict_instance_lookup()
901 if (queue->peer_portid != nlportid) verdict_instance_lookup()
904 return queue; verdict_instance_lookup()
937 struct nfqnl_instance *queue; nfqnl_recv_verdict_batch() local
944 queue = verdict_instance_lookup(q, queue_num, nfqnl_recv_verdict_batch()
946 if (IS_ERR(queue)) nfqnl_recv_verdict_batch()
947 return PTR_ERR(queue); nfqnl_recv_verdict_batch()
956 spin_lock_bh(&queue->lock); nfqnl_recv_verdict_batch()
958 list_for_each_entry_safe(entry, tmp, &queue->queue_list, list) { nfqnl_recv_verdict_batch()
961 __dequeue_entry(queue, entry); nfqnl_recv_verdict_batch()
965 spin_unlock_bh(&queue->lock); nfqnl_recv_verdict_batch()
987 struct nfqnl_instance *queue; nfqnl_recv_verdict() local
996 queue = instance_lookup(q, queue_num); nfqnl_recv_verdict()
997 if (!queue) nfqnl_recv_verdict()
998 queue = verdict_instance_lookup(q, queue_num, nfqnl_recv_verdict()
1000 if (IS_ERR(queue)) nfqnl_recv_verdict()
1001 return PTR_ERR(queue); nfqnl_recv_verdict()
1009 entry = find_dequeue_entry(queue, ntohl(vhdr->id)); nfqnl_recv_verdict()
1066 struct nfqnl_instance *queue; nfqnl_recv_config() local
1075 /* Obsolete commands without queue context */ nfqnl_recv_config()
1083 queue = instance_lookup(q, queue_num); nfqnl_recv_config()
1084 if (queue && queue->peer_portid != NETLINK_CB(skb).portid) { nfqnl_recv_config()
1092 if (queue) { nfqnl_recv_config()
1096 queue = instance_create(q, queue_num, nfqnl_recv_config()
1098 if (IS_ERR(queue)) { nfqnl_recv_config()
1099 ret = PTR_ERR(queue); nfqnl_recv_config()
1104 if (!queue) { nfqnl_recv_config()
1108 instance_destroy(q, queue); nfqnl_recv_config()
1122 if (!queue) { nfqnl_recv_config()
1127 nfqnl_set_mode(queue, params->copy_mode, nfqnl_recv_config()
1134 if (!queue) { nfqnl_recv_config()
1139 spin_lock_bh(&queue->lock); nfqnl_recv_config()
1140 queue->queue_maxlen = ntohl(*queue_maxlen); nfqnl_recv_config()
1141 spin_unlock_bh(&queue->lock); nfqnl_recv_config()
1147 if (!queue) { nfqnl_recv_config()
1168 spin_lock_bh(&queue->lock); nfqnl_recv_config()
1169 queue->flags &= ~mask; nfqnl_recv_config()
1170 queue->flags |= flags & mask; nfqnl_recv_config()
1171 spin_unlock_bh(&queue->lock); nfqnl_recv_config()
1378 MODULE_DESCRIPTION("netfilter packet queue handler");
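The enqueue/dequeue paths above are a conventional spinlocked-list pattern. A reduced, self-contained sketch of that pattern follows, with illustrative stand-in types (my_queue, my_entry) rather than the nfqnl structures.

#include <linux/list.h>
#include <linux/spinlock.h>

struct my_queue {
    spinlock_t lock;
    struct list_head queue_list;
    unsigned int queue_total;
};

struct my_entry {
    struct list_head list;
    unsigned int id;
};

static void my_queue_init(struct my_queue *q)
{
    spin_lock_init(&q->lock);
    INIT_LIST_HEAD(&q->queue_list);
    q->queue_total = 0;
}

static void my_enqueue(struct my_queue *q, struct my_entry *e)
{
    spin_lock_bh(&q->lock);
    list_add_tail(&e->list, &q->queue_list);   /* tail insert, FIFO order */
    q->queue_total++;
    spin_unlock_bh(&q->lock);
}

static struct my_entry *my_dequeue_id(struct my_queue *q, unsigned int id)
{
    struct my_entry *e, *found = NULL;

    spin_lock_bh(&q->lock);
    list_for_each_entry(e, &q->queue_list, list) {
        if (e->id == id) {
            list_del(&e->list);                /* unlink while holding the lock */
            q->queue_total--;
            found = e;
            break;
        }
    }
    spin_unlock_bh(&q->lock);
    return found;
}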
H A Dxt_NFQUEUE.c1 /* iptables module for using new netfilter netlink queue
42 u32 queue = info->queuenum; nfqueue_tg_v1() local
45 queue = nfqueue_hash(skb, queue, info->queues_total, nfqueue_tg_v1()
48 return NF_QUEUE_NR(queue); nfqueue_tg_v1()
91 u32 queue = info->queuenum; nfqueue_tg_v3() local
98 queue = info->queuenum + cpu % info->queues_total; nfqueue_tg_v3()
100 queue = nfqueue_hash(skb, queue, info->queues_total, nfqueue_tg_v3()
105 ret = NF_QUEUE_NR(queue); nfqueue_tg_v3()
H A Dnft_queue.c35 u32 queue = priv->queuenum; nft_queue_eval() local
42 queue = priv->queuenum + cpu % priv->queues_total; nft_queue_eval()
44 queue = nfqueue_hash(pkt->skb, queue, nft_queue_eval()
50 ret = NF_QUEUE_NR(queue); nft_queue_eval()
110 .name = "queue",
132 MODULE_ALIAS_NFT_EXPR("queue");
/linux-4.1.27/include/asm-generic/
H A Dqrwlock_types.h8 * The queue read/write lock data structure
H A Dqrwlock.h44 * @lock: Pointer to queue rwlock structure
53 * @lock: Pointer to queue rwlock structure
61 * queue_read_trylock - try to acquire read lock of a queue rwlock
62 * @lock : Pointer to queue rwlock structure
80 * queue_write_trylock - try to acquire write lock of a queue rwlock
81 * @lock : Pointer to queue rwlock structure
96 * queue_read_lock - acquire read lock of a queue rwlock
97 * @lock: Pointer to queue rwlock structure
112 * queue_write_lock - acquire write lock of a queue rwlock
113 * @lock : Pointer to queue rwlock structure
125 * queue_read_unlock - release read lock of a queue rwlock
126 * @lock : Pointer to queue rwlock structure
139 * queue_write_unlock - release write lock of a queue rwlock
140 * @lock : Pointer to queue rwlock structure
155 * queue rwlock functions.
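The qrwlock header above documents the queue rwlock primitives. A short sketch of the usual consumer-side pattern, assuming an illustrative rwlock_t and counter; on architectures that use the queued implementation, these read_lock()/write_lock() calls resolve to the queue_read_lock()/queue_write_lock() fast paths and fall into the slowpaths of kernel/locking/qrwlock.c (shown further below) under contention.

#include <linux/spinlock.h>

static DEFINE_RWLOCK(my_table_lock);   /* illustrative lock and data */
static int my_count;

static int my_lookup(void)
{
    int v;

    read_lock(&my_table_lock);    /* many readers may hold the lock at once */
    v = my_count;
    read_unlock(&my_table_lock);
    return v;
}

static void my_update(int v)
{
    write_lock(&my_table_lock);   /* a contended writer waits its turn in the lock's queue */
    my_count = v;
    write_unlock(&my_table_lock);
}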
/linux-4.1.27/include/linux/
H A Dmsg.h17 /* one msq_queue structure for each present queue on the system */
23 unsigned long q_cbytes; /* current number of bytes on queue */
24 unsigned long q_qnum; /* number of messages in queue */
25 unsigned long q_qbytes; /* max number of bytes on queue */
H A Dosq_lock.h16 * Stores an encoded value of the CPU # of the tail node in the queue.
17 * If the queue is empty, then it's set to OSQ_UNLOCKED_VAL.
H A Dvirtio_mmio.h80 /* Maximum size of the currently selected queue - Read Only */
83 /* Queue size for the currently selected queue - Write Only */
89 /* Used Ring alignment for the currently selected queue - Write Only */
92 /* Guest's PFN for the currently selected queue - Read Write */
98 /* Ready bit for the currently selected queue - Read Write */
113 /* Selected queue's Descriptor Table address, 64 bits in two halves */
117 /* Selected queue's Available Ring address, 64 bits in two halves */
121 /* Selected queue's Used Ring address, 64 bits in two halves */
H A Ddynamic_queue_limits.h2 * Dynamic queue limits (dql) - Definitions
6 * This header file contains the definitions for dynamic queue limits (dql).
7 * dql would be used in conjunction with a producer/consumer type queue
8 * (possibly a HW queue). Such a queue would have these general properties:
26 * were retired from the queue
52 unsigned int prev_num_queued; /* Previous queue total */
70 * availability in the queue with dql_avail.
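The dynamic queue limits header above describes the producer/consumer contract. A hedged sketch of the underlying calls, using an illustrative standalone struct dql (assumed to have been initialized with dql_init()/dql_reset() at setup time); real network drivers normally reach these through the netdev_tx_sent_queue()/netdev_tx_completed_queue() byte-queue-limit wrappers.

#include <linux/dynamic_queue_limits.h>
#include <linux/types.h>

static struct dql my_dql;   /* illustrative standalone instance */

/* Producer side: account for 'bytes' of work just posted to the HW queue;
 * returns false once the dynamic limit is used up and the queue should stop. */
static bool my_tx_queued(unsigned int bytes)
{
    dql_queued(&my_dql, bytes);
    return dql_avail(&my_dql) > 0;
}

/* Consumer side: called from completion handling once 'bytes' were retired;
 * this recalculates the limit and may open up space in the queue again. */
static void my_tx_completed(unsigned int bytes)
{
    dql_completed(&my_dql, bytes);
}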
H A Dpxa168_eth.h21 * Override default RX/TX queue sizes if nonzero.
H A Dbsg.h12 struct request_queue *queue; member in struct:bsg_class_device
/linux-4.1.27/include/uapi/linux/
H A Dmsg.h13 #define MSG_COPY 040000 /* copy (not remove) all queue messages */
18 struct msg *msg_first; /* first message on queue,unused */
19 struct msg *msg_last; /* last message in queue,unused */
25 unsigned short msg_cbytes; /* current number of bytes on queue */
26 unsigned short msg_qnum; /* number of messages in queue */
27 unsigned short msg_qbytes; /* max number of bytes on queue */
63 * MSGMNB is the default size of a new message queue. Non-root tasks can
65 * (actually: CAP_SYS_RESOURCE) can both increase and decrease the queue
72 * the queue. This is also an arbitrary choice (since 2.6.0).
75 #define MSGMNI 32000 /* <= IPCMNI */ /* max # of msg queue identifiers */
77 #define MSGMNB 16384 /* <= INT_MAX */ /* default max size of a message queue */
H A Dgen_stats.h53 * @qlen: queue length
54 * @backlog: backlog size of queue
H A Dnetfilter.h18 /* we overload the higher bits for encoding auxiliary data such as the queue
26 /* queue number (NF_QUEUE) or errno (NF_DROP) */
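The netfilter.h comment above notes that a queue number is packed into the upper bits of a hook verdict. A tiny hedged illustration of building such a verdict with the NF_QUEUE_NR() macro (the same macro used by xt_NFQUEUE and nft_queue earlier in these results); the enclosing hook function is intentionally omitted.

#include <linux/netfilter.h>
#include <linux/types.h>

/* Build an NF_QUEUE verdict that carries a target queue number in its upper
 * bits, as the uapi comment above describes; NF_QUEUE_NR() shifts the number
 * into NF_VERDICT_QMASK and ORs in the NF_QUEUE verdict code. */
static unsigned int verdict_for_queue(u16 queuenum)
{
    return NF_QUEUE_NR(queuenum);
}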
H A Dcoda_psdev.h19 wait_queue_head_t uc_sleep; /* process' wait queue */
/linux-4.1.27/drivers/scsi/lpfc/
H A Dlpfc_debugfs.h90 /* queue info */
93 /* queue acc */
282 * lpfc_debug_dump_qe - dump a specific entry from a queue
283 * @q: Pointer to the queue descriptor.
284 * @idx: Index to the entry on the queue.
286 * This function dumps an entry indexed by @idx from a queue specified by the
287 * queue descriptor @q.
331 * lpfc_debug_dump_q - dump all entries from a specific queue
332 * @q: Pointer to the queue descriptor.
334 * This function dumps all entries from a queue specified by the queue
361 * lpfc_debug_dump_fcp_wq - dump all entries from a fcp work queue
363 * @fcp_wqidx: Index to a FCP work queue.
365 * This function dumps all entries from a FCP work queue specified by the
381 * lpfc_debug_dump_fcp_cq - dump all entries from a fcp work queue's cmpl queue
383 * @fcp_wqidx: Index to a FCP work queue.
385 * This function dumps all entries from a FCP complete queue which is
386 * associated with the FCP work queue specified by the @fcp_wqidx.
416 * lpfc_debug_dump_hba_eq - dump all entries from a fcp work queue's evt queue
418 * @fcp_wqidx: Index to a FCP work queue.
420 * This function dumps all entries from a FCP event queue which is
421 * associated with the FCP work queue specified by the @fcp_wqidx.
457 * lpfc_debug_dump_els_wq - dump all entries from the els work queue
460 * This function dumps all entries from the ELS work queue.
471 * lpfc_debug_dump_mbx_wq - dump all entries from the mbox work queue
474 * This function dumps all entries from the MBOX work queue.
485 * lpfc_debug_dump_dat_rq - dump all entries from the receive data queue
488 * This function dumps all entries from the receive data queue.
499 * lpfc_debug_dump_hdr_rq - dump all entries from the receive header queue
502 * This function dumps all entries from the receive header queue.
513 * lpfc_debug_dump_els_cq - dump all entries from the els complete queue
516 * This function dumps all entries from the els complete queue.
528 * lpfc_debug_dump_mbx_cq - dump all entries from the mbox complete queue
531 * This function dumps all entries from the mbox complete queue.
543 * lpfc_debug_dump_wq_by_id - dump all entries from a work queue by queue id
545 * @qid: Work queue identifier.
547 * This function dumps all entries from a work queue identified by the queue
571 * lpfc_debug_dump_mq_by_id - dump all entries from a mbox queue by queue id
573 * @qid: Mbox work queue identifier.
575 * This function dumps all entries from a mbox work queue identified by the
576 * queue identifier.
588 * lpfc_debug_dump_rq_by_id - dump all entries from a receive queue by queue id
590 * @qid: Receive queue identifier.
592 * This function dumps all entries from a receive queue identified by the
593 * queue identifier.
610 * lpfc_debug_dump_cq_by_id - dump all entries from a cmpl queue by queue id
612 * @qid: Complete queue identifier.
614 * This function dumps all entries from a complete queue identified by the
615 * queue identifier.
646 * lpfc_debug_dump_eq_by_id - dump all entries from an event queue by queue id
648 * @qid: Complete queue identifier.
650 * This function dumps all entries from an event queue identified by the
651 * queue identifier.
/linux-4.1.27/sound/core/seq/oss/
H A Dseq_oss_timer.c43 * if queue is not started yet, start it.
68 * if no more timers exist, stop the queue.
150 ev.queue = dp->queue; send_timer_event()
151 ev.data.queue.queue = dp->queue; send_timer_event()
152 ev.data.queue.param.value = value; send_timer_event()
157 * set queue tempo and start queue
169 tmprec.queue = dp->queue; snd_seq_oss_timer_start()
182 * stop queue
196 * continue queue
210 * change queue tempo
H A Dseq_oss_init.c41 MODULE_PARM_DESC(maxqlen, "maximum queue length");
58 static int delete_seq_queue(int queue);
196 dp->queue = -1; snd_seq_oss_open()
227 /* allocate queue */ snd_seq_oss_open()
235 /*dp->addr.queue = dp->queue;*/ snd_seq_oss_open()
243 /* initialize read queue */ snd_seq_oss_open()
252 /* initialize write queue */ snd_seq_oss_open()
286 delete_seq_queue(dp->queue); snd_seq_oss_open()
358 * allocate a queue
372 dp->queue = qinfo.queue; alloc_seq_queue()
377 * release queue
380 delete_seq_queue(int queue) delete_seq_queue() argument
385 if (queue < 0) delete_seq_queue()
388 qinfo.queue = queue; delete_seq_queue()
391 pr_err("ALSA: seq_oss: unable to delete queue %d (%d)\n", queue, rc); delete_seq_queue()
420 int queue; snd_seq_oss_release() local
431 queue = dp->queue; snd_seq_oss_release()
434 delete_seq_queue(queue); snd_seq_oss_release()
506 snd_iprintf(buf, "port %d : queue %d\n", dp->port, dp->queue); snd_seq_oss_system_info_read()
513 snd_iprintf(buf, " max queue length %d\n", maxqlen); snd_seq_oss_system_info_read()
H A Dseq_oss_readq.c4 * seq_oss_readq.c - MIDI input queue
43 * create a read queue
72 * delete the read queue
84 * reset the read queue
122 * copy an event to input queue:
151 * pop queue
232 snd_iprintf(buf, " read queue [%s] length = %d : tick = %ld\n", snd_seq_oss_readq_info_read()
/linux-4.1.27/drivers/block/rsxx/
H A Ddev.c262 card->queue = blk_alloc_queue(GFP_KERNEL); rsxx_setup_dev()
263 if (!card->queue) { rsxx_setup_dev()
264 dev_err(CARD_TO_DEV(card), "Failed queue alloc\n"); rsxx_setup_dev()
272 blk_cleanup_queue(card->queue); rsxx_setup_dev()
279 blk_queue_dma_alignment(card->queue, blk_size - 1); rsxx_setup_dev()
280 blk_queue_logical_block_size(card->queue, blk_size); rsxx_setup_dev()
283 blk_queue_make_request(card->queue, rsxx_make_request); rsxx_setup_dev()
284 blk_queue_bounce_limit(card->queue, BLK_BOUNCE_ANY); rsxx_setup_dev()
285 blk_queue_max_hw_sectors(card->queue, blkdev_max_hw_sectors); rsxx_setup_dev()
286 blk_queue_physical_block_size(card->queue, RSXX_HW_BLK_SIZE); rsxx_setup_dev()
288 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, card->queue); rsxx_setup_dev()
289 queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, card->queue); rsxx_setup_dev()
291 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, card->queue); rsxx_setup_dev()
292 blk_queue_max_discard_sectors(card->queue, rsxx_setup_dev()
294 card->queue->limits.discard_granularity = RSXX_HW_BLK_SIZE; rsxx_setup_dev()
295 card->queue->limits.discard_alignment = RSXX_HW_BLK_SIZE; rsxx_setup_dev()
296 card->queue->limits.discard_zeroes_data = 1; rsxx_setup_dev()
299 card->queue->queuedata = card; rsxx_setup_dev()
308 card->gendisk->queue = card->queue; rsxx_setup_dev()
321 blk_cleanup_queue(card->queue); rsxx_destroy_dev()
322 card->queue->queuedata = NULL; rsxx_destroy_dev()
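The rsxx probe path above is the usual 4.1-era setup for a bio-based (make_request) block device. A condensed, hedged sketch of that sequence for a hypothetical driver; names prefixed mydrv_ are illustrative.

#include <linux/blkdev.h>
#include <linux/genhd.h>
#include <linux/gfp.h>

static void mydrv_make_request(struct request_queue *q, struct bio *bio)
{
    /* ... translate and submit the bio to the hardware ... */
    bio_endio(bio, 0);    /* complete with no error once the I/O is done */
}

static int mydrv_setup_queue(struct gendisk *disk, void *drvdata)
{
    struct request_queue *q;

    q = blk_alloc_queue(GFP_KERNEL);      /* bio-based: no request_fn/elevator */
    if (!q)
        return -ENOMEM;

    blk_queue_make_request(q, mydrv_make_request);
    blk_queue_logical_block_size(q, 512);
    queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);   /* non-rotational device */

    q->queuedata = drvdata;               /* driver context for the request path */
    disk->queue = q;                      /* attach before add_disk() */
    return 0;
}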
/linux-4.1.27/net/sctp/
H A Dinqueue.c10 * An SCTP inqueue is a queue into which you push SCTP packets
47 void sctp_inq_init(struct sctp_inq *queue) sctp_inq_init() argument
49 INIT_LIST_HEAD(&queue->in_chunk_list); sctp_inq_init()
50 queue->in_progress = NULL; sctp_inq_init()
53 INIT_WORK(&queue->immediate, NULL); sctp_inq_init()
57 void sctp_inq_free(struct sctp_inq *queue) sctp_inq_free() argument
61 /* Empty the queue. */ sctp_inq_free()
62 list_for_each_entry_safe(chunk, tmp, &queue->in_chunk_list, list) { sctp_inq_free()
70 if (queue->in_progress) { sctp_inq_free()
71 sctp_chunk_free(queue->in_progress); sctp_inq_free()
72 queue->in_progress = NULL; sctp_inq_free()
99 struct sctp_chunkhdr *sctp_inq_peek(struct sctp_inq *queue) sctp_inq_peek() argument
104 chunk = queue->in_progress; sctp_inq_peek()
119 * WARNING: If you need to put the chunk on another queue, you need to
122 struct sctp_chunk *sctp_inq_pop(struct sctp_inq *queue) sctp_inq_pop() argument
131 if ((chunk = queue->in_progress)) { sctp_inq_pop()
139 chunk = queue->in_progress = NULL; sctp_inq_pop()
149 /* Do we need to take the next packet out of the queue to process? */ sctp_inq_pop()
153 /* Is the queue empty? */ sctp_inq_pop()
154 if (list_empty(&queue->in_chunk_list)) sctp_inq_pop()
157 entry = queue->in_chunk_list.next; sctp_inq_pop()
158 chunk = queue->in_progress = sctp_inq_pop()
/linux-4.1.27/drivers/misc/sgi-gru/
H A Dgrukservices.h26 * These functions allow the user to create a message queue for
40 * - user options for dealing with timeouts, queue full, etc.
45 void *mq; /* message queue vaddress */
47 int qlines; /* queue size in CL */
55 * a message queue. The caller must ensure that the queue is
58 * Message queue size is the total number of bytes allocated
59 * to the queue including a 2 cacheline header that is used
60 * to manage the queue.
63 * mqd pointer to message queue descriptor
65 * bytes size of message queue in bytes
78 * Send a message to a message queue.
80 * Note: The message queue transport mechanism uses the first 32
85 * mqd pointer to message queue descriptor
100 #define MQE_QUEUE_FULL 2 /* queue is full */
102 #define MQE_PAGE_OVERFLOW 10 /* BUG - queue overflowed a page */
106 * Advance the receive pointer for the message queue to the next message.
111 * mqd pointer to message queue descriptor
118 * Get next message from message queue. Returns pointer to
121 * in order to move the queue pointers to next message.
124 * mqd pointer to message queue descriptor
/linux-4.1.27/kernel/locking/
H A Dqrwlock.c27 * @lock : Pointer to queue rwlock structure
28 * @writer: Current queue rwlock writer status byte
30 * In interrupt context or at the head of the queue, the reader will just
43 * queue_read_lock_slowpath - acquire read lock of a queue rwlock
44 * @lock: Pointer to queue rwlock structure
56 * available without waiting in the queue. queue_read_lock_slowpath()
65 * Put the reader into the wait queue queue_read_lock_slowpath()
70 * At the head of the wait queue now, wait until the writer state queue_read_lock_slowpath()
83 * Signal the next one in queue to become queue head queue_read_lock_slowpath()
90 * queue_write_lock_slowpath - acquire write lock of a queue rwlock
91 * @lock : Pointer to queue rwlock structure
97 /* Put the writer into the wait queue */ queue_write_lock_slowpath()
/linux-4.1.27/tools/testing/selftests/ipc/
H A Dmsgque.c53 printf("Failed to create queue\n"); restore_queue()
58 printf("Restored queue has wrong id (%d instead of %d)\n", restore_queue()
76 printf("Failed to destroy queue: %d\n", -errno); restore_queue()
123 printf("Failed to destroy queue: %d\n", -errno); check_and_destroy_queue()
140 printf("Failed to get stats for IPC queue with id %d\n", dump_queue()
151 printf("Failed to get stats for IPC queue\n"); dump_queue()
212 printf("Can't create queue: %d\n", err); main()
218 printf("Failed to fill queue: %d\n", err); main()
224 printf("Failed to dump queue: %d\n", err); main()
230 printf("Failed to check and destroy queue: %d\n", err); main()
236 printf("Failed to restore queue: %d\n", err); main()
242 printf("Failed to test queue: %d\n", err); main()
249 printf("Failed to destroy queue: %d\n", -errno); main()
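The selftest above exercises the System V message queue syscalls. A minimal hedged userspace companion showing the create/send/stat/receive/destroy cycle it tests; error handling is abbreviated.

#include <stdio.h>
#include <string.h>
#include <sys/ipc.h>
#include <sys/msg.h>

struct my_msg {
    long mtype;
    char mtext[64];
};

int main(void)
{
    struct msqid_ds ds;
    struct my_msg m = { .mtype = 1 };
    int id;

    id = msgget(IPC_PRIVATE, IPC_CREAT | 0600);    /* create a private queue */
    if (id < 0)
        return 1;

    strcpy(m.mtext, "hello");
    msgsnd(id, &m, sizeof(m.mtext), 0);            /* enqueue one message */

    msgctl(id, IPC_STAT, &ds);                     /* msg_qnum should now be 1 */
    printf("messages on queue: %lu\n", (unsigned long)ds.msg_qnum);

    msgrcv(id, &m, sizeof(m.mtext), 0, 0);         /* dequeue it again */
    msgctl(id, IPC_RMID, NULL);                    /* destroy the queue */
    return 0;
}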
/linux-4.1.27/drivers/staging/unisys/uislib/
H A Duisqueue.c47 * - If insertion fails due to a full queue, the caller will determine the
51 * 1 if the insertion succeeds, 0 if the queue was full.
53 unsigned char spar_signal_insert(struct channel_header __iomem *ch, u32 queue, spar_signal_insert() argument
62 + queue; spar_signal_insert()
68 /* queue is full if (head + 1) % n equals tail */ spar_signal_insert()
106 * - pSignal points to a memory area large enough to hold queue's SignalSize
109 * 1 if the removal succeeds, 0 if the queue was empty.
112 spar_signal_remove(struct channel_header __iomem *ch, u32 queue, void *sig) spar_signal_remove() argument
118 readq(&ch->ch_space_offset)) + queue; spar_signal_remove()
124 /* queue is empty if the head index equals the tail index */ spar_signal_remove()
166 unsigned int spar_signal_remove_all(struct channel_header *ch, u32 queue, spar_signal_remove_all() argument
173 ch->ch_space_offset) + queue; spar_signal_remove_all()
179 /* queue is empty if the head index equals the tail index */ spar_signal_remove_all()
208 * Determine whether a signal queue is empty.
215 * 1 if the signal queue is empty, 0 otherwise.
218 u32 queue) spar_signalqueue_empty()
222 readq(&ch->ch_space_offset)) + queue; spar_signalqueue_empty()
299 return 0; /* failed to queue */ uisqueue_put_cmdrsp_with_lock_client()
309 /* uisqueue_get_cmdrsp gets the cmdrsp entry at the head of the queue
310 * returns NULL if queue is empty */
217 spar_signalqueue_empty(struct channel_header __iomem *ch, u32 queue) spar_signalqueue_empty() argument
/linux-4.1.27/net/core/
H A Drequest_sock.c24 * Maximum number of SYN_RECV sockets in queue per LISTEN socket.
40 int reqsk_queue_alloc(struct request_sock_queue *queue, reqsk_queue_alloc() argument
61 spin_lock_init(&queue->syn_wait_lock); reqsk_queue_alloc()
62 queue->rskq_accept_head = NULL; reqsk_queue_alloc()
66 spin_lock_bh(&queue->syn_wait_lock); reqsk_queue_alloc()
67 queue->listen_opt = lopt; reqsk_queue_alloc()
68 spin_unlock_bh(&queue->syn_wait_lock); reqsk_queue_alloc()
73 void __reqsk_queue_destroy(struct request_sock_queue *queue) __reqsk_queue_destroy() argument
76 kvfree(queue->listen_opt); __reqsk_queue_destroy()
80 struct request_sock_queue *queue) reqsk_queue_yank_listen_sk()
84 spin_lock_bh(&queue->syn_wait_lock); reqsk_queue_yank_listen_sk()
85 lopt = queue->listen_opt; reqsk_queue_yank_listen_sk()
86 queue->listen_opt = NULL; reqsk_queue_yank_listen_sk()
87 spin_unlock_bh(&queue->syn_wait_lock); reqsk_queue_yank_listen_sk()
92 void reqsk_queue_destroy(struct request_sock_queue *queue) reqsk_queue_destroy() argument
95 struct listen_sock *lopt = reqsk_queue_yank_listen_sk(queue); reqsk_queue_destroy()
103 spin_lock_bh(&queue->syn_wait_lock); reqsk_queue_destroy()
110 spin_unlock_bh(&queue->syn_wait_lock); reqsk_queue_destroy()
115 spin_lock_bh(&queue->syn_wait_lock); reqsk_queue_destroy()
117 spin_unlock_bh(&queue->syn_wait_lock); reqsk_queue_destroy()
79 reqsk_queue_yank_listen_sk( struct request_sock_queue *queue) reqsk_queue_yank_listen_sk() argument
H A Dnet-sysfs.c618 struct netdev_rx_queue *queue = to_rx_queue(kobj); rx_queue_attr_show() local
623 return attribute->show(queue, attribute, buf); rx_queue_attr_show()
630 struct netdev_rx_queue *queue = to_rx_queue(kobj); rx_queue_attr_store() local
635 return attribute->store(queue, attribute, buf, count); rx_queue_attr_store()
644 static ssize_t show_rps_map(struct netdev_rx_queue *queue, show_rps_map() argument
655 map = rcu_dereference(queue->rps_map); show_rps_map()
667 static ssize_t store_rps_map(struct netdev_rx_queue *queue, store_rps_map() argument
708 old_map = rcu_dereference_protected(queue->rps_map, store_rps_map()
710 rcu_assign_pointer(queue->rps_map, map); store_rps_map()
723 static ssize_t show_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue, show_rps_dev_flow_table_cnt() argument
731 flow_table = rcu_dereference(queue->rps_flow_table); show_rps_dev_flow_table_cnt()
746 static ssize_t store_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue, store_rps_dev_flow_table_cnt() argument
794 old_table = rcu_dereference_protected(queue->rps_flow_table, store_rps_dev_flow_table_cnt()
796 rcu_assign_pointer(queue->rps_flow_table, table); store_rps_dev_flow_table_cnt()
824 struct netdev_rx_queue *queue = to_rx_queue(kobj); rx_queue_release() local
830 map = rcu_dereference_protected(queue->rps_map, 1); rx_queue_release()
832 RCU_INIT_POINTER(queue->rps_map, NULL); rx_queue_release()
836 flow_table = rcu_dereference_protected(queue->rps_flow_table, 1); rx_queue_release()
838 RCU_INIT_POINTER(queue->rps_flow_table, NULL); rx_queue_release()
844 dev_put(queue->dev); rx_queue_release()
849 struct netdev_rx_queue *queue = to_rx_queue(kobj); rx_queue_namespace() local
850 struct device *dev = &queue->dev->dev; rx_queue_namespace()
868 struct netdev_rx_queue *queue = dev->_rx + index; rx_queue_add_kobject() local
869 struct kobject *kobj = &queue->kobj; rx_queue_add_kobject()
885 dev_hold(queue->dev); rx_queue_add_kobject()
932 ssize_t (*show)(struct netdev_queue *queue,
934 ssize_t (*store)(struct netdev_queue *queue,
946 struct netdev_queue *queue = to_netdev_queue(kobj); netdev_queue_attr_show() local
951 return attribute->show(queue, attribute, buf); netdev_queue_attr_show()
959 struct netdev_queue *queue = to_netdev_queue(kobj); netdev_queue_attr_store() local
964 return attribute->store(queue, attribute, buf, count); netdev_queue_attr_store()
972 static ssize_t show_trans_timeout(struct netdev_queue *queue, show_trans_timeout() argument
978 spin_lock_irq(&queue->_xmit_lock); show_trans_timeout()
979 trans_timeout = queue->trans_timeout; show_trans_timeout()
980 spin_unlock_irq(&queue->_xmit_lock); show_trans_timeout()
986 static inline unsigned int get_netdev_queue_index(struct netdev_queue *queue) get_netdev_queue_index() argument
988 struct net_device *dev = queue->dev; get_netdev_queue_index()
992 if (queue == &dev->_tx[i]) get_netdev_queue_index()
1000 static ssize_t show_tx_maxrate(struct netdev_queue *queue, show_tx_maxrate() argument
1004 return sprintf(buf, "%lu\n", queue->tx_maxrate); show_tx_maxrate()
1007 static ssize_t set_tx_maxrate(struct netdev_queue *queue, set_tx_maxrate() argument
1011 struct net_device *dev = queue->dev; set_tx_maxrate()
1012 int err, index = get_netdev_queue_index(queue); set_tx_maxrate()
1028 queue->tx_maxrate = rate; set_tx_maxrate()
1044 * Byte queue limits sysfs structures and functions.
1072 static ssize_t bql_show_hold_time(struct netdev_queue *queue, bql_show_hold_time() argument
1076 struct dql *dql = &queue->dql; bql_show_hold_time()
1081 static ssize_t bql_set_hold_time(struct netdev_queue *queue, bql_set_hold_time() argument
1085 struct dql *dql = &queue->dql; bql_set_hold_time()
1102 static ssize_t bql_show_inflight(struct netdev_queue *queue, bql_show_inflight() argument
1106 struct dql *dql = &queue->dql; bql_show_inflight()
1115 static ssize_t bql_show_ ## NAME(struct netdev_queue *queue, \
1119 return bql_show(buf, queue->dql.FIELD); \
1122 static ssize_t bql_set_ ## NAME(struct netdev_queue *queue, \
1126 return bql_set(buf, len, &queue->dql.FIELD); \
1153 static ssize_t show_xps_map(struct netdev_queue *queue, show_xps_map() argument
1156 struct net_device *dev = queue->dev; show_xps_map()
1165 index = get_netdev_queue_index(queue); show_xps_map()
1191 static ssize_t store_xps_map(struct netdev_queue *queue, store_xps_map() argument
1195 struct net_device *dev = queue->dev; store_xps_map()
1206 index = get_netdev_queue_index(queue); store_xps_map()
1236 struct netdev_queue *queue = to_netdev_queue(kobj); netdev_queue_release() local
1239 dev_put(queue->dev); netdev_queue_release()
1244 struct netdev_queue *queue = to_netdev_queue(kobj); netdev_queue_namespace() local
1245 struct device *dev = &queue->dev->dev; netdev_queue_namespace()
1263 struct netdev_queue *queue = dev->_tx + index; netdev_queue_add_kobject() local
1264 struct kobject *kobj = &queue->kobj; netdev_queue_add_kobject()
1280 dev_hold(queue->dev); netdev_queue_add_kobject()
1305 struct netdev_queue *queue = dev->_tx + i; netdev_queue_update_kobjects() local
1308 sysfs_remove_group(&queue->kobj, &dql_group); netdev_queue_update_kobjects()
1310 kobject_put(&queue->kobj); netdev_queue_update_kobjects()
/linux-4.1.27/arch/tile/include/hv/
H A Ddrv_xgbe_impl.h52 #define SIZE_SMALL (1) /**< Small packet queue. */
53 #define SIZE_LARGE (2) /**< Large packet queue. */
54 #define SIZE_JUMBO (0) /**< Jumbo packet queue. */
79 /** A queue of packets.
81 * This structure partially defines a queue of packets waiting to be
82 * processed. The queue as a whole is written to by an interrupt handler and
84 * interrupt handler. The other part of the queue state, the read offset, is
88 * The read offset (__packet_receive_read in the user part of the queue
90 * equal to the write offset, the queue is empty; therefore the queue must
91 * contain one more slot than the required maximum queue size.
106 * This queue has 10 slots, and thus can hold 9 packets (_last_packet_plus_one
113 * packet on the queue, sizeof (netio_pkt_t) for the second packet on the
114 * queue, etc. */
124 /** A queue of buffers.
126 * This structure partially defines a queue of empty buffers which have been
127 * obtained via requests to the IPP. (The elements of the queue are packet
129 * retrieved.) The queue as a whole is written to by an interrupt handler and
131 * interrupt handler. The other parts of the queue state, the read offset and
135 * The read offset (__buffer_read in the user part of the queue structure)
137 * write offset, the queue is empty; therefore the queue must contain one more
138 * slot than the required maximum queue size.
141 * the queue structure) points to the slot which will hold the next buffer we
150 * buffers, and means that the value which defines the queue size,
151 * __last_buffer, is different than in the packet queue. Also, the offset
155 * more like this queue.)
171 * This queue has 10 slots, and thus can hold 9 buffers (_last_buffer = 9).
181 * the queue, 1 for the second slot in the queue, etc. */
196 /** The queue of packets waiting to be received. */
205 /** The queue ID that this queue represents. */
226 * An object for managing the user end of a NetIO queue.
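The NetIO comments above spell out the classic one-spare-slot ring rule: read == write means empty, so the array holds one more slot than its maximum occupancy. A generic illustration of that index arithmetic (not the NetIO implementation) follows.

/* Generic illustration, not the NetIO code itself. */
#define RING_SLOTS 10U   /* the ring then holds at most 9 items */

struct ring {
    unsigned int read;    /* consumer-owned index */
    unsigned int write;   /* producer-owned index */
};

static int ring_empty(const struct ring *r)
{
    return r->read == r->write;             /* equal indices can only mean "empty" */
}

static int ring_full(const struct ring *r)
{
    return ((r->write + 1U) % RING_SLOTS) == r->read;   /* one slot is kept spare */
}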
/linux-4.1.27/drivers/mfd/
H A Dpcf50633-adc.c47 struct pcf50633_adc_request *queue[PCF50633_MAX_ADC_FIFO_DEPTH]; member in struct:pcf50633_adc
78 if (!adc->queue[head]) trigger_next_adc_job_if_any()
81 adc_setup(pcf, adc->queue[head]->mux, adc->queue[head]->avg); trigger_next_adc_job_if_any()
95 if (adc->queue[tail]) { adc_enqueue_request()
97 dev_err(pcf->dev, "ADC queue is full, dropping request\n"); adc_enqueue_request()
101 adc->queue[tail] = req; adc_enqueue_request()
182 req = adc->queue[head]; pcf50633_adc_irq()
184 dev_err(pcf->dev, "pcf50633-adc irq: ADC queue empty!\n"); pcf50633_adc_irq()
188 adc->queue[head] = NULL; pcf50633_adc_irq()
230 if (WARN_ON(adc->queue[head])) pcf50633_adc_remove()
235 kfree(adc->queue[i]); pcf50633_adc_remove()
/linux-4.1.27/drivers/crypto/
H A Dn2_core.h131 * ARG1: Real address of queue, or handle for unconfigure
132 * ARG2: Number of entries in queue, zero for unconfigure
134 * RET1: queue handle
136 * Configure a queue in the stream processing unit.
141 * The queue size can range from a minimum of 2 to a maximum
142 * of 64. The queue size must be a power of two.
144 * To unconfigure a queue, specify a length of zero and place
145 * the queue handle into ARG1.
149 * queue. The LAST register will be set to point to the last
150 * entry in the queue.
170 * RET1: queue head offset
179 * RET1: queue tail offset
/linux-4.1.27/arch/sparc/include/uapi/asm/
H A Dmsgbuf.h29 unsigned long msg_cbytes; /* current number of bytes on queue */
30 unsigned long msg_qnum; /* number of messages in queue */
31 unsigned long msg_qbytes; /* max number of bytes on queue */
/linux-4.1.27/arch/s390/include/uapi/asm/
H A Dmsgbuf.h28 unsigned long msg_cbytes; /* current number of bytes on queue */
29 unsigned long msg_qnum; /* number of messages in queue */
30 unsigned long msg_qbytes; /* max number of bytes on queue */
/linux-4.1.27/arch/mn10300/include/uapi/asm/
H A Dmsgbuf.h22 unsigned long msg_cbytes; /* current number of bytes on queue */
23 unsigned long msg_qnum; /* number of messages in queue */
24 unsigned long msg_qbytes; /* max number of bytes on queue */
/linux-4.1.27/arch/parisc/include/uapi/asm/
H A Dmsgbuf.h30 unsigned int msg_cbytes; /* current number of bytes on queue */
31 unsigned int msg_qnum; /* number of messages in queue */
32 unsigned int msg_qbytes; /* max number of bytes on queue */
/linux-4.1.27/arch/avr32/include/uapi/asm/
H A Dmsgbuf.h22 unsigned long msg_cbytes; /* current number of bytes on queue */
23 unsigned long msg_qnum; /* number of messages in queue */
24 unsigned long msg_qbytes; /* max number of bytes on queue */
/linux-4.1.27/arch/cris/include/uapi/asm/
H A Dmsgbuf.h24 unsigned long msg_cbytes; /* current number of bytes on queue */
25 unsigned long msg_qnum; /* number of messages in queue */
26 unsigned long msg_qbytes; /* max number of bytes on queue */
/linux-4.1.27/arch/frv/include/uapi/asm/
H A Dmsgbuf.h22 unsigned long msg_cbytes; /* current number of bytes on queue */
23 unsigned long msg_qnum; /* number of messages in queue */
24 unsigned long msg_qbytes; /* max number of bytes on queue */
/linux-4.1.27/arch/m32r/include/uapi/asm/
H A Dmsgbuf.h22 unsigned long msg_cbytes; /* current number of bytes on queue */
23 unsigned long msg_qnum; /* number of messages in queue */
24 unsigned long msg_qbytes; /* max number of bytes on queue */
/linux-4.1.27/arch/m68k/emu/
H A Dnfblock.c58 struct request_queue *queue; member in struct:nfhd_device
62 static void nfhd_make_request(struct request_queue *queue, struct bio *bio) nfhd_make_request() argument
64 struct nfhd_device *dev = queue->queuedata; nfhd_make_request()
120 dev->queue = blk_alloc_queue(GFP_KERNEL); nfhd_init_one()
121 if (dev->queue == NULL) nfhd_init_one()
124 dev->queue->queuedata = dev; nfhd_init_one()
125 blk_queue_make_request(dev->queue, nfhd_make_request); nfhd_init_one()
126 blk_queue_logical_block_size(dev->queue, bsize); nfhd_init_one()
138 dev->disk->queue = dev->queue; nfhd_init_one()
147 blk_cleanup_queue(dev->queue); nfhd_init_one()
186 blk_cleanup_queue(dev->queue); nfhd_exit()
/linux-4.1.27/drivers/staging/unisys/visorchannel/
H A Dvisorchannel_funcs.c299 /** Return offset of a specific queue entry (data) from the beginning of a
309 #define SIG_WRITE_FIELD(channel, queue, sig_hdr, FIELD) \
311 SIG_QUEUE_OFFSET(&channel->chan_hdr, queue)+ \
317 sig_read_header(struct visorchannel *channel, u32 queue, sig_read_header() argument
328 SIG_QUEUE_OFFSET(&channel->chan_hdr, queue), sig_read_header()
339 sig_do_data(struct visorchannel *channel, u32 queue, sig_do_data() argument
344 int signal_data_offset = SIG_DATA_OFFSET(&channel->chan_hdr, queue, sig_do_data()
364 sig_read_data(struct visorchannel *channel, u32 queue, sig_read_data() argument
367 return sig_do_data(channel, queue, sig_hdr, slot, data, FALSE); sig_read_data()
371 sig_write_data(struct visorchannel *channel, u32 queue, sig_write_data() argument
374 return sig_do_data(channel, queue, sig_hdr, slot, data, TRUE); sig_write_data()
398 signalremove_inner(struct visorchannel *channel, u32 queue, void *msg) signalremove_inner() argument
402 if (!sig_read_header(channel, queue, &sig_hdr)) signalremove_inner()
408 if (!sig_read_data(channel, queue, &sig_hdr, sig_hdr.tail, msg)) { signalremove_inner()
417 if (!SIG_WRITE_FIELD(channel, queue, &sig_hdr, tail)) signalremove_inner()
419 if (!SIG_WRITE_FIELD(channel, queue, &sig_hdr, num_received)) signalremove_inner()
425 visorchannel_signalremove(struct visorchannel *channel, u32 queue, void *msg) visorchannel_signalremove() argument
431 rc = signalremove_inner(channel, queue, msg); visorchannel_signalremove()
434 rc = signalremove_inner(channel, queue, msg); visorchannel_signalremove()
442 signalinsert_inner(struct visorchannel *channel, u32 queue, void *msg) signalinsert_inner() argument
446 if (!sig_read_header(channel, queue, &sig_hdr)) signalinsert_inner()
454 queue) + signalinsert_inner()
462 if (!sig_write_data(channel, queue, &sig_hdr, sig_hdr.head, msg)) signalinsert_inner()
471 if (!SIG_WRITE_FIELD(channel, queue, &sig_hdr, head)) signalinsert_inner()
473 if (!SIG_WRITE_FIELD(channel, queue, &sig_hdr, num_sent)) { signalinsert_inner()
481 visorchannel_signalinsert(struct visorchannel *channel, u32 queue, void *msg) visorchannel_signalinsert() argument
487 rc = signalinsert_inner(channel, queue, msg); visorchannel_signalinsert()
490 rc = signalinsert_inner(channel, queue, msg); visorchannel_signalinsert()
498 visorchannel_signalqueue_slots_avail(struct visorchannel *channel, u32 queue) visorchannel_signalqueue_slots_avail() argument
504 if (!sig_read_header(channel, queue, &sig_hdr)) visorchannel_signalqueue_slots_avail()
517 visorchannel_signalqueue_max_slots(struct visorchannel *channel, u32 queue) visorchannel_signalqueue_max_slots() argument
521 if (!sig_read_header(channel, queue, &sig_hdr)) visorchannel_signalqueue_max_slots()
619 "failed to read signal queue #%d from channel @0x%-16.16Lx errcode=%d\n", visorchannel_debug()
H A Dvisorchannel.h55 BOOL visorchannel_signalremove(struct visorchannel *channel, u32 queue,
57 BOOL visorchannel_signalinsert(struct visorchannel *channel, u32 queue,
60 u32 queue);
61 int visorchannel_signalqueue_max_slots(struct visorchannel *channel, u32 queue);
/linux-4.1.27/drivers/net/fddi/skfp/h/
H A Dhwmtm.h111 SMbuf *mb_free ; /* free queue */
150 struct s_smt_tx_queue *tx_p ; /* pointer to the transmit queue */
230 * txd *HWM_GET_TX_USED(smc,queue)
234 * number of used TxDs for the queue, specified by the index.
236 * para queue the number of the send queue: Can be specified by
239 * return number of used TxDs for this send queue
243 #define HWM_GET_TX_USED(smc,queue) (int) (smc)->hw.fp.tx_q[queue].tx_used
247 * txd *HWM_GET_CURR_TXD(smc,queue)
251 * pointer to the TxD which points to the current queue put
254 * para queue the number of the send queue: Can be specified by
261 #define HWM_GET_CURR_TXD(smc,queue) (struct s_smt_fp_txd volatile *)\
262 (smc)->hw.fp.tx_q[queue].tx_curr_put
303 * the count of used RXDs in receive queue 1.
305 * return the used RXD count of receive queue 1
320 * the rxd_free count of receive queue 1.
322 * return the rxd_free count of receive queue 1
334 * pointer to the RxD which points to the current queue put
379 #define HWM_E0004_MSG "HWM: Parity error rx queue 1"
381 #define HWM_E0005_MSG "HWM: Encoding error rx queue 1"
383 #define HWM_E0006_MSG "HWM: Encoding error async tx queue"
385 #define HWM_E0007_MSG "HWM: Encoding error sync tx queue"
H A Dfplustm.h108 struct s_smt_fp_rxd volatile *rx_curr_put ; /* next RxD to queue into */
142 u_short rx1_fifo_start ; /* rx queue start address */
143 u_short rx1_fifo_size ; /* rx queue size */
144 u_short rx2_fifo_start ; /* rx queue start address */
145 u_short rx2_fifo_size ; /* rx queue size */
146 u_short tx_s_start ; /* sync queue start address */
147 u_short tx_s_size ; /* sync queue size */
148 u_short tx_a0_start ; /* async queue A0 start address */
149 u_short tx_a0_size ; /* async queue A0 size */
187 * queue pointers; points to the queue dependent variables
193 * queue dependent variables
H A Dsupern_2.h50 #define FS_MSVALID (1<<15) /* end of queue */
274 #define FM_PRI0 0x1c /* r/w priority r. for asyn.-queue 0 */
275 #define FM_PRI1 0x1d /* r/w priority r. for asyn.-queue 1 */
276 #define FM_PRI2 0x1e /* r/w priority r. for asyn.-queue 2 */
281 #define FM_EARV 0x23 /* r/w end addr of receive queue */
285 #define FM_EAS 0x24 /* r/w end addr of synchr. queue */
286 #define FM_EAA0 0x25 /* r/w end addr of asyn. queue 0 */
287 #define FM_EAA1 0x26 /* r/w end addr of asyn. queue 1 */
288 #define FM_EAA2 0x27 /* r/w end addr of asyn. queue 2 */
328 /* Supernet 3: extensions for 2. receive queue etc. */
444 #define FM_STBFLA 0x0200 /* asynchr.-queue trans. buffer full */
445 #define FM_STBFLS 0x0400 /* synchr.-queue transm. buffer full */
446 #define FM_STXABRS 0x0800 /* synchr. queue transmit-abort */
447 #define FM_STXABRA0 0x1000 /* asynchr. queue 0 transmit-abort */
448 #define FM_STXABRA1 0x2000 /* asynchr. queue 1 transmit-abort */
449 #define FM_STXABRA2 0x4000 /* asynchr. queue 2 transmit-abort */
456 #define FM_SQLCKS 0x0001 /* queue lock for synchr. queue */
457 #define FM_SQLCKA0 0x0002 /* queue lock for asynchr. queue 0 */
458 #define FM_SQLCKA1 0x0004 /* queue lock for asynchr. queue 1 */
459 #define FM_SQLCKA2 0x0008 /* queue lock for asynchr. queue 2 */
469 #define FM_SPCEPDS 0x0100 /* parity/coding error: syn. queue */
528 #define FM_SRQUNLCK1 0x0001 /* receive queue unlocked queue 1 */
529 #define FM_SRQUNLCK2 0x0002 /* receive queue unlocked queue 2 */
530 #define FM_SRPERRQ1 0x0004 /* receive parity error rx queue 1 */
531 #define FM_SRPERRQ2 0x0008 /* receive parity error rx queue 2 */
533 #define FM_SRCVOVR2 0x0800 /* receive FIFO overfull rx queue 2 */
534 #define FM_SRBFL2 0x1000 /* receive buffer full rx queue 2 */
535 #define FM_SRABT2 0x2000 /* receive abort rx queue 2 */
536 #define FM_SRBMT2 0x4000 /* receive buf empty rx queue 2 */
537 #define FM_SRCOMP2 0x8000 /* receive comp rx queue 2 */
577 #define FM_MENDRCV 0x0800 /* Ena dual receive queue operation */
586 #define FM_RECV1 0x000f /* options for receive queue 1 */
597 #define FM_RECV2 0x00f0 /* options for receive queue 2 */
675 #define FM_ICLLS 0x11 /* clear synchronous queue lock */
676 #define FM_ICLLA0 0x12 /* clear asynchronous queue 0 lock */
677 #define FM_ICLLA1 0x14 /* clear asynchronous queue 1 lock */
678 #define FM_ICLLA2 0x18 /* clear asynchronous queue 2 lock */
680 #define FM_ICLLR 0x20 /* clear receive queue (SN3:1) lock */
681 #define FM_ICLLR2 0x21 /* SN3: clear receive queue 2 lock */
684 #define FM_ICLLAL 0x3f /* clear all queue locks */
689 #define FM_ITRS 0x01 /* transmit synchronous queue */
691 #define FM_ITRA0 0x02 /* transmit asynchronous queue 0 */
693 #define FM_ITRA1 0x04 /* transmit asynchronous queue 1 */
695 #define FM_ITRA2 0x08 /* transmit asynchronous queue 2 */
1042 #define RQ_RRQ 3 /* read request: receive queue */
1043 #define RQ_WSQ 4 /* write request: synchronous queue */
1044 #define RQ_WA0 5 /* write requ.: asynchronous queue 0 */
1045 #define RQ_WA1 6 /* write requ.: asynchronous queue 1 */
1046 #define RQ_WA2 7 /* write requ.: asynchronous queue 2 */
/linux-4.1.27/include/xen/interface/io/
H A Dnetif.h38 * that it cannot safely queue packets (as it may not be kicked to send them).
55 * If supported, the backend will write the key "multi-queue-max-queues" to
59 * key "multi-queue-num-queues", set to the number they wish to use, which
61 * in "multi-queue-max-queues".
67 * Each queue consists of one shared ring pair, i.e. there must be the same
70 * For frontends requesting just one queue, the usual event-channel and
73 * multi-queue feature, and one that does, but requested only one queue.
77 * instead writing those keys under sub-keys having the name "queue-N" where
78 * N is the integer ID of the queue for which those keys belong. Queues
80 * event channels must write the following set of queue-related keys:
82 * /local/domain/1/device/vif/0/multi-queue-num-queues = "2"
83 * /local/domain/1/device/vif/0/queue-0 = ""
84 * /local/domain/1/device/vif/0/queue-0/tx-ring-ref = "<ring-ref-tx0>"
85 * /local/domain/1/device/vif/0/queue-0/rx-ring-ref = "<ring-ref-rx0>"
86 * /local/domain/1/device/vif/0/queue-0/event-channel-tx = "<evtchn-tx0>"
87 * /local/domain/1/device/vif/0/queue-0/event-channel-rx = "<evtchn-rx0>"
88 * /local/domain/1/device/vif/0/queue-1 = ""
89 * /local/domain/1/device/vif/0/queue-1/tx-ring-ref = "<ring-ref-tx1>"
90 * /local/domain/1/device/vif/0/queue-1/rx-ring-ref = "<ring-ref-rx1>"
91 * /local/domain/1/device/vif/0/queue-1/event-channel-tx = "<evtchn-tx1>"
92 * /local/domain/1/device/vif/0/queue-1/event-channel-rx = "<evtchn-rx1>"
101 * between the two. Guests are free to transmit packets on any queue
103 * prepared to receive packets on any queue they have requested be set up.
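The netif protocol description above lists the per-queue xenstore keys a multi-queue frontend must publish. A hedged sketch of how a frontend might write them for one queue with xenbus_printf(); the helper name and parameters are illustrative, and the device-level multi-queue-num-queues key is assumed to be written elsewhere.

#include <linux/kernel.h>
#include <xen/xenbus.h>
#include <xen/grant_table.h>

static int write_queue_keys(struct xenbus_device *dev,
                            struct xenbus_transaction xbt,
                            unsigned int queue_index,
                            grant_ref_t tx_ring_ref, grant_ref_t rx_ring_ref,
                            unsigned int evtchn_tx, unsigned int evtchn_rx)
{
    char path[128];

    /* Per-queue keys live under .../queue-N, not directly under the device. */
    snprintf(path, sizeof(path), "%s/queue-%u", dev->nodename, queue_index);

    xenbus_printf(xbt, path, "tx-ring-ref", "%u", tx_ring_ref);
    xenbus_printf(xbt, path, "rx-ring-ref", "%u", rx_ring_ref);
    xenbus_printf(xbt, path, "event-channel-tx", "%u", evtchn_tx);
    xenbus_printf(xbt, path, "event-channel-rx", "%u", evtchn_rx);

    return 0;   /* real code would check each xenbus_printf() return value */
}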
/linux-4.1.27/arch/mips/include/asm/octeon/
H A Dcvmx-cmd-queue.h33 * The common command queue infrastructure abstracts out the
34 * software necessary for adding to Octeon's chained queue
40 * call cvmx-cmd-queue functions directly. Instead the hardware
46 * cvmx-cmd-queue, knowledge of its internal working can help
49 * Command queue pointers are stored in a global named block
51 * hardware queue is stored in its own cache line to reduce SMP
53 * every 16th queue is next to each other in memory. This scheme
56 * the first queue for each port is in the same cache area. The
63 * In addition to the memory pointer layout, cvmx-cmd-queue
84 * don't use it and it slows down the command queue processing
100 #define CVMX_CMD_QUEUE_PKO(queue) \
101 ((cvmx_cmd_queue_id_t)(CVMX_CMD_QUEUE_PKO_BASE + (0xffff&(queue))))
108 #define CVMX_CMD_QUEUE_DMA(queue) \
109 ((cvmx_cmd_queue_id_t)(CVMX_CMD_QUEUE_DMA_BASE + (0xffff&(queue))))
115 * Command write operations can fail if the command queue needs
149 * cache line than the queue information to reduce the contention on the
151 * of queue state causes the ll/sc to fail quite often.
159 * Initialize a command queue for use. The initial FPA buffer is
161 * new command queue.
163 * @queue_id: Hardware command queue to initialize.
175 * Shut down a queue and free its command buffers to the FPA. The
176 * hardware connected to the queue must be stopped before this
186 * Return the number of command words pending in the queue. This
189 * @queue_id: Hardware command queue to query
201 * @queue_id: Command queue to query
208 * Get the index into the state arrays for the supplied queue id.
220 * every 16th queue. This reduces cache thrashing when you are __cvmx_cmd_queue_get_index()
230 * Lock the supplied queue so nobody else is updating it at the same
234 * @qptr: Pointer to the queue's global state
285 * Unlock the queue, flushing all writes.
296 * Get the queue state structure for the given queue id
312 * Write an arbitrary number of command words to a command queue.
316 * @queue_id: Hardware command queue to write to
318 * Use internal locking to ensure exclusive access for queue
334 /* Make sure nobody else is updating the same queue */ cvmx_cmd_queue_write()
339 * If a max queue length was specified then make sure we don't cvmx_cmd_queue_write()
412 * queue.
414 * @queue_id: Hardware command queue to write to
416 * Use internal locking to ensure exclusive access for queue
432 /* Make sure nobody else is updating the same queue */ cvmx_cmd_queue_write2()
437 * If a max queue length was specified then make sure we don't cvmx_cmd_queue_write2()
511 * queue.
513 * @queue_id: Hardware command queue to write to
515 * Use internal locking to ensure exclusive access for queue
533 /* Make sure nobody else is updating the same queue */ cvmx_cmd_queue_write3()
538 * If a max queue length was specified then make sure we don't cvmx_cmd_queue_write3()
H A Dcvmx-pko.h47 * maintaining PKO queue pointers. These are now stored in a
51 * queue locking correctly applies across all operating
63 #include <asm/octeon/cvmx-cmd-queue.h>
101 * the same queue at the same time
106 * to the output queue. This will maintain packet ordering on
111 * PKO uses the common command queue locks to insure exclusive
112 * access to the output queue. This is a memory based
145 * addition to the output queue,
149 * The output queue to send the packet to (0-127 are
152 uint64_t queue:9; member in struct:__anon1994::__anon1995
157 uint64_t queue:9;
212 * a work queue entry.
304 * @base_queue: First queue number to associate with this port.
306 * @priority: Array of priority levels for each queue. Values are
323 * @queue: Queue the packet is for
326 static inline void cvmx_pko_doorbell(uint64_t port, uint64_t queue, cvmx_pko_doorbell() argument
336 ptr.s.queue = queue; cvmx_pko_doorbell()
338 * Need to make sure output queue data is in DRAM before cvmx_pko_doorbell()
347 * get exclusive access to the output queue structure, and
351 * and must be called with the same port/queue/use_locking arguments.
358 * is accessing the same queue at the same time.
361 * access to the output queue. This will maintain
364 * - PKO uses the common command queue locks to ensure
365 * exclusive access to the output queue. This is a
373 * @queue: Queue to use
378 static inline void cvmx_pko_send_packet_prepare(uint64_t port, uint64_t queue, cvmx_pko_send_packet_prepare() argument
396 (CVMX_TAG_SUBGROUP_MASK & queue); cvmx_pko_send_packet_prepare()
409 * @queue: Queue to use
421 uint64_t queue, cvmx_pko_send_packet_finish()
429 result = cvmx_cmd_queue_write2(CVMX_CMD_QUEUE_PKO(queue), cvmx_pko_send_packet_finish()
433 cvmx_pko_doorbell(port, queue, 2); cvmx_pko_send_packet_finish()
450 * @queue: Queue to use
454 * @addr: Physical address of a work queue entry or physical address
464 uint64_t queue, cvmx_pko_send_packet_finish3()
473 result = cvmx_cmd_queue_write3(CVMX_CMD_QUEUE_PKO(queue), cvmx_pko_send_packet_finish3()
477 cvmx_pko_doorbell(port, queue, 3); cvmx_pko_send_packet_finish3()
488 * Return the pko output queue associated with a port and a specific core.
490 * is the base queue.
493 * @core: Core to get queue for
495 * Returns Core-specific output queue
532 * are assigned an illegal queue number */ cvmx_pko_get_base_queue_per_core()
537 * For a given port number, return the base pko output queue
541 * Returns Base output queue
419 cvmx_pko_send_packet_finish( uint64_t port, uint64_t queue, cvmx_pko_command_word0_t pko_command, union cvmx_buf_ptr packet, cvmx_pko_lock_t use_locking) cvmx_pko_send_packet_finish() argument
462 cvmx_pko_send_packet_finish3( uint64_t port, uint64_t queue, cvmx_pko_command_word0_t pko_command, union cvmx_buf_ptr packet, uint64_t addr, cvmx_pko_lock_t use_locking) cvmx_pko_send_packet_finish3() argument
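The comments above describe a three-step send: reserve the output queue, write the command words, ring the doorbell. A sketch of that call ordering using the helpers named in cvmx-pko.h follows; the lock-mode constant and the exact prepare() arguments are assumptions to verify against the real header, and both command words are left zeroed rather than guessing at field names:

    #include <asm/octeon/cvmx-pko.h>

    static void example_pko_send(uint64_t port, uint64_t queue)
    {
        cvmx_pko_command_word0_t pko_command;
        union cvmx_buf_ptr packet;

        /* 1. Reserve the output queue (locking mode assumed here). */
        cvmx_pko_send_packet_prepare(port, queue, CVMX_PKO_LOCK_CMD_QUEUE);

        pko_command.u64 = 0;    /* a real sender fills segment count, length, ... */
        packet.u64 = 0;         /* ... and the buffer pointer/size here */

        /* 2.+3. Write the two command words; the finish helper rings the
         * doorbell (cvmx_pko_doorbell) once they are committed. */
        cvmx_pko_send_packet_finish(port, queue, pko_command, packet,
                                    CVMX_PKO_LOCK_CMD_QUEUE);
    }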
/linux-4.1.27/drivers/staging/i2o/
H A Di2o_block.h26 * Added a queue depth.
29 * Removed queue walk, fixed for 64bitness.
34 * Heavily chop down the queue depths
59 /* request queue sizes */
75 spinlock_t lock; /* queue lock */
78 unsigned int open_queue_depth; /* number of requests in the queue */
89 struct list_head queue; member in struct:i2o_block_request
100 struct request_queue *queue; member in struct:i2o_block_delayed_request
H A Di2o_block.c26 * Added a queue depth.
29 * Removed queue walk, fixed for 64bitness.
34 * Heavily chop down the queue depths
88 * Frees the request queue, gendisk and the i2o_block_device structure.
92 blk_cleanup_queue(dev->gd->queue); i2o_block_device_free()
289 INIT_LIST_HEAD(&ireq->queue); i2o_block_request_alloc()
356 * @q: request queue for the request
393 * i2o_block_delayed_request_fn - delayed request queue function
394 * @work: the delayed request with the queue to start
396 * If the request queue is stopped for a disk, and there is no open
398 * the queue after I2O_BLOCK_REQUEST_TIME. Otherwise the queue will never
406 struct request_queue *q = dreq->queue; i2o_block_delayed_request_fn()
440 list_del(&ireq->queue); i2o_block_end_request()
867 list_add_tail(&ireq->queue, &dev->open_queue); i2o_block_transfer()
885 * i2o_block_request_fn - request queue handling function
886 * @q: request queue from which the request could be fetched
888 * Takes the next request from the queue, transfers it and if no error
889 * occurs dequeue it from the queue. On arrival of the reply the message
915 /* stop the queue and retry later */ i2o_block_request_fn()
920 dreq->queue = q; i2o_block_request_fn()
954 * queue and initialize them as far as no additional information is needed.
963 struct request_queue *queue; i2o_block_device_alloc() local
986 /* initialize the request queue */ i2o_block_device_alloc()
987 queue = blk_init_queue(i2o_block_request_fn, &dev->lock); i2o_block_device_alloc()
988 if (!queue) { i2o_block_device_alloc()
989 osm_err("Insufficient memory to allocate request queue.\n"); i2o_block_device_alloc()
994 blk_queue_prep_rq(queue, i2o_block_prep_req_fn); i2o_block_device_alloc()
997 gd->queue = queue; i2o_block_device_alloc()
1030 struct request_queue *queue; i2o_block_probe() local
1077 /* setup request queue */ i2o_block_probe()
1078 queue = gd->queue; i2o_block_probe()
1079 queue->queuedata = i2o_blk_dev; i2o_block_probe()
1081 blk_queue_max_hw_sectors(queue, max_sectors); i2o_block_probe()
1082 blk_queue_max_segments(queue, i2o_sg_tablesize(c, body_size)); i2o_block_probe()
1084 osm_debug("max sectors = %d\n", queue->max_sectors); i2o_block_probe()
1085 osm_debug("phys segments = %d\n", queue->max_phys_segments); i2o_block_probe()
1086 osm_debug("max hw segments = %d\n", queue->max_hw_segments); i2o_block_probe()
1094 blk_queue_logical_block_size(queue, le32_to_cpu(blocksize)); i2o_block_probe()
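The probe/alloc code above is the classic pre-blk-mq request-function setup: blk_init_queue() ties a request function and a spinlock to the queue, and the limits are set on the returned queue before it is attached to the gendisk. A stripped-down sketch of that pattern (ex_* names are hypothetical):

    #include <linux/blkdev.h>
    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(ex_lock);

    /* Called with ex_lock held; a real driver would start I/O here and
     * complete requests from its interrupt handler. */
    static void ex_request_fn(struct request_queue *q)
    {
        struct request *req;

        while ((req = blk_fetch_request(q)) != NULL)
            __blk_end_request_all(req, -EIO);    /* this stub just fails them */
    }

    static struct request_queue *ex_setup_queue(void)
    {
        struct request_queue *q = blk_init_queue(ex_request_fn, &ex_lock);

        if (!q)
            return NULL;
        blk_queue_max_hw_sectors(q, 1024);       /* limits, as in the probe code */
        blk_queue_logical_block_size(q, 512);
        return q;                                /* caller assigns gd->queue = q */
    }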
/linux-4.1.27/scripts/
H A Dheaderdep.pl88 my @queue = @_;
89 while(@queue) {
90 my $header = pop @queue;
105 push @queue, $dep;
142 my @queue = map { [[0, $_]] } @_;
143 while(@queue) {
144 my $top = pop @queue;
158 push @queue, $chain;
/linux-4.1.27/drivers/net/wireless/b43/
H A Dpio.h13 /* TX queue. */
27 /* RX queue. */
34 /* TX queue */
47 /* RX queue */
59 /* Pointer to the TX queue we belong to. */
60 struct b43_pio_txqueue *queue; member in struct:b43_pio_txpacket
73 /* The device queue buffer size in bytes. */
75 /* The number of used bytes in the device queue buffer. */
82 /* True, if the mac80211 queue was stopped due to overflow at TX. */
84 /* Our b43 queue index number */
86 /* The mac80211 QoS queue priority. */
/linux-4.1.27/lib/
H A Ddynamic_queue_limits.c2 * Dynamic byte queue limits. See include/linux/dynamic_queue_limits.h
16 /* Records completed count and recalculates the queue limit */ dql_completed()
25 /* Can't complete more than what's in queue */ dql_completed()
39 * - The queue was over-limit in the last interval, dql_completed()
40 * and there is no more data in the queue. dql_completed()
42 * - The queue was over-limit in the previous interval and dql_completed()
44 * had been consumed. This covers the case when queue dql_completed()
48 * When queue is starved increase the limit by the amount dql_completed()
59 * A decrease is only considered if the queue has been busy in dql_completed()
63 * the amount needed to prevent starvation, the queue limit dql_completed()
72 * - The queue limit plus previous over-limit minus twice dql_completed()
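dql_queued()/dql_completed() are rarely called directly by network drivers; they sit behind the byte-queue-limit helpers in netdevice.h. A sketch of the usual pairing (the ex_* wrappers are illustrative):

    #include <linux/netdevice.h>

    /* Transmit path: account for each skb handed to the hardware. */
    static void ex_tx_queued(struct netdev_queue *txq, unsigned int bytes)
    {
        netdev_tx_sent_queue(txq, bytes);            /* dql_queued() underneath */
    }

    /* Completion path: report what the hardware finished; dql_completed()
     * recalculates the limit and may restart the stopped queue. */
    static void ex_tx_completed(struct netdev_queue *txq,
                                unsigned int pkts, unsigned int bytes)
    {
        netdev_tx_completed_queue(txq, pkts, bytes);
    }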
/linux-4.1.27/drivers/net/wireless/iwlwifi/pcie/
H A Dinternal.h77 * struct iwl_rxq - Rx queue
81 * @queue:
99 struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE]; member in struct:iwl_rxq
119 * iwl_queue_inc_wrap - increment queue index, wrap back to beginning
128 * iwl_queue_dec_wrap - decrement queue index, wrap back to end
143 * Generic queue structure
150 * queues, n_window, which is the size of the software queue data
151 * is also 256; however, for the command queue, n_window is only
161 * data is a window overlaid over the HW queue.
166 /* use for monitoring and recovering the stuck queue */
168 int n_window; /* safe queue window */
170 int low_mark; /* low watermark, resume queue if free
172 int high_mark; /* high watermark, stop queue if free
206 * @q: generic Rx/Tx queue descriptor
210 * for each command on the queue
213 * @lock: queue lock
214 * @stuck_timer: timer that fires if queue gets stuck
217 * @active: stores if queue is active
218 * @ampdu: true if this queue is an ampdu queue for a specific RA/TID
219 * @wd_timeout: queue watchdog timeout (jiffies) - per queue
220 * @frozen: tx stuck queue timer is frozen
223 * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame
252 * @rxq: all the RX queue data
262 * @ucode_write_waitq: wait queue for uCode load
263 * @cmd_queue - command queue number
266 * @scd_set_active: should the transport configure the SCD for HCMD queue
378 void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int queue, u16 ssn,
381 void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue,
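The iwl_queue_inc_wrap()/iwl_queue_dec_wrap() helpers documented above are the usual power-of-two ring index arithmetic; in isolation it looks like this (generic sketch, RING_SIZE is an assumption):

    #include <stdint.h>

    #define RING_SIZE 256u                      /* must be a power of two */

    static inline uint32_t ring_inc_wrap(uint32_t idx)
    {
        return (idx + 1) & (RING_SIZE - 1);     /* wraps back to 0 at the end */
    }

    static inline uint32_t ring_dec_wrap(uint32_t idx)
    {
        return (idx - 1) & (RING_SIZE - 1);     /* unsigned wrap folds 0 back to end */
    }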
/linux-4.1.27/crypto/
H A Dcryptd.c37 struct crypto_queue queue; member in struct:cryptd_cpu_queue
47 struct cryptd_queue *queue; member in struct:cryptd_instance_ctx
52 struct cryptd_queue *queue; member in struct:hashd_instance_ctx
57 struct cryptd_queue *queue; member in struct:aead_instance_ctx
87 static int cryptd_init_queue(struct cryptd_queue *queue, cryptd_init_queue() argument
93 queue->cpu_queue = alloc_percpu(struct cryptd_cpu_queue); cryptd_init_queue()
94 if (!queue->cpu_queue) cryptd_init_queue()
97 cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu); for_each_possible_cpu()
98 crypto_init_queue(&cpu_queue->queue, max_cpu_qlen); for_each_possible_cpu()
104 static void cryptd_fini_queue(struct cryptd_queue *queue) cryptd_fini_queue() argument
110 cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu); for_each_possible_cpu()
111 BUG_ON(cpu_queue->queue.qlen); for_each_possible_cpu()
113 free_percpu(queue->cpu_queue);
116 static int cryptd_enqueue_request(struct cryptd_queue *queue, cryptd_enqueue_request() argument
123 cpu_queue = this_cpu_ptr(queue->cpu_queue); cryptd_enqueue_request()
124 err = crypto_enqueue_request(&cpu_queue->queue, request); cryptd_enqueue_request()
148 backlog = crypto_get_backlog(&cpu_queue->queue); cryptd_queue_worker()
149 req = crypto_dequeue_request(&cpu_queue->queue); cryptd_queue_worker()
160 if (cpu_queue->queue.qlen) cryptd_queue_worker()
168 return ictx->queue; cryptd_get_queue()
254 struct cryptd_queue *queue; cryptd_blkcipher_enqueue() local
256 queue = cryptd_get_queue(crypto_ablkcipher_tfm(tfm)); cryptd_blkcipher_enqueue()
260 return cryptd_enqueue_request(queue, &req->base); cryptd_blkcipher_enqueue()
333 struct cryptd_queue *queue) cryptd_create_blkcipher()
354 ctx->queue = queue; cryptd_create_blkcipher()
441 struct cryptd_queue *queue = cryptd_hash_enqueue() local
447 return cryptd_enqueue_request(queue, &req->base); cryptd_hash_enqueue()
594 struct cryptd_queue *queue) cryptd_create_hash()
618 ctx->queue = queue; cryptd_create_hash()
701 struct cryptd_queue *queue = cryptd_get_queue(crypto_aead_tfm(tfm)); cryptd_aead_enqueue() local
705 return cryptd_enqueue_request(queue, &req->base); cryptd_aead_enqueue()
744 struct cryptd_queue *queue) cryptd_create_aead()
765 ctx->queue = queue; cryptd_create_aead()
801 static struct cryptd_queue queue; variable in typeref:struct:cryptd_queue
813 return cryptd_create_blkcipher(tmpl, tb, &queue); cryptd_create()
815 return cryptd_create_hash(tmpl, tb, &queue); cryptd_create()
817 return cryptd_create_aead(tmpl, tb, &queue); cryptd_create()
969 err = cryptd_init_queue(&queue, CRYPTD_MAX_CPU_QLEN); cryptd_init()
975 cryptd_fini_queue(&queue); cryptd_init()
982 cryptd_fini_queue(&queue); cryptd_exit()
331 cryptd_create_blkcipher(struct crypto_template *tmpl, struct rtattr **tb, struct cryptd_queue *queue) cryptd_create_blkcipher() argument
593 cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb, struct cryptd_queue *queue) cryptd_create_hash() argument
742 cryptd_create_aead(struct crypto_template *tmpl, struct rtattr **tb, struct cryptd_queue *queue) cryptd_create_aead() argument
H A Dmcryptd.c45 struct mcryptd_queue *queue; member in struct:hashd_instance_ctx
68 static int mcryptd_init_queue(struct mcryptd_queue *queue, mcryptd_init_queue() argument
74 queue->cpu_queue = alloc_percpu(struct mcryptd_cpu_queue); mcryptd_init_queue()
75 pr_debug("mqueue:%p mcryptd_cpu_queue %p\n", queue, queue->cpu_queue); mcryptd_init_queue()
76 if (!queue->cpu_queue) mcryptd_init_queue()
79 cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu); for_each_possible_cpu()
80 pr_debug("cpu_queue #%d %p\n", cpu, queue->cpu_queue); for_each_possible_cpu()
81 crypto_init_queue(&cpu_queue->queue, max_cpu_qlen); for_each_possible_cpu()
87 static void mcryptd_fini_queue(struct mcryptd_queue *queue) mcryptd_fini_queue() argument
93 cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu); for_each_possible_cpu()
94 BUG_ON(cpu_queue->queue.qlen); for_each_possible_cpu()
96 free_percpu(queue->cpu_queue);
99 static int mcryptd_enqueue_request(struct mcryptd_queue *queue, mcryptd_enqueue_request() argument
107 cpu_queue = this_cpu_ptr(queue->cpu_queue); mcryptd_enqueue_request()
110 err = crypto_enqueue_request(&cpu_queue->queue, request); mcryptd_enqueue_request()
173 backlog = crypto_get_backlog(&cpu_queue->queue); mcryptd_queue_worker()
174 req = crypto_dequeue_request(&cpu_queue->queue); mcryptd_queue_worker()
186 if (!cpu_queue->queue.qlen) mcryptd_queue_worker()
190 if (cpu_queue->queue.qlen) mcryptd_queue_worker()
225 return ictx->queue; mcryptd_get_queue()
324 struct mcryptd_queue *queue = mcryptd_hash_enqueue() local
330 ret = mcryptd_enqueue_request(queue, &req->base, rctx); mcryptd_hash_enqueue()
491 struct mcryptd_queue *queue) mcryptd_create_hash()
516 ctx->queue = queue; mcryptd_create_hash()
490 mcryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb, struct mcryptd_queue *queue) mcryptd_create_hash() argument
H A Dalgapi.c851 void crypto_init_queue(struct crypto_queue *queue, unsigned int max_qlen) crypto_init_queue() argument
853 INIT_LIST_HEAD(&queue->list); crypto_init_queue()
854 queue->backlog = &queue->list; crypto_init_queue()
855 queue->qlen = 0; crypto_init_queue()
856 queue->max_qlen = max_qlen; crypto_init_queue()
860 int crypto_enqueue_request(struct crypto_queue *queue, crypto_enqueue_request() argument
865 if (unlikely(queue->qlen >= queue->max_qlen)) { crypto_enqueue_request()
869 if (queue->backlog == &queue->list) crypto_enqueue_request()
870 queue->backlog = &request->list; crypto_enqueue_request()
873 queue->qlen++; crypto_enqueue_request()
874 list_add_tail(&request->list, &queue->list); crypto_enqueue_request()
881 void *__crypto_dequeue_request(struct crypto_queue *queue, unsigned int offset) __crypto_dequeue_request() argument
885 if (unlikely(!queue->qlen)) __crypto_dequeue_request()
888 queue->qlen--; __crypto_dequeue_request()
890 if (queue->backlog != &queue->list) __crypto_dequeue_request()
891 queue->backlog = queue->backlog->next; __crypto_dequeue_request()
893 request = queue->list.next; __crypto_dequeue_request()
901 struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue) crypto_dequeue_request() argument
903 return __crypto_dequeue_request(queue, 0); crypto_dequeue_request()
907 int crypto_tfm_in_queue(struct crypto_queue *queue, struct crypto_tfm *tfm) crypto_tfm_in_queue() argument
911 list_for_each_entry(req, &queue->list, list) { crypto_tfm_in_queue()
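Putting the crypto_queue primitives above together, a driver typically enqueues under its own lock and drains from a worker, completing any backlogged request with -EINPROGRESS before handling the dequeued one (this is the shape of cryptd_queue_worker()). A hedged sketch with hypothetical ex_* names:

    #include <linux/spinlock.h>
    #include <crypto/algapi.h>

    static struct crypto_queue ex_queue;
    static DEFINE_SPINLOCK(ex_lock);

    static void ex_init(void)
    {
        crypto_init_queue(&ex_queue, 50);            /* max_qlen = 50 */
    }

    static int ex_enqueue(struct crypto_async_request *req)
    {
        int err;

        spin_lock_bh(&ex_lock);
        /* -EINPROGRESS when queued, -EBUSY when it went to the backlog. */
        err = crypto_enqueue_request(&ex_queue, req);
        spin_unlock_bh(&ex_lock);
        return err;
    }

    static void ex_process_one(void)
    {
        struct crypto_async_request *req, *backlog;

        spin_lock_bh(&ex_lock);
        backlog = crypto_get_backlog(&ex_queue);
        req = crypto_dequeue_request(&ex_queue);
        spin_unlock_bh(&ex_lock);

        if (backlog)
            backlog->complete(backlog, -EINPROGRESS);  /* tell caller it is queued */
        if (req)
            req->complete(req, 0);                     /* real work would go here */
    }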
/linux-4.1.27/drivers/scsi/csiostor/
H A Dcsio_wr.c70 /* Size of the egress queue status page */
83 * number of bytes in the freelist queue. This translates to at least csio_wr_ring_fldb()
94 /* Write a 0 cidx increment value to enable SGE interrupts for this queue */
105 * csio_wr_fill_fl - Populate the FL buffers of a FL queue.
107 * @flq: Freelist queue.
145 * @flq: Freelist queue.
162 * csio_wr_alloc_q - Allocate a WR queue and initialize it.
164 * @qsize: Size of the queue in bytes
165 * @wrsize: Size of WR in this queue, if fixed.
166 * @type: Type of queue (Ingress/Egress/Freelist)
167 * @owner: Module that owns this queue.
170 * @iq_int_handler: Ingress queue handler in INTx mode.
172 * This function allocates and sets up a queue for the caller
174 * be free entries being available in the queue array. If one is found,
175 * it is initialized with the allocated queue, marked as being used (owner),
176 * and a handle returned to the caller in form of the queue's index
179 * another queue (with its own index into q_arr) for the freelist. Allocate
181 * idx in the ingress queue's flq.idx. This is how a Freelist is associated
182 * with its owning ingress queue.
213 csio_err(hw, "Invalid Ingress queue WR size:%d\n", csio_wr_alloc_q()
229 csio_err(hw, "Invalid queue type: 0x%x\n", type); csio_wr_alloc_q()
239 "queue at id: %d size: %d\n", free_idx, qsize); csio_wr_alloc_q()
252 /* Since queue area is set to zero */ csio_wr_alloc_q()
256 * Ingress queue status page size is always the size of csio_wr_alloc_q()
257 * the ingress queue entry. csio_wr_alloc_q()
270 "Failed to allocate FL queue" csio_wr_alloc_q()
284 "Failed to allocate FL queue bufs" csio_wr_alloc_q()
331 * @iq_idx: Ingress queue that got created.
374 * ingress context of this queue. This will block interrupts to csio_wr_iq_create_rsp()
375 * this queue until the next GTS write. Therefore, we do a csio_wr_iq_create_rsp()
376 * 0-cidx increment GTS write for this queue just to clear the csio_wr_iq_create_rsp()
378 * queue. csio_wr_iq_create_rsp()
401 * csio_wr_iq_create - Configure an Ingress queue with FW.
404 * @iq_idx: Ingress queue index in the WR module.
406 * @portid: PCIE Channel to be associated with this queue.
407 * @async: Is this a FW asynchronous message handling queue?
410 * This API configures an ingress queue with FW by issuing a FW_IQ_CMD mailbox
434 /* For interrupt forwarding queue only */ csio_wr_iq_create()
451 /* Pass in the ingress queue cmd parameters */ csio_wr_iq_create()
511 * @eq_idx: Egress queue that got created.
542 * csio_wr_eq_create - Configure an Egress queue with FW.
545 * @eq_idx: Egress queue index in the WR module.
546 * @iq_idx: Associated ingress queue index.
549 * This API configures an offload egress queue with FW by issuing a
599 * @iq_idx: Ingress queue that was freed.
618 * csio_wr_iq_destroy - Free an ingress queue.
621 * @iq_idx: Ingress queue index to destroy
624 * This API frees an ingress queue by issuing the FW_IQ_CMD
673 * @eq_idx: Egress queue that was freed.
692 * csio_wr_eq_destroy - Free an Egress queue.
695 * @eq_idx: Egress queue index to destroy
698 * This API frees an Egress queue by issuing the FW_EQ_OFLD_CMD
734 * csio_wr_cleanup_eq_stpg - Cleanup Egress queue status page
736 * @qidx: Egress queue index
738 * Cleanup the Egress queue status page.
752 * @qidx: Ingress queue index
754 * Cleanup the footer entries in the given ingress queue,
839 * csio_wr_get - Get requested size of WR entry/entries from queue.
841 * @qidx: Index of queue.
851 * A WR can start towards the end of a queue, and then continue at the
852 * beginning, since the queue is considered to be circular. This will
881 /* cidx == pidx, empty queue */ csio_wr_get()
888 * credits = 1 implies queue is full. csio_wr_get()
898 * If it does, use the first addr/size to cover the queue until csio_wr_get()
900 * of queue and return it in the second addr/len. Set pidx csio_wr_get()
919 /* We are the end of queue, roll back pidx to top of queue */ csio_wr_get()
968 * @qidx: Index of queue.
972 * in queue into the register.
1001 return 0; /* cidx == pidx, empty queue */ csio_wr_avail_qcredits()
1007 * @flq: The freelist queue.
1012 * queue cidx.
1028 * @q: The ingress queue attached to the Freelist.
1029 * @wr: The freelist completion WR in the ingress queue.
1098 * csio_is_new_iqwr - Is this a new Ingress queue entry ?
1100 * @ftr: Ingress queue WR SGE footer.
1112 * csio_wr_process_iq - Process elements in Ingress queue.
1114 * @qidx: Index of queue
1115 * @iq_handler: Handler for this queue
1118 * This routine walks through every entry of the ingress queue, calling
1181 /* Call the queue handler. */ csio_wr_process_iq()
1196 * queue. csio_wr_process_iq()
1200 /* Roll over to start of queue */ csio_wr_process_iq()
1568 * Allocates memory for an array of queue pointers starting at q_arr.
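The csio_wr_get() comments describe the one subtle point of this work-request queue: an allocation may run past the end of the ring and must then be handed back as two address/length pairs. A generic sketch of that split (not the csiostor code):

    #include <stddef.h>

    struct wr_seg { void *addr; size_t len; };

    /* base/size describe the ring, *pidx is the producer offset in bytes.
     * Caller has already verified there are enough free credits. Returns
     * the number of segments filled: 1, or 2 when the request wraps. */
    static int wr_get(void *base, size_t size, size_t *pidx,
                      size_t need, struct wr_seg seg[2])
    {
        size_t off = *pidx;
        size_t to_end = size - off;

        if (need <= to_end) {
            seg[0].addr = (char *)base + off;
            seg[0].len  = need;
            *pidx = (off + need) % size;
            return 1;
        }
        seg[0].addr = (char *)base + off;       /* tail of the ring ... */
        seg[0].len  = to_end;
        seg[1].addr = base;                     /* ... then wrap to the start */
        seg[1].len  = need - to_end;
        *pidx = need - to_end;
        return 2;
    }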
/linux-4.1.27/drivers/staging/rtl8723au/include/
H A Dosdep_service.h51 struct list_head queue; member in struct:rtw_queue
55 static inline struct list_head *get_list_head(struct rtw_queue *queue) get_list_head() argument
57 return &queue->queue; get_list_head()
/linux-4.1.27/drivers/gpu/drm/ttm/
H A Dttm_lock.c48 init_waitqueue_head(&lock->queue); ttm_lock_init()
60 wake_up_all(&lock->queue); ttm_read_unlock()
88 ret = wait_event_interruptible(lock->queue, ttm_read_lock()
91 wait_event(lock->queue, __ttm_read_lock(lock)); ttm_read_lock()
127 (lock->queue, __ttm_read_trylock(lock, &locked)); ttm_read_trylock()
129 wait_event(lock->queue, __ttm_read_trylock(lock, &locked)); ttm_read_trylock()
143 wake_up_all(&lock->queue); ttm_write_unlock()
174 ret = wait_event_interruptible(lock->queue, ttm_write_lock()
179 wake_up_all(&lock->queue); ttm_write_lock()
183 wait_event(lock->queue, __ttm_read_lock(lock)); ttm_write_lock()
197 wake_up_all(&lock->queue); __ttm_vt_unlock()
237 ret = wait_event_interruptible(lock->queue, ttm_vt_lock()
242 wake_up_all(&lock->queue); ttm_vt_lock()
247 wait_event(lock->queue, __ttm_vt_lock(lock)); ttm_vt_lock()
277 wake_up_all(&lock->queue); ttm_suspend_unlock()
300 wait_event(lock->queue, __ttm_suspend_lock(lock)); ttm_suspend_lock()
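ttm_lock is built on nothing more exotic than a wait queue plus a predicate checked under a spinlock: sleepers call wait_event{,_interruptible}() on a try-lock helper, and every state change ends with wake_up_all() so waiters re-check. A reduced sketch of that pattern (the ex_* state is illustrative):

    #include <linux/wait.h>
    #include <linux/spinlock.h>

    static DECLARE_WAIT_QUEUE_HEAD(ex_waitq);
    static DEFINE_SPINLOCK(ex_state_lock);
    static int ex_readers, ex_write_wanted;

    static bool ex_try_read_lock(void)
    {
        bool ok;

        spin_lock(&ex_state_lock);
        ok = !ex_write_wanted;
        if (ok)
            ex_readers++;
        spin_unlock(&ex_state_lock);
        return ok;
    }

    static int ex_read_lock(void)
    {
        /* Interruptible, like ttm_read_lock(): -ERESTARTSYS on a signal. */
        return wait_event_interruptible(ex_waitq, ex_try_read_lock());
    }

    static void ex_read_unlock(void)
    {
        spin_lock(&ex_state_lock);
        ex_readers--;
        spin_unlock(&ex_state_lock);
        wake_up_all(&ex_waitq);                 /* let a pending writer re-check */
    }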
/linux-4.1.27/include/drm/
H A Ddrm_os_linux.h6 #include <linux/interrupt.h> /* For task queue support */
43 #define DRM_WAIT_ON( ret, queue, timeout, condition ) \
47 add_wait_queue(&(queue), &entry); \
64 remove_wait_queue(&(queue), &entry); \
/linux-4.1.27/tools/testing/selftests/mqueue/
H A Dmq_open_tests.c18 * open a posix message queue and then reports whether or not they
39 " path Path name of the message queue to create\n"
55 mqd_t queue = -1; variable
86 if (queue != -1) shutdown()
87 if (mq_close(queue)) shutdown()
154 printf("Current rlimit value for POSIX message queue bytes is " validate_current_settings()
165 printf("Temporarily lowering default queue parameters " validate_current_settings()
177 printf("Temporarily lowering maximum queue parameters " validate_current_settings()
181 "queue parameters to the maximum queue " validate_current_settings()
192 * test_queue - Test opening a queue, shutdown if we fail. This should
194 * after ourselves and return the queue attributes in *result.
201 if ((queue = mq_open(queue_path, flags, perms, attr)) == -1) test_queue()
203 if (mq_getattr(queue, result)) test_queue()
205 if (mq_close(queue)) test_queue()
207 queue = -1; test_queue()
215 * 0 - Failed to create a queue
216 * 1 - Created a queue, attributes in *result
223 if ((queue = mq_open(queue_path, flags, perms, attr)) == -1) test_queue_fail()
225 if (mq_getattr(queue, result)) test_queue_fail()
227 if (mq_close(queue)) test_queue_fail()
229 queue = -1; test_queue_fail()
240 fprintf(stderr, "Must pass a valid queue name\n\n"); main()
246 * Although we can create a msg queue with a non-absolute path name, main()
295 printf("\tUsing queue path:\t\t%s\n", queue_path); main()
399 "queue parameters when\ndefaults > " main()
410 printf("Kernel creates queue even though defaults " main()
414 printf("Kernel properly fails to create queue when " main()
H A Dmq_perf_tests.c17 * Tests various types of message queue workloads, concentrating on those
18 * situations that involve large message sizes, large message queue depths,
19 * or both, and reports back useful metrics about kernel message queue
45 " -c # Skip most tests and go straight to a high queue depth test\n"
49 " queue impacts the performance of other programs). The number\n"
65 " path Path name of the message queue to create\n"
96 mqd_t queue = -1; variable
107 .descrip = "Run continuous tests at a high queue depth in "
114 "messages to the message queue in a tight loop will "
186 if (queue != -1) shutdown()
187 if (mq_close(queue)) shutdown()
277 * open_queue - open the global queue for testing
278 * @attr - An attr struct specifying the desired queue traits
279 * @result - An attr struct that lists the actual traits the queue has
283 * queue to open, the queue descriptor is saved in the global queue
291 queue = mq_open(queue_path, flags, perms, attr); open_queue()
292 if (queue == -1) open_queue()
294 if (mq_getattr(queue, &result)) open_queue()
328 while (mq_send(queue, buff, sizeof(buff), 0) == 0) cont_thread()
330 mq_receive(queue, buff, sizeof(buff), &priority); cont_thread()
335 while (mq_receive(queue, buff, MSG_SIZE, &prio_in) == MSG_SIZE)
339 if (mq_send(queue, buff, MSG_SIZE, prio_out)) \
346 if (mq_send(queue, buff, MSG_SIZE, prio_out)) \
349 if (mq_receive(queue, buff, MSG_SIZE, &prio_in) != MSG_SIZE) \
396 {"\n\tTest #2a: Time send/recv message, queue full, constant prio\n",
398 {"\n\tTest #2b: Time send/recv message, queue full, increasing prio\n",
400 {"\n\tTest #2c: Time send/recv message, queue full, decreasing prio\n",
402 {"\n\tTest #2d: Time send/recv message, queue full, random prio\n",
410 * 1) Time to add/remove message with 0 messages on queue
412 * 2) Time to add/remove message when queue close to capacity:
448 printf("\n\tTest #1: Time send/recv message, queue empty\n"); perf_test_thread()
477 printf("\t\tFilling queue..."); perf_test_thread()
506 printf("\t\tDraining queue..."); perf_test_thread()
597 * Although we can create a msg queue with a main()
654 printf("\tUsing queue path:\t\t\t%s\n", queue_path); main()
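For comparison with the selftests above, the minimal userspace sequence they exercise is just mq_open() with explicit attributes, a send/receive pair, and cleanup. The queue name and attribute values below are arbitrary; link with -lrt on older glibc:

    #include <fcntl.h>
    #include <mqueue.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        struct mq_attr attr = { .mq_maxmsg = 10, .mq_msgsize = 128 };
        char buf[128];                  /* must be >= mq_msgsize for mq_receive() */
        unsigned int prio;
        mqd_t q = mq_open("/example_queue", O_CREAT | O_RDWR, 0600, &attr);

        if (q == (mqd_t)-1) {
            perror("mq_open");
            return 1;
        }
        mq_send(q, "hello", strlen("hello") + 1, 1);
        if (mq_receive(q, buf, sizeof(buf), &prio) > 0)
            printf("got \"%s\" at priority %u\n", buf, prio);
        mq_close(q);
        mq_unlink("/example_queue");
        return 0;
    }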
/linux-4.1.27/drivers/net/ethernet/intel/i40e/
H A Di40e_adminq.h56 /* used for queue tracking */
87 struct i40e_adminq_ring arq; /* receive queue */
88 struct i40e_adminq_ring asq; /* send queue */
89 u32 asq_cmd_timeout; /* send queue cmd write back timeout*/
90 u16 num_arq_entries; /* receive queue depth */
91 u16 num_asq_entries; /* send queue depth */
92 u16 arq_buf_size; /* receive queue buffer size */
93 u16 asq_buf_size; /* send queue buffer size */
101 struct mutex asq_mutex; /* Send queue lock */
102 struct mutex arq_mutex; /* Receive queue lock */
/linux-4.1.27/drivers/net/ethernet/intel/i40evf/
H A Di40e_adminq.h56 /* used for queue tracking */
87 struct i40e_adminq_ring arq; /* receive queue */
88 struct i40e_adminq_ring asq; /* send queue */
89 u32 asq_cmd_timeout; /* send queue cmd write back timeout*/
90 u16 num_arq_entries; /* receive queue depth */
91 u16 num_asq_entries; /* send queue depth */
92 u16 arq_buf_size; /* receive queue buffer size */
93 u16 asq_buf_size; /* send queue buffer size */
101 struct mutex asq_mutex; /* Send queue lock */
102 struct mutex arq_mutex; /* Receive queue lock */
/linux-4.1.27/include/trace/events/
H A Dblock.h100 * @q: queue containing the block operation request
104 * queue @q is aborted. The fields in the operation request @rq
116 * block_rq_requeue - place block IO request back on a queue
117 * @q: queue holding operation
120 * The block operation request @rq is being placed back into queue
122 * put back in the queue.
133 * @q: queue containing the block operation request
214 * block_rq_insert - insert block operation request into queue
215 * @q: target queue
219 * into queue @q. The fields in the operation request @rq struct can
232 * @q: queue holding operation
235 * Called when block operation request @rq from queue @q is sent to a
247 * @q: queue holding the block operation
287 * @q: queue holding the block operation
352 * @q: queue holding operation
357 * in queue @q.
368 * @q: queue holding operation
373 * operation in queue @q.
383 * block_bio_queue - putting new block IO operation in queue
384 * @q: queue holding operation
387 * About to place the block IO operation @bio into queue @q.
447 * block_getrq - get a free request entry in queue for block IO operations
448 * @q: queue for operations
452 * A request struct for queue @q has been allocated to handle the
463 * block_sleeprq - waiting to get a free request entry in queue for block IO operation
464 * @q: queue for operation
468 * In the case where a request struct cannot be provided for queue @q
481 * block_plug - keep operations requests in request queue
482 * @q: request queue to plug
484 * Plug the request queue @q. Do not allow block operation requests
486 * the queue to improve throughput performance of the block device.
525 * block_unplug - release of operations requests in request queue
526 * @q: request queue to unplug
527 * @depth: number of requests just added to the queue
530 * Unplug request queue @q because device driver is scheduled to work
531 * on elements in the request queue.
542 * @q: queue containing the bio
546 * The bio request @bio in request queue @q needs to be split into two
583 * @q: queue holding the operation
626 * @q: queue holding the operation
/linux-4.1.27/virt/kvm/
H A Dasync_pf.c68 INIT_LIST_HEAD(&vcpu->async_pf.queue); kvm_async_pf_vcpu_init()
106 /* cancel outstanding work queue item */ kvm_clear_async_pf_completion_queue()
107 while (!list_empty(&vcpu->async_pf.queue)) { kvm_clear_async_pf_completion_queue()
109 list_entry(vcpu->async_pf.queue.next, kvm_clear_async_pf_completion_queue()
110 typeof(*work), queue); kvm_clear_async_pf_completion_queue()
111 list_del(&work->queue); kvm_clear_async_pf_completion_queue()
152 list_del(&work->queue); kvm_check_async_pf_completion()
194 list_add_tail(&work->queue, &vcpu->async_pf.queue); kvm_setup_async_pf()
217 INIT_LIST_HEAD(&work->queue); /* for list_del to work */ kvm_async_pf_wakeup_all()
/linux-4.1.27/include/net/netfilter/
H A Dnf_queue.h48 /* packets in either direction go into same queue */ hash_v4()
83 nfqueue_hash(const struct sk_buff *skb, u16 queue, u16 queues_total, u8 family, nfqueue_hash() argument
87 queue += ((u64) hash_v4(skb, jhash_initval) * queues_total) >> 32; nfqueue_hash()
90 queue += ((u64) hash_v6(skb, jhash_initval) * queues_total) >> 32; nfqueue_hash()
93 return queue; nfqueue_hash()
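The scaling in nfqueue_hash() avoids a modulo by taking the high half of a 32x32-bit multiply; on its own, the mapping of a hash onto [base, base + total) looks like this (generic sketch):

    #include <stdint.h>

    static inline uint16_t pick_queue(uint32_t hash, uint16_t base, uint16_t total)
    {
        /* hash/2^32 is a fraction in [0,1); multiplying by total and keeping
         * the integer part spreads packets evenly over the configured queues. */
        return base + (uint16_t)(((uint64_t)hash * total) >> 32);
    }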
/linux-4.1.27/include/uapi/sound/
H A Dasequencer.h74 #define SNDRV_SEQ_EVENT_SETPOS_TICK 33 /* set tick queue position */
75 #define SNDRV_SEQ_EVENT_SETPOS_TIME 34 /* set realtime queue position */
79 #define SNDRV_SEQ_EVENT_QUEUE_SKEW 38 /* skew queue tempo */
249 /* queue timer control */
251 unsigned char queue; /* affected queue */ member in struct:snd_seq_ev_queue_control
277 unsigned char queue; /* schedule queue */ member in struct:snd_seq_event
290 struct snd_seq_ev_queue_control queue; member in union:snd_seq_event::__anon13841
394 unsigned char queue; /* Queue for REMOVE_DEST */ member in struct:snd_seq_remove_events
463 unsigned char time_queue; /* queue # for timestamping */
468 /* queue flags */
471 /* queue information */
473 int queue; /* queue id */ member in struct:snd_seq_queue_info
476 * security settings, only owner of this queue can start/stop timer
477 * etc. if the queue is locked for other clients
479 int owner; /* client id for owner of the queue */
480 unsigned locked:1; /* timing queue locked for other queues */
481 char name[64]; /* name of this queue */
487 /* queue info/status */
489 int queue; /* queue id */ member in struct:snd_seq_queue_status
490 int events; /* read-only - queue size */
493 int running; /* running state of queue */
499 /* queue tempo */
501 int queue; /* sequencer queue */ member in struct:snd_seq_queue_tempo
504 unsigned int skew_value; /* queue skew */
505 unsigned int skew_base; /* queue skew base */
515 /* queue timer info */
517 int queue; /* sequencer queue */ member in struct:snd_seq_queue_timer
530 int queue; /* sequencer queue */ member in struct:snd_seq_queue_client
532 int used; /* queue is used with this client
548 unsigned char queue; /* input time-stamp queue (optional) */ member in struct:snd_seq_port_subscribe
563 unsigned char queue; /* R/O: result */ member in struct:snd_seq_query_subs
/linux-4.1.27/drivers/net/fddi/skfp/
H A Dhwmtm.c85 static u_long repair_txd_ring(struct s_smc *smc, struct s_smt_tx_queue *queue);
86 static u_long repair_rxd_ring(struct s_smc *smc, struct s_smt_rx_queue *queue);
358 struct s_smt_tx_queue *queue ; init_txd_ring() local
366 queue = smc->hw.fp.tx[QUEUE_A0] ; init_txd_ring()
372 queue->tx_curr_put = queue->tx_curr_get = ds ; init_txd_ring()
374 queue->tx_free = HWM_ASYNC_TXD_COUNT ; init_txd_ring()
375 queue->tx_used = 0 ; init_txd_ring()
380 queue = smc->hw.fp.tx[QUEUE_S] ; init_txd_ring()
386 queue->tx_curr_put = queue->tx_curr_get = ds ; init_txd_ring()
387 queue->tx_free = HWM_SYNC_TXD_COUNT ; init_txd_ring()
388 queue->tx_used = 0 ; init_txd_ring()
395 struct s_smt_rx_queue *queue ; init_rxd_ring() local
402 queue = smc->hw.fp.rx[QUEUE_R1] ; init_rxd_ring()
408 queue->rx_curr_put = queue->rx_curr_get = ds ; init_rxd_ring()
409 queue->rx_free = SMT_R1_RXD_COUNT ; init_rxd_ring()
410 queue->rx_used = 0 ; init_rxd_ring()
517 * free MBuf queue smt_free_mbuf()
586 static u_long repair_txd_ring(struct s_smc *smc, struct s_smt_tx_queue *queue) repair_txd_ring() argument
596 t = queue->tx_curr_get ; repair_txd_ring()
597 tx_used = queue->tx_used ; repair_txd_ring()
598 for (i = tx_used+queue->tx_free-1 ; i ; i-- ) { repair_txd_ring()
603 t = queue->tx_curr_get ; repair_txd_ring()
638 static u_long repair_rxd_ring(struct s_smc *smc, struct s_smt_rx_queue *queue) repair_rxd_ring() argument
648 r = queue->rx_curr_get ; repair_rxd_ring()
649 rx_used = queue->rx_used ; repair_rxd_ring()
655 r = queue->rx_curr_get ; repair_rxd_ring()
791 if (is & IS_R1_P) { /* Parity error rx queue 1 */ fddi_isr()
796 if (is & IS_R1_C) { /* Encoding error rx queue 1 */ fddi_isr()
817 DB_GEN("Fast tx complete queue",0,0,6) ; fddi_isr()
1039 * process receive queue
1047 struct s_smt_rx_queue *queue ; /* points to the queue ctl struct */ process_receive() local
1060 queue = smc->hw.fp.rx[QUEUE_R1] ; process_receive()
1063 r = queue->rx_curr_get ; process_receive()
1064 rx_used = queue->rx_used ; process_receive()
1156 rxd = queue->rx_curr_get ; process_receive()
1157 queue->rx_curr_get = r ; process_receive()
1158 queue->rx_free += frag_count ; process_receive()
1159 queue->rx_used = rx_used ; process_receive()
1361 DB_RX("next RxD is %x ",queue->rx_curr_get,0,3) ; process_receive()
1362 NDD_TRACE("RHx1",queue->rx_curr_get,0,0) ; process_receive()
1370 DB_RX("next RxD is %x ",queue->rx_curr_get,0,3) ; process_receive()
1371 NDD_TRACE("RHx2",queue->rx_curr_get,0,0) ; process_receive()
1449 * In this case, the frames in the receive queue are obsolete and
1467 struct s_smt_rx_queue *queue ; mac_drv_clear_rx_queue() local
1477 queue = smc->hw.fp.rx[QUEUE_R1] ; mac_drv_clear_rx_queue()
1483 r = queue->rx_curr_get ; mac_drv_clear_rx_queue()
1484 while (queue->rx_used) { mac_drv_clear_rx_queue()
1492 while (r != queue->rx_curr_put && mac_drv_clear_rx_queue()
1504 for (r=queue->rx_curr_get,i=frag_count; i ; r=r->rxd_next,i--){ mac_drv_clear_rx_queue()
1510 (void *)queue->rx_curr_get,frag_count,5) ; mac_drv_clear_rx_queue()
1511 mac_drv_clear_rxd(smc,queue->rx_curr_get,frag_count) ; mac_drv_clear_rx_queue()
1513 queue->rx_curr_get = next_rxd ; mac_drv_clear_rx_queue()
1514 queue->rx_used -= frag_count ; mac_drv_clear_rx_queue()
1515 queue->rx_free += frag_count ; mac_drv_clear_rx_queue()
1532 * corresponding send queue.
1535 * send queue the frame should be transmitted.
1543 * frame_status status of the frame, the send queue bit is already
1632 struct s_smt_tx_queue *queue ; hwm_tx_frag() local
1635 queue = smc->os.hwm.tx_p ; hwm_tx_frag()
1641 * Set: t = queue->tx_curr_put here ! hwm_tx_frag()
1643 t = queue->tx_curr_put ; hwm_tx_frag()
1659 outpd(queue->tx_bmu_ctl,CSR_START) ; hwm_tx_frag()
1669 queue->tx_free-- ; hwm_tx_frag()
1670 queue->tx_used++ ; hwm_tx_frag()
1671 queue->tx_curr_put = t->txd_next ; hwm_tx_frag()
1727 NDD_TRACE("THfE",t,queue->tx_free,0) ; hwm_tx_frag()
1816 struct s_smt_tx_queue *queue ; smt_send_mbuf() local
1852 queue = smc->hw.fp.tx[QUEUE_A0] ; smt_send_mbuf()
1863 if (!smc->hw.mac_ring_is_up || frag_count > queue->tx_free) { smt_send_mbuf()
1881 t = queue->tx_curr_put ; smt_send_mbuf()
1900 outpd(queue->tx_bmu_ctl,CSR_START) ; smt_send_mbuf()
1906 queue->tx_curr_put = t = t->txd_next ; smt_send_mbuf()
1907 queue->tx_free-- ; smt_send_mbuf()
1908 queue->tx_used++ ; smt_send_mbuf()
1915 DB_TX("pass Mbuf to LLC queue",0,0,5) ; smt_send_mbuf()
1924 NDD_TRACE("THSE",t,queue->tx_free,frag_count) ; smt_send_mbuf()
1943 struct s_smt_tx_queue *queue ; mac_drv_clear_txd() local
1954 queue = smc->hw.fp.tx[i] ; mac_drv_clear_txd()
1955 t1 = queue->tx_curr_get ; mac_drv_clear_txd()
1966 if (tbctrl & BMU_OWN || !queue->tx_used){ mac_drv_clear_txd()
1967 DB_TX("End of TxDs queue %d",i,0,4) ; mac_drv_clear_txd()
1968 goto free_next_queue ; /* next queue */ mac_drv_clear_txd()
1974 t1 = queue->tx_curr_get ; mac_drv_clear_txd()
1995 queue->tx_curr_get,0,4) ; mac_drv_clear_txd()
1996 mac_drv_tx_complete(smc,queue->tx_curr_get) ; mac_drv_clear_txd()
1999 queue->tx_curr_get = t1 ; mac_drv_clear_txd()
2000 queue->tx_free += frag_count ; mac_drv_clear_txd()
2001 queue->tx_used -= frag_count ; mac_drv_clear_txd()
2034 struct s_smt_tx_queue *queue ; mac_drv_clear_tx_queue() local
2045 queue = smc->hw.fp.tx[i] ; mac_drv_clear_tx_queue()
2051 t = queue->tx_curr_get ; mac_drv_clear_tx_queue()
2052 tx_used = queue->tx_used ; mac_drv_clear_tx_queue()
2069 queue = smc->hw.fp.tx[i] ; mac_drv_clear_tx_queue()
2070 t = queue->tx_curr_get ; mac_drv_clear_tx_queue()
2084 queue->tx_curr_put = queue->tx_curr_get->txd_next ; mac_drv_clear_tx_queue()
2085 queue->tx_curr_get = queue->tx_curr_put ; mac_drv_clear_tx_queue()
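The reclaim loops above (mac_drv_clear_txd() and friends) all walk the descriptor ring from the consumer index and stop at the first descriptor the DMA engine still owns. Stripped of the driver specifics, the pattern is roughly (made-up ex_* types, DESC_OWN stands in for BMU_OWN):

    #include <stdint.h>

    #define DESC_OWN 0x80000000u              /* device still owns the descriptor */

    struct ex_desc { uint32_t ctrl; void *buf; };

    struct ex_ring {
        struct ex_desc *desc;
        unsigned int size, get, used;         /* get = consumer index */
    };

    static void ex_reclaim(struct ex_ring *r, void (*complete)(void *buf))
    {
        while (r->used) {
            struct ex_desc *d = &r->desc[r->get];

            if (d->ctrl & DESC_OWN)           /* hardware not finished yet */
                break;
            complete(d->buf);                 /* hand the frame back up */
            r->get = (r->get + 1) % r->size;
            r->used--;
        }
    }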
H A Dfplustm.c196 * initialize the pointer for receive queue 1 set_recvptr()
204 * initialize the pointer for receive queue 2 set_recvptr()
228 * initialize the pointer for asynchronous transmit queue set_txptr()
236 * initialize the pointer for synchronous transmit queue set_txptr()
278 struct s_smt_rx_queue *queue ; init_rx() local
281 * init all tx data structures for receive queue 1 init_rx()
283 smc->hw.fp.rx[QUEUE_R1] = queue = &smc->hw.fp.rx_q[QUEUE_R1] ; init_rx()
284 queue->rx_bmu_ctl = (HW_PTR) ADDR(B0_R1_CSR) ; init_rx()
285 queue->rx_bmu_dsc = (HW_PTR) ADDR(B4_R1_DA) ; init_rx()
288 * init all tx data structures for receive queue 2 init_rx()
290 smc->hw.fp.rx[QUEUE_R2] = queue = &smc->hw.fp.rx_q[QUEUE_R2] ; init_rx()
291 queue->rx_bmu_ctl = (HW_PTR) ADDR(B0_R2_CSR) ; init_rx()
292 queue->rx_bmu_dsc = (HW_PTR) ADDR(B4_R2_DA) ; init_rx()
308 struct s_smt_tx_queue *queue ; init_tx() local
311 * init all tx data structures for the synchronous queue init_tx()
313 smc->hw.fp.tx[QUEUE_S] = queue = &smc->hw.fp.tx_q[QUEUE_S] ; init_tx()
314 queue->tx_bmu_ctl = (HW_PTR) ADDR(B0_XS_CSR) ; init_tx()
315 queue->tx_bmu_dsc = (HW_PTR) ADDR(B5_XS_DA) ; init_tx()
322 * init all tx data structures for the asynchronous queue 0 init_tx()
324 smc->hw.fp.tx[QUEUE_A0] = queue = &smc->hw.fp.tx_q[QUEUE_A0] ; init_tx()
325 queue->tx_bmu_ctl = (HW_PTR) ADDR(B0_XA_CSR) ; init_tx()
326 queue->tx_bmu_dsc = (HW_PTR) ADDR(B5_XA_DA) ; init_tx()
452 * enable FORMAC to send endless queue of directed beacon directed_beacon()
529 /* end of claim/beacon queue */ build_claim_beacon()
567 void enable_tx_irq(smc, queue)
569 u_short queue ;
573 interrupt of the queue.
575 Para queue = QUEUE_S: synchronous queue
576 = QUEUE_A0: asynchronous queue
581 the transmit complete interrupt of a queue,
590 void enable_tx_irq(struct s_smc *smc, u_short queue)
591 /* u_short queue; 0 = synchronous queue, 1 = asynchronous queue 0 */
597 if (queue == 0) {
600 if (queue == 1) {
608 void disable_tx_irq(smc, queue)
610 u_short queue ;
614 interrupt of the queue
616 Para queue = QUEUE_S: synchronous queue
617 = QUEUE_A0: asynchronous queue
625 void disable_tx_irq(struct s_smc *smc, u_short queue)
626 /* u_short queue; 0 = synchronous queue, 1 = asynchronous queue 0 */
632 if (queue == 0) {
635 if (queue == 1) {
658 llc_restart_tx(smc) ; /* TX queue */ mac_ring_up()
677 * mac2_irq: status bits for the receive queue 1, and ring status
804 * mac3_irq: receive queue 2 bits and address detection bits
817 if (code_s3u & FM_SRPERRQ2) { /* parity error receive queue 2 */ mac3_irq()
820 if (code_s3u & FM_SRPERRQ1) { /* parity error receive queue 1 */ mac3_irq()
966 /* Auto unlock receive threshold for receive queue 1 and 2 */ init_mac()
1379 rx queue 1 | RX_FIFO_SPACE | RX_LARGE_FIFO| ------------- * 63,75 kB smt_split_up_fifo()
1383 rx queue 2 | 0 kB | RX_SMALL_FIFO| ------------- * 63,75 kB smt_split_up_fifo()
1423 queue | | TX_MEDIUM_FIFO | TX_LARGE_FIFO smt_split_up_fifo()
1426 queue | TX_FIFO_SPACE| TX_MEDIUM_FIFO | TX_SMALL_FIFO smt_split_up_fifo()
1485 * bandwidth becomes available but no synchronous queue is formac_reinit_tx()
H A Dqueue.c26 static const char ID_sccs[] = "@(#)queue.c 2.9 97/08/04 (C) SK " ;
32 * init event queue management
40 * add event to queue
44 PRINTF("queue class %d event %d\n",class,event) ; queue_event()
68 * while event queue is not empty
69 * get event from queue
75 struct event_queue *ev ; /* pointer into queue */ ev_dispatcher()
/linux-4.1.27/drivers/watchdog/
H A Dmtx-1_wdt.c64 int queue; member in struct:__anon10690
81 if (mtx1_wdt_device.queue && ticks) mtx1_wdt_trigger()
99 if (!mtx1_wdt_device.queue) { mtx1_wdt_start()
100 mtx1_wdt_device.queue = 1; mtx1_wdt_start()
114 if (mtx1_wdt_device.queue) { mtx1_wdt_stop()
115 mtx1_wdt_device.queue = 0; mtx1_wdt_stop()
220 mtx1_wdt_device.queue = 0; mtx1_wdt_probe()
238 if (mtx1_wdt_device.queue) { mtx1_wdt_remove()
239 mtx1_wdt_device.queue = 0; mtx1_wdt_remove()
/linux-4.1.27/drivers/net/ethernet/cadence/
H A Dmacb.c69 static struct macb_dma_desc *macb_tx_desc(struct macb_queue *queue, macb_tx_desc() argument
72 return &queue->tx_ring[macb_tx_ring_wrap(index)]; macb_tx_desc()
75 static struct macb_tx_skb *macb_tx_skb(struct macb_queue *queue, macb_tx_skb() argument
78 return &queue->tx_skb[macb_tx_ring_wrap(index)]; macb_tx_skb()
81 static dma_addr_t macb_tx_dma(struct macb_queue *queue, unsigned int index) macb_tx_dma() argument
87 return queue->tx_ring_dma + offset; macb_tx_dma()
500 struct macb_queue *queue = container_of(work, struct macb_queue, macb_tx_error_task() local
502 struct macb *bp = queue->bp; macb_tx_error_task()
510 (unsigned int)(queue - bp->queues), macb_tx_error_task()
511 queue->tx_tail, queue->tx_head); macb_tx_error_task()
513 /* Prevent the queue IRQ handlers from running: each of them may call macb_tx_error_task()
521 /* Make sure nobody is trying to queue up new packets */ macb_tx_error_task()
534 * Treat frames in TX queue including the ones that caused the error. macb_tx_error_task()
537 for (tail = queue->tx_tail; tail != queue->tx_head; tail++) { macb_tx_error_task()
540 desc = macb_tx_desc(queue, tail); macb_tx_error_task()
542 tx_skb = macb_tx_skb(queue, tail); macb_tx_error_task()
550 tx_skb = macb_tx_skb(queue, tail); macb_tx_error_task()
579 /* Set end of TX queue */ macb_tx_error_task()
580 desc = macb_tx_desc(queue, 0); macb_tx_error_task()
587 /* Reinitialize the TX desc queue */ macb_tx_error_task()
588 queue_writel(queue, TBQP, queue->tx_ring_dma); macb_tx_error_task()
590 queue->tx_head = 0; macb_tx_error_task()
591 queue->tx_tail = 0; macb_tx_error_task()
595 queue_writel(queue, IER, MACB_TX_INT_FLAGS); macb_tx_error_task()
604 static void macb_tx_interrupt(struct macb_queue *queue) macb_tx_interrupt() argument
609 struct macb *bp = queue->bp; macb_tx_interrupt()
610 u16 queue_index = queue - bp->queues; macb_tx_interrupt()
616 queue_writel(queue, ISR, MACB_BIT(TCOMP)); macb_tx_interrupt()
621 head = queue->tx_head; macb_tx_interrupt()
622 for (tail = queue->tx_tail; tail != head; tail++) { macb_tx_interrupt()
628 desc = macb_tx_desc(queue, tail); macb_tx_interrupt()
643 tx_skb = macb_tx_skb(queue, tail); macb_tx_interrupt()
666 queue->tx_tail = tail; macb_tx_interrupt()
668 CIRC_CNT(queue->tx_head, queue->tx_tail, macb_tx_interrupt()
984 struct macb_queue *queue = dev_id; macb_interrupt() local
985 struct macb *bp = queue->bp; macb_interrupt()
989 status = queue_readl(queue, ISR); macb_interrupt()
999 queue_writel(queue, IDR, -1); macb_interrupt()
1003 netdev_vdbg(bp->dev, "queue = %u, isr = 0x%08lx\n", macb_interrupt()
1004 (unsigned int)(queue - bp->queues), macb_interrupt()
1015 queue_writel(queue, IDR, MACB_RX_INT_FLAGS); macb_interrupt()
1017 queue_writel(queue, ISR, MACB_BIT(RCOMP)); macb_interrupt()
1026 queue_writel(queue, IDR, MACB_TX_INT_FLAGS); macb_interrupt()
1027 schedule_work(&queue->tx_error_task); macb_interrupt()
1030 queue_writel(queue, ISR, MACB_TX_ERR_FLAGS); macb_interrupt()
1036 macb_tx_interrupt(queue); macb_interrupt()
1066 queue_writel(queue, ISR, MACB_BIT(ISR_ROVR)); macb_interrupt()
1073 * (work queue?) macb_interrupt()
1078 queue_writel(queue, ISR, MACB_BIT(HRESP)); macb_interrupt()
1081 status = queue_readl(queue, ISR); macb_interrupt()
1097 struct macb_queue *queue; macb_poll_controller() local
1102 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) macb_poll_controller()
1103 macb_interrupt(dev->irq, queue); macb_poll_controller()
1115 struct macb_queue *queue, macb_tx_map()
1119 unsigned int len, entry, i, tx_head = queue->tx_head; macb_tx_map()
1133 tx_skb = &queue->tx_skb[entry]; macb_tx_map()
1162 tx_skb = &queue->tx_skb[entry]; macb_tx_map()
1196 * to set the end of TX queue macb_tx_map()
1201 desc = &queue->tx_ring[entry]; macb_tx_map()
1207 tx_skb = &queue->tx_skb[entry]; macb_tx_map()
1208 desc = &queue->tx_ring[entry]; macb_tx_map()
1225 } while (i != queue->tx_head); macb_tx_map()
1227 queue->tx_head = tx_head; macb_tx_map()
1234 for (i = queue->tx_head; i != tx_head; i++) { macb_tx_map()
1235 tx_skb = macb_tx_skb(queue, i); macb_tx_map()
1247 struct macb_queue *queue = &bp->queues[queue_index]; macb_start_xmit() local
1253 "start_xmit: queue %hu len %u head %p data %p tail %p end %p\n", macb_start_xmit()
1274 if (CIRC_SPACE(queue->tx_head, queue->tx_tail, TX_RING_SIZE) < count) { macb_start_xmit()
1278 queue->tx_head, queue->tx_tail); macb_start_xmit()
1283 if (!macb_tx_map(bp, queue, skb)) { macb_start_xmit()
1295 if (CIRC_SPACE(queue->tx_head, queue->tx_tail, TX_RING_SIZE) < 1) macb_start_xmit()
1364 struct macb_queue *queue; macb_free_consistent() local
1374 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { macb_free_consistent()
1375 kfree(queue->tx_skb); macb_free_consistent()
1376 queue->tx_skb = NULL; macb_free_consistent()
1377 if (queue->tx_ring) { macb_free_consistent()
1379 queue->tx_ring, queue->tx_ring_dma); macb_free_consistent()
1380 queue->tx_ring = NULL; macb_free_consistent()
1418 struct macb_queue *queue; macb_alloc_consistent() local
1422 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { macb_alloc_consistent()
1424 queue->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size, macb_alloc_consistent()
1425 &queue->tx_ring_dma, macb_alloc_consistent()
1427 if (!queue->tx_ring) macb_alloc_consistent()
1430 "Allocated TX ring for queue %u of %d bytes at %08lx (mapped %p)\n", macb_alloc_consistent()
1431 q, size, (unsigned long)queue->tx_ring_dma, macb_alloc_consistent()
1432 queue->tx_ring); macb_alloc_consistent()
1435 queue->tx_skb = kmalloc(size, GFP_KERNEL); macb_alloc_consistent()
1436 if (!queue->tx_skb) macb_alloc_consistent()
1461 struct macb_queue *queue; gem_init_rings() local
1465 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { gem_init_rings()
1467 queue->tx_ring[i].addr = 0; gem_init_rings()
1468 queue->tx_ring[i].ctrl = MACB_BIT(TX_USED); gem_init_rings()
1470 queue->tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP); gem_init_rings()
1471 queue->tx_head = 0; gem_init_rings()
1472 queue->tx_tail = 0; gem_init_rings()
1507 struct macb_queue *queue; macb_reset_hw() local
1524 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { macb_reset_hw()
1525 queue_writel(queue, IDR, -1); macb_reset_hw()
1526 queue_readl(queue, ISR); macb_reset_hw()
1642 struct macb_queue *queue; macb_init_hw() local
1670 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { macb_init_hw()
1671 queue_writel(queue, TBQP, queue->tx_ring_dma); macb_init_hw()
1674 queue_writel(queue, IER, macb_init_hw()
2195 /* bit 0 is never set but queue 0 always exists */ macb_probe_queues()
2262 struct macb_queue *queue; macb_init() local
2266 /* set the queue register mapping once for all: queue0 has a special macb_init()
2267 * register mapping but we don't want to test the queue index then macb_init()
2274 queue = &bp->queues[q]; macb_init()
2275 queue->bp = bp; macb_init()
2277 queue->ISR = GEM_ISR(hw_q - 1); macb_init()
2278 queue->IER = GEM_IER(hw_q - 1); macb_init()
2279 queue->IDR = GEM_IDR(hw_q - 1); macb_init()
2280 queue->IMR = GEM_IMR(hw_q - 1); macb_init()
2281 queue->TBQP = GEM_TBQP(hw_q - 1); macb_init()
2284 queue->ISR = MACB_ISR; macb_init()
2285 queue->IER = MACB_IER; macb_init()
2286 queue->IDR = MACB_IDR; macb_init()
2287 queue->IMR = MACB_IMR; macb_init()
2288 queue->TBQP = MACB_TBQP; macb_init()
2291 /* get irq: here we use the linux queue index, not the hardware macb_init()
2292 * queue index. the queue irq definitions in the device tree macb_init()
2294 * hardware queue mask. macb_init()
2296 queue->irq = platform_get_irq(pdev, q); macb_init()
2297 err = devm_request_irq(&pdev->dev, queue->irq, macb_interrupt, macb_init()
2298 IRQF_SHARED, dev->name, queue); macb_init()
2302 queue->irq, err); macb_init()
2306 INIT_WORK(&queue->tx_error_task, macb_tx_error_task); macb_init()
1114 macb_tx_map(struct macb *bp, struct macb_queue *queue, struct sk_buff *skb) macb_tx_map() argument
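macb's head/tail bookkeeping leans on the circ_buf helpers: CIRC_SPACE() gates the transmit path, CIRC_CNT() bounds the completion walk. Shown on their own (the ring size is an assumed power of two):

    #include <linux/circ_buf.h>
    #include <linux/types.h>

    #define EX_TX_RING_SIZE 512                      /* power of two */

    static bool ex_ring_has_room(unsigned int head, unsigned int tail,
                                 unsigned int descs_needed)
    {
        /* Free descriptors between producer (head) and consumer (tail). */
        return CIRC_SPACE(head, tail, EX_TX_RING_SIZE) >= descs_needed;
    }

    static unsigned int ex_ring_pending(unsigned int head, unsigned int tail)
    {
        /* Descriptors the hardware still has to complete. */
        return CIRC_CNT(head, tail, EX_TX_RING_SIZE);
    }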
/linux-4.1.27/drivers/net/wireless/rsi/
H A Drsi_91x_core.c21 * rsi_determine_min_weight_queue() - This function determines the queue with
25 * Return: q_num: Corresponding queue number.
45 * corresponding to each queue.
85 * @q_num: the queue from which pkts have to be dequeued
126 * rsi_core_determine_hal_queue() - This function determines the queue from
130 * Return: q_num: Corresponding queue number on success.
160 /* Selecting the queue with least back off */ rsi_core_determine_hal_queue()
179 /* If any queues are freshly contended and the selected queue rsi_core_determine_hal_queue()
181 * then get the queue number again with fresh values rsi_core_determine_hal_queue()
202 * rsi_core_queue_pkt() - This function enqueues the packet to the queue
203 * specified by the queue number.
224 * rsi_core_dequeue_pkt() - This function dequeues the packet from the queue
225 * specified by the queue number.
244 * rsi_core_qos_processor() - This function is used to determine the wmm queue
246 * dequeued from the selected hal queue and sent to
364 rsi_dbg(ERR_ZONE, "%s: sw queue full\n", __func__); rsi_core_xmit()
378 rsi_dbg(ERR_ZONE, "%s: Failed to queue packet\n", __func__); rsi_core_xmit()
/linux-4.1.27/drivers/staging/lustre/lustre/obdclass/
H A Dcl_io.c326 * Check whether \a queue contains locks matching \a need.
328 * \retval +ve there is a matching lock in the \a queue
329 * \retval 0 there are no matching locks in the \a queue
331 int cl_queue_match(const struct list_head *queue, cl_queue_match() argument
336 list_for_each_entry(scan, queue, cill_linkage) { list_for_each_entry()
344 static int cl_queue_merge(const struct list_head *queue, cl_queue_merge() argument
349 list_for_each_entry(scan, queue, cill_linkage) { list_for_each_entry()
742 struct cl_2queue *queue; cl_io_read_page() local
751 queue = &io->ci_queue; cl_io_read_page()
753 cl_2queue_init(queue); cl_io_read_page()
776 result = cl_io_submit_rw(env, io, CRT_READ, queue);
780 cl_page_list_disown(env, io, &queue->c2_qin);
781 cl_2queue_fini(env, queue);
863 * queue->c2_qout queue, and queue->c2_qin contain both the pages that don't need
870 enum cl_req_type crt, struct cl_2queue *queue) cl_io_submit_rw()
881 queue); cl_io_for_each()
888 LASSERT(ergo(result != 0, list_empty(&queue->c2_qout.pl_pages)));
898 enum cl_req_type iot, struct cl_2queue *queue, cl_io_submit_sync()
905 cl_page_list_for_each(pg, &queue->c2_qin) { cl_io_submit_sync()
910 cl_sync_io_init(anchor, queue->c2_qin.pl_nr); cl_io_submit_sync()
911 rc = cl_io_submit_rw(env, io, iot, queue); cl_io_submit_sync()
919 cl_page_list_for_each(pg, &queue->c2_qin) { cl_io_submit_sync()
925 rc = cl_sync_io_wait(env, io, &queue->c2_qout, cl_io_submit_sync()
928 LASSERT(list_empty(&queue->c2_qout.pl_pages)); cl_io_submit_sync()
929 cl_page_list_for_each(pg, &queue->c2_qin) cl_io_submit_sync()
940 struct cl_page_list *queue) cl_io_cancel()
946 cl_page_list_for_each(page, queue) { cl_page_list_for_each()
1071 lu_ref_add_at(&page->cp_reference, &page->cp_queue_ref, "queue", plist); cl_page_list_add()
1090 lu_ref_del_at(&page->cp_reference, &page->cp_queue_ref, "queue", plist); cl_page_list_del()
1108 lu_ref_set_at(&page->cp_reference, &page->cp_queue_ref, "queue", cl_page_list_move()
1133 * Disowns pages in a queue.
1160 lu_ref_del_at(&page->cp_reference, &page->cp_queue_ref, "queue", cl_page_list_for_each_safe()
1168 * Releases pages from queue.
1184 * Owns all pages in a queue.
1210 * Assumes all pages in a queue.
1225 * Discards all pages in a queue.
1239 * Unmaps all pages in a queue from user virtual memory.
1259 * Initialize dual page queue.
1261 void cl_2queue_init(struct cl_2queue *queue) cl_2queue_init() argument
1263 cl_page_list_init(&queue->c2_qin); cl_2queue_init()
1264 cl_page_list_init(&queue->c2_qout); cl_2queue_init()
1269 * Add a page to the incoming page list of 2-queue.
1271 void cl_2queue_add(struct cl_2queue *queue, struct cl_page *page) cl_2queue_add() argument
1273 cl_page_list_add(&queue->c2_qin, page); cl_2queue_add()
1278 * Disown pages in both lists of a 2-queue.
1281 struct cl_io *io, struct cl_2queue *queue) cl_2queue_disown()
1283 cl_page_list_disown(env, io, &queue->c2_qin); cl_2queue_disown()
1284 cl_page_list_disown(env, io, &queue->c2_qout); cl_2queue_disown()
1289 * Discard (truncate) pages in both lists of a 2-queue.
1292 struct cl_io *io, struct cl_2queue *queue) cl_2queue_discard()
1294 cl_page_list_discard(env, io, &queue->c2_qin); cl_2queue_discard()
1295 cl_page_list_discard(env, io, &queue->c2_qout); cl_2queue_discard()
1303 struct cl_io *io, struct cl_2queue *queue) cl_2queue_assume()
1305 cl_page_list_assume(env, io, &queue->c2_qin); cl_2queue_assume()
1306 cl_page_list_assume(env, io, &queue->c2_qout); cl_2queue_assume()
1311 * Finalize both page lists of a 2-queue.
1313 void cl_2queue_fini(const struct lu_env *env, struct cl_2queue *queue) cl_2queue_fini() argument
1315 cl_page_list_fini(env, &queue->c2_qout); cl_2queue_fini()
1316 cl_page_list_fini(env, &queue->c2_qin); cl_2queue_fini()
1321 * Initialize a 2-queue to contain \a page in its incoming page list.
1323 void cl_2queue_init_page(struct cl_2queue *queue, struct cl_page *page) cl_2queue_init_page() argument
1325 cl_2queue_init(queue); cl_2queue_init_page()
1326 cl_2queue_add(queue, page); cl_2queue_init_page()
1612 struct cl_page_list *queue, struct cl_sync_io *anchor, cl_sync_io_wait()
1628 (void)cl_io_cancel(env, io, queue); cl_sync_io_wait()
1638 cl_page_list_assume(env, io, queue); cl_sync_io_wait()
869 cl_io_submit_rw(const struct lu_env *env, struct cl_io *io, enum cl_req_type crt, struct cl_2queue *queue) cl_io_submit_rw() argument
897 cl_io_submit_sync(const struct lu_env *env, struct cl_io *io, enum cl_req_type iot, struct cl_2queue *queue, long timeout) cl_io_submit_sync() argument
939 cl_io_cancel(const struct lu_env *env, struct cl_io *io, struct cl_page_list *queue) cl_io_cancel() argument
1280 cl_2queue_disown(const struct lu_env *env, struct cl_io *io, struct cl_2queue *queue) cl_2queue_disown() argument
1291 cl_2queue_discard(const struct lu_env *env, struct cl_io *io, struct cl_2queue *queue) cl_2queue_discard() argument
1302 cl_2queue_assume(const struct lu_env *env, struct cl_io *io, struct cl_2queue *queue) cl_2queue_assume() argument
1611 cl_sync_io_wait(const struct lu_env *env, struct cl_io *io, struct cl_page_list *queue, struct cl_sync_io *anchor, long timeout) cl_sync_io_wait() argument
/linux-4.1.27/drivers/media/pci/ivtv/
H A Divtv-queue.c23 #include "ivtv-queue.h"
56 /* clear the buffer if it is going to be enqueued to the free queue */ ivtv_enqueue()
105 /* Move 'needed_bytes' worth of buffers from queue 'from' into queue 'to'.
107 If 'steal' != NULL, then buffers may also taken from that queue if
108 needed, but only if 'from' is the free queue.
110 The buffer is automatically cleared if it goes to the free queue. It is
111 also cleared if buffers need to be taken from the 'steal' queue and
112 the 'from' queue is the free queue.
116 bytesused value. For the 'steal' queue the total available buffer
148 /* move buffers from the tail of the 'steal' queue to the tail of the ivtv_queue_move()
149 'from' queue. Always copy all the buffers with the same dma_xfer_cnt ivtv_queue_move()
H A DMakefile4 ivtv-mailbox.o ivtv-queue.o ivtv-streams.o ivtv-udma.o \
/linux-4.1.27/drivers/staging/rtl8188eu/os_dep/
H A Dxmit_linux.c108 u16 queue; rtw_os_pkt_complete() local
111 queue = skb_get_queue_mapping(pkt); rtw_os_pkt_complete()
113 if (__netif_subqueue_stopped(padapter->pnetdev, queue) && rtw_os_pkt_complete()
114 (pxmitpriv->hwxmits[queue].accnt < WMM_XMIT_THRESHOLD)) rtw_os_pkt_complete()
115 netif_wake_subqueue(padapter->pnetdev, queue); rtw_os_pkt_complete()
117 if (__netif_subqueue_stopped(padapter->pnetdev, queue)) rtw_os_pkt_complete()
118 netif_wake_subqueue(padapter->pnetdev, queue); rtw_os_pkt_complete()
151 u16 queue; rtw_check_xmit_resource() local
153 queue = skb_get_queue_mapping(pkt); rtw_check_xmit_resource()
156 if (pxmitpriv->hwxmits[queue].accnt > WMM_XMIT_THRESHOLD) rtw_check_xmit_resource()
157 netif_stop_subqueue(padapter->pnetdev, queue); rtw_check_xmit_resource()
160 if (!netif_tx_queue_stopped(netdev_get_tx_queue(padapter->pnetdev, queue))) rtw_check_xmit_resource()
161 netif_stop_subqueue(padapter->pnetdev, queue); rtw_check_xmit_resource()
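The stop/wake calls above are the standard per-queue flow control: pause one subqueue from the transmit path when its backlog crosses a threshold, wake it from the completion path once it drains. In outline (EX_HI/EX_LO thresholds are illustrative):

    #include <linux/netdevice.h>

    #define EX_HI 256
    #define EX_LO 128

    static void ex_xmit_side(struct net_device *dev, u16 queue, int backlog)
    {
        if (backlog > EX_HI)
            netif_stop_subqueue(dev, queue);        /* pause this TX queue only */
    }

    static void ex_completion_side(struct net_device *dev, u16 queue, int backlog)
    {
        if (backlog < EX_LO && __netif_subqueue_stopped(dev, queue))
            netif_wake_subqueue(dev, queue);        /* resume once drained */
    }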
/linux-4.1.27/fs/notify/
H A Dnotification.c20 * Basic idea behind the notification queue: An fsnotify group (like inotify)
23 * event to the group notify queue. Since a single event might need to be on
25 * queue and instead add a small "event_holder" to each queue. This event_holder
27 * going to end up on one, and only one, notification queue we embed one
63 /* return true if the notify queue is empty, false otherwise */ fsnotify_notify_queue_is_empty()
82 * Add an event to the group notification queue. The group can later pull this
83 * event off the queue to deal with. The function returns 0 if the event was
84 * added to the queue, 1 if the event was merged with some other queued event,
85 * 2 if the queue of events has overflown.
107 goto queue; fsnotify_add_event()
118 queue: fsnotify_add_event()
129 * Remove @event from group's notification queue. It is the responsibility of
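
The notification.c excerpt documents three outcomes for adding an event to a group's queue: 0 when the event was queued, 1 when it was merged with an already-queued event, 2 when the queue has overflowed. A compact stand-in for that contract (hypothetical structures and merge rule, not fsnotify's):

/* Illustrative sketch of a bounded event queue with merge/overflow results. */
#include <stdio.h>
#include <string.h>

#define MAX_EVENTS 4

struct event {
    int mask;
    char name[32];
};

struct group {
    struct event q[MAX_EVENTS];
    int count;
};

/* Returns 0 if queued, 1 if merged with the previous event, 2 on overflow. */
static int add_event(struct group *g, int mask, const char *name)
{
    if (g->count &&
        g->q[g->count - 1].mask == mask &&
        !strcmp(g->q[g->count - 1].name, name))
        return 1;               /* merged with an identical queued event */
    if (g->count == MAX_EVENTS)
        return 2;               /* queue overflow */
    g->q[g->count].mask = mask;
    snprintf(g->q[g->count].name, sizeof(g->q[g->count].name), "%s", name);
    g->count++;
    return 0;
}

int main(void)
{
    struct group g = { .count = 0 };

    printf("%d\n", add_event(&g, 1, "a"));  /* 0: queued */
    printf("%d\n", add_event(&g, 1, "a"));  /* 1: merged */
    for (int i = 0; i < 5; i++)
        add_event(&g, i + 2, "b");
    printf("%d\n", add_event(&g, 99, "c")); /* 2: overflow */
    return 0;
}
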
/linux-4.1.27/drivers/infiniband/hw/ipath/
H A Dipath_cq.c41 * ipath_cq_enter - add a new entry to the completion queue
42 * @cq: completion queue
61 wc = cq->queue; ipath_cq_enter()
120 * @ibcq: the completion queue to poll
137 /* The kernel can only poll a kernel completion queue */ ipath_poll_cq()
145 wc = cq->queue; ipath_poll_cq()
189 * ipath_create_cq - create a completion queue
190 * @ibdev: the device this completion queue is attached to
191 * @entries: the minimum size of the completion queue
195 * Returns a pointer to the completion queue or negative errno values
215 /* Allocate the completion queue structure. */ ipath_create_cq()
223 * Allocate the completion queue entries and head/tail pointers. ipath_create_cq()
290 cq->queue = wc; ipath_create_cq()
307 * ipath_destroy_cq - destroy a completion queue
308 * @ibcq: the completion queue to destroy.
326 vfree(cq->queue); ipath_destroy_cq()
333 * ipath_req_notify_cq - change the notification type for a completion queue
334 * @ibcq: the completion queue
357 cq->queue->head != cq->queue->tail) ipath_req_notify_cq()
367 * @ibcq: the completion queue
413 old_wc = cq->queue; ipath_resize_cq()
441 cq->queue = wc; ipath_resize_cq()
/linux-4.1.27/drivers/infiniband/hw/qib/
H A Dqib_cq.c44 * qib_cq_enter - add a new entry to the completion queue
45 * @cq: completion queue
64 wc = cq->queue; qib_cq_enter()
127 * @ibcq: the completion queue to poll
144 /* The kernel can only poll a kernel completion queue */ qib_poll_cq()
152 wc = cq->queue; qib_poll_cq()
204 * qib_create_cq - create a completion queue
205 * @ibdev: the device this completion queue is attached to
206 * @entries: the minimum size of the completion queue
210 * Returns a pointer to the completion queue or negative errno values
230 /* Allocate the completion queue structure. */ qib_create_cq()
238 * Allocate the completion queue entries and head/tail pointers. qib_create_cq()
306 cq->queue = wc; qib_create_cq()
323 * qib_destroy_cq - destroy a completion queue
324 * @ibcq: the completion queue to destroy.
342 vfree(cq->queue); qib_destroy_cq()
349 * qib_req_notify_cq - change the notification type for a completion queue
350 * @ibcq: the completion queue
373 cq->queue->head != cq->queue->tail) qib_req_notify_cq()
383 * @ibcq: the completion queue
429 old_wc = cq->queue; qib_resize_cq()
457 cq->queue = wc; qib_resize_cq()
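
Both the ipath and qib hits revolve around a completion queue kept as a head/tail ring: new completions are written at head, consumers poll from tail, and the queue is non-empty whenever head != tail. A minimal ring of the same shape, purely illustrative:

/* Illustrative head/tail completion ring, not the ipath/qib code. */
#include <stdio.h>

#define CQ_ENTRIES 8            /* one slot stays unused to tell full from empty */

struct wc {
    unsigned long wr_id;
    int status;
};

struct cq {
    unsigned int head;          /* next slot the producer writes */
    unsigned int tail;          /* next slot the consumer reads */
    struct wc queue[CQ_ENTRIES];
};

static int cq_enter(struct cq *cq, struct wc entry)
{
    unsigned int next = (cq->head + 1) % CQ_ENTRIES;

    if (next == cq->tail)
        return -1;              /* ring full: an overflow would be reported */
    cq->queue[cq->head] = entry;
    cq->head = next;
    return 0;
}

static int cq_poll(struct cq *cq, struct wc *entry)
{
    if (cq->tail == cq->head)
        return 0;               /* empty */
    *entry = cq->queue[cq->tail];
    cq->tail = (cq->tail + 1) % CQ_ENTRIES;
    return 1;
}

int main(void)
{
    struct cq cq = { 0, 0, { { 0, 0 } } };
    struct wc wc;

    cq_enter(&cq, (struct wc){ .wr_id = 42, .status = 0 });
    while (cq_poll(&cq, &wc))
        printf("completion wr_id=%lu status=%d\n", wc.wr_id, wc.status);
    return 0;
}
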
/linux-4.1.27/drivers/net/ethernet/marvell/
H A Dmvneta.c116 /* bits 0..7 = TXQ SENT, one bit per queue.
117 * bits 8..15 = RXQ OCCUP, one bit per queue.
118 * bits 16..23 = RXQ FREE, one bit per queue.
399 /* Number of this TX queue, in the range 0-7 */
443 /* rx queue number, in the range 0-7 */
469 * the first one to be used. Therefore, let's just allocate one queue.
663 /* Set rx queue offset */ mvneta_rxq_offset_set()
745 int queue; mvneta_port_up() local
751 for (queue = 0; queue < txq_number; queue++) { mvneta_port_up()
752 struct mvneta_tx_queue *txq = &pp->txqs[queue]; mvneta_port_up()
754 q_map |= (1 << queue); mvneta_port_up()
760 for (queue = 0; queue < rxq_number; queue++) { mvneta_port_up()
761 struct mvneta_rx_queue *rxq = &pp->rxqs[queue]; mvneta_port_up()
763 q_map |= (1 << queue); mvneta_port_up()
866 /* Set all entries in Unicast MAC Table; queue==-1 means reject all */ mvneta_set_ucast_table()
867 static void mvneta_set_ucast_table(struct mvneta_port *pp, int queue) mvneta_set_ucast_table() argument
872 if (queue == -1) { mvneta_set_ucast_table()
875 val = 0x1 | (queue << 1); mvneta_set_ucast_table()
883 /* Set all entries in Special Multicast MAC Table; queue==-1 means reject all */ mvneta_set_special_mcast_table()
884 static void mvneta_set_special_mcast_table(struct mvneta_port *pp, int queue) mvneta_set_special_mcast_table() argument
889 if (queue == -1) { mvneta_set_special_mcast_table()
892 val = 0x1 | (queue << 1); mvneta_set_special_mcast_table()
901 /* Set all entries in Other Multicast MAC Table. queue==-1 means reject all */ mvneta_set_other_mcast_table()
902 static void mvneta_set_other_mcast_table(struct mvneta_port *pp, int queue) mvneta_set_other_mcast_table() argument
907 if (queue == -1) { mvneta_set_other_mcast_table()
912 val = 0x1 | (queue << 1); mvneta_set_other_mcast_table()
932 int queue; mvneta_defaults_set() local
949 /* Set CPU queue access map - all CPUs have access to all RX mvneta_defaults_set()
963 for (queue = 0; queue < txq_number; queue++) { mvneta_defaults_set()
964 mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(queue), 0); mvneta_defaults_set()
965 mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(queue), 0); mvneta_defaults_set()
1040 int queue; mvneta_txq_max_tx_size_set() local
1062 for (queue = 0; queue < txq_number; queue++) { mvneta_txq_max_tx_size_set()
1063 val = mvreg_read(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue)); mvneta_txq_max_tx_size_set()
1070 mvreg_write(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue), val); mvneta_txq_max_tx_size_set()
1077 int queue) mvneta_set_ucast_addr()
1094 if (queue == -1) { mvneta_set_ucast_addr()
1099 unicast_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset)); mvneta_set_ucast_addr()
1107 int queue) mvneta_mac_addr_set()
1112 if (queue != -1) { mvneta_mac_addr_set()
1122 mvneta_set_ucast_addr(pp, addr[5], queue); mvneta_mac_addr_set()
1302 /* Return tx queue pointer (find last set bit) according to <cause> returned
1304 * valid queue for matching the first one found in <cause>.
1309 int queue = fls(cause) - 1; mvneta_tx_done_policy() local
1311 return &pp->txqs[queue]; mvneta_tx_done_policy()
1314 /* Free tx queue skbuffs */ mvneta_txq_bufs_free()
1429 /* Returns rx queue pointer (find last set bit) according to causeRxTx
1435 int queue = fls(cause >> 8) - 1; mvneta_rx_policy() local
1437 return (queue < 0 || queue >= rxq_number) ? NULL : &pp->rxqs[queue]; mvneta_rx_policy()
1898 int queue) mvneta_set_special_mcast_addr()
1912 if (queue == -1) mvneta_set_special_mcast_addr()
1916 smc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset)); mvneta_set_special_mcast_addr()
1933 int queue) mvneta_set_other_mcast_addr()
1944 if (queue == -1) { mvneta_set_other_mcast_addr()
1949 omc_table_reg |= ((0x01 | (queue << 1)) << (8 * reg_offset)); mvneta_set_other_mcast_addr()
1965 int queue) mvneta_mcast_addr_set()
1970 mvneta_set_special_mcast_addr(pp, p_addr[5], queue); mvneta_mcast_addr_set()
1975 if (queue == -1) { mvneta_mcast_addr_set()
1992 mvneta_set_other_mcast_addr(pp, crc_result, queue); mvneta_mcast_addr_set()
2098 * packets on the corresponding TXQ (Bit 0 is for TX queue 1).
2100 * packets on the corresponding RXQ (Bit 8 is for RX queue 0).
2143 /* get rx queue number from cause_rx_tx */ mvneta_poll()
2148 /* process the packet in that rx queue */ mvneta_poll()
2156 * will find the next rx queue where mvneta_poll()
2208 int queue; mvneta_tx_reset() local
2211 for (queue = 0; queue < txq_number; queue++) mvneta_tx_reset()
2212 mvneta_txq_done_force(pp, &pp->txqs[queue]); mvneta_tx_reset()
2224 /* Rx/Tx queue initialization/cleanup methods */
2226 /* Create a specified RX queue */ mvneta_rxq_init()
2245 /* Set Rx descriptors queue starting address */ mvneta_rxq_init()
2264 /* Cleanup Rx queue */ mvneta_rxq_deinit()
2282 /* Create and initialize a tx queue */ mvneta_txq_init()
2288 /* A queue must always have room for at least one skb. mvneta_txq_init()
2289 * Therefore, stop the queue when the free entries reaches mvneta_txq_init()
2313 /* Set Tx descriptors queue starting address */ mvneta_txq_init()
2365 /* Set Tx descriptors queue starting address and size */ mvneta_txq_deinit()
2373 int queue; mvneta_cleanup_txqs() local
2375 for (queue = 0; queue < txq_number; queue++) mvneta_cleanup_txqs()
2376 mvneta_txq_deinit(pp, &pp->txqs[queue]); mvneta_cleanup_txqs()
2382 int queue; mvneta_cleanup_rxqs() local
2384 for (queue = 0; queue < rxq_number; queue++) mvneta_cleanup_rxqs()
2385 mvneta_rxq_deinit(pp, &pp->rxqs[queue]); mvneta_cleanup_rxqs()
2392 int queue; mvneta_setup_rxqs() local
2394 for (queue = 0; queue < rxq_number; queue++) { mvneta_setup_rxqs()
2395 int err = mvneta_rxq_init(pp, &pp->rxqs[queue]); mvneta_setup_rxqs()
2398 __func__, queue); mvneta_setup_rxqs()
2410 int queue; mvneta_setup_txqs() local
2412 for (queue = 0; queue < txq_number; queue++) { mvneta_setup_txqs()
2413 int err = mvneta_txq_init(pp, &pp->txqs[queue]); mvneta_setup_txqs()
2416 __func__, queue); mvneta_setup_txqs()
2794 int queue; mvneta_ethtool_set_coalesce() local
2796 for (queue = 0; queue < rxq_number; queue++) { mvneta_ethtool_set_coalesce()
2797 struct mvneta_rx_queue *rxq = &pp->rxqs[queue]; mvneta_ethtool_set_coalesce()
2804 for (queue = 0; queue < txq_number; queue++) { mvneta_ethtool_set_coalesce()
2805 struct mvneta_tx_queue *txq = &pp->txqs[queue]; mvneta_ethtool_set_coalesce()
2863 netdev_warn(dev, "TX queue size set to %u (requested %u)\n", mvneta_ethtool_set_ringparam()
2904 int queue; mvneta_init() local
2918 for (queue = 0; queue < txq_number; queue++) { mvneta_init()
2919 struct mvneta_tx_queue *txq = &pp->txqs[queue]; mvneta_init()
2920 txq->id = queue; mvneta_init()
2931 for (queue = 0; queue < rxq_number; queue++) { mvneta_init()
2932 struct mvneta_rx_queue *rxq = &pp->rxqs[queue]; mvneta_init()
2933 rxq->id = queue; mvneta_init()
3037 * allow the usage of the first RX queue mvneta_probe()
1076 mvneta_set_ucast_addr(struct mvneta_port *pp, u8 last_nibble, int queue) mvneta_set_ucast_addr() argument
1106 mvneta_mac_addr_set(struct mvneta_port *pp, unsigned char *addr, int queue) mvneta_mac_addr_set() argument
1896 mvneta_set_special_mcast_addr(struct mvneta_port *pp, unsigned char last_byte, int queue) mvneta_set_special_mcast_addr() argument
1931 mvneta_set_other_mcast_addr(struct mvneta_port *pp, unsigned char crc8, int queue) mvneta_set_other_mcast_addr() argument
1964 mvneta_mcast_addr_set(struct mvneta_port *pp, unsigned char *p_addr, int queue) mvneta_mcast_addr_set() argument
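
Several mvneta helpers above build filter-table entries as 0x1 | (queue << 1): bit 0 marks the entry as valid/pass and the higher bits carry the RX queue number, while queue == -1 clears the entry so the address is rejected. A small sketch of that encoding (helper names invented):

/* Illustrative sketch of the "0x1 | (queue << 1)" table-entry encoding. */
#include <stdio.h>

/* Build one byte of a MAC filter table: pass to 'queue', or reject if queue < 0. */
static unsigned int table_entry(int queue)
{
    if (queue < 0)
        return 0;                               /* invalid entry: reject the address */
    return 0x1 | ((unsigned int)queue << 1);    /* bit 0 = valid, bits 1.. = queue */
}

static int entry_queue(unsigned int entry)
{
    return (entry & 0x1) ? (int)(entry >> 1) : -1;
}

int main(void)
{
    for (int q = -1; q < 3; q++) {
        unsigned int e = table_entry(q);
        printf("queue %2d -> entry 0x%02x -> decoded %d\n", q, e, entry_queue(e));
    }
    return 0;
}
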
/linux-4.1.27/drivers/scsi/aacraid/
H A Dcomminit.c287 * queue headers. aac_comm_init()
297 /* Adapter to Host normal priority Command queue */ aac_comm_init()
298 comm->queue[HostNormCmdQueue].base = queues; aac_comm_init()
299 aac_queue_init(dev, &comm->queue[HostNormCmdQueue], headers, HOST_NORM_CMD_ENTRIES); aac_comm_init()
303 /* Adapter to Host high priority command queue */ aac_comm_init()
304 comm->queue[HostHighCmdQueue].base = queues; aac_comm_init()
305 aac_queue_init(dev, &comm->queue[HostHighCmdQueue], headers, HOST_HIGH_CMD_ENTRIES); aac_comm_init()
310 /* Host to adapter normal priority command queue */ aac_comm_init()
311 comm->queue[AdapNormCmdQueue].base = queues; aac_comm_init()
312 aac_queue_init(dev, &comm->queue[AdapNormCmdQueue], headers, ADAP_NORM_CMD_ENTRIES); aac_comm_init()
317 /* host to adapter high priority command queue */ aac_comm_init()
318 comm->queue[AdapHighCmdQueue].base = queues; aac_comm_init()
319 aac_queue_init(dev, &comm->queue[AdapHighCmdQueue], headers, ADAP_HIGH_CMD_ENTRIES); aac_comm_init()
324 /* adapter to host normal priority response queue */ aac_comm_init()
325 comm->queue[HostNormRespQueue].base = queues; aac_comm_init()
326 aac_queue_init(dev, &comm->queue[HostNormRespQueue], headers, HOST_NORM_RESP_ENTRIES); aac_comm_init()
330 /* adapter to host high priority response queue */ aac_comm_init()
331 comm->queue[HostHighRespQueue].base = queues; aac_comm_init()
332 aac_queue_init(dev, &comm->queue[HostHighRespQueue], headers, HOST_HIGH_RESP_ENTRIES); aac_comm_init()
337 /* host to adapter normal priority response queue */ aac_comm_init()
338 comm->queue[AdapNormRespQueue].base = queues; aac_comm_init()
339 aac_queue_init(dev, &comm->queue[AdapNormRespQueue], headers, ADAP_NORM_RESP_ENTRIES); aac_comm_init()
344 /* host to adapter high priority response queue */ aac_comm_init()
345 comm->queue[AdapHighRespQueue].base = queues; aac_comm_init()
346 aac_queue_init(dev, &comm->queue[AdapHighRespQueue], headers, ADAP_HIGH_RESP_ENTRIES); aac_comm_init()
348 comm->queue[AdapNormCmdQueue].lock = comm->queue[HostNormRespQueue].lock; aac_comm_init()
349 comm->queue[AdapHighCmdQueue].lock = comm->queue[HostHighRespQueue].lock; aac_comm_init()
350 comm->queue[AdapNormRespQueue].lock = comm->queue[HostNormCmdQueue].lock; aac_comm_init()
351 comm->queue[AdapHighRespQueue].lock = comm->queue[HostHighCmdQueue].lock; aac_comm_init()
H A Ddpcsup.c49 * know there is a response on our normal priority queue. We will pull off
51 * take a spinlock out on the queue before operating on it.
65 * Keep pulling response QEs off the response queue and waking aac_response_normal()
80 * Remove this fib from the Outstanding I/O queue. aac_response_normal()
87 atomic_dec(&dev->queues->queue[AdapNormCmdQueue].numpending); aac_response_normal()
165 * @q: queue to process
168 * let us know there is a command on our normal priority queue. We will
170 * We will take a spinlock out on the queue before operating on it.
182 * Keep pulling response QEs off the response queue and waking aac_command_normal()
282 * know there is a response on our normal priority queue. We will pull off
293 struct aac_queue *q = &dev->queues->queue[HostNormCmdQueue]; aac_intr_normal()
350 * Remove this fib from the Outstanding I/O queue. aac_intr_normal()
357 atomic_dec(&dev->queues->queue[AdapNormCmdQueue].numpending); aac_intr_normal()
/linux-4.1.27/drivers/media/pci/cx18/
H A DMakefile2 cx18-queue.o cx18-streams.o cx18-fileops.o cx18-ioctl.o cx18-controls.o \
/linux-4.1.27/drivers/crypto/qce/
H A Dcore.h21 * @queue: crypto request queue
22 * @lock: the lock protects queue and req
38 struct crypto_queue queue; member in struct:qce_device
/linux-4.1.27/arch/sparc/include/asm/
H A Dintr_queue.h4 /* Sun4v interrupt queue registers, accessed via ASI_QUEUE. */
/linux-4.1.27/include/crypto/internal/
H A Dskcipher.h76 struct crypto_queue *queue, struct skcipher_givcrypt_request *request) skcipher_enqueue_givcrypt()
78 return ablkcipher_enqueue_request(queue, &request->creq); skcipher_enqueue_givcrypt()
82 struct crypto_queue *queue) skcipher_dequeue_givcrypt()
84 return skcipher_givcrypt_cast(crypto_dequeue_request(queue)); skcipher_dequeue_givcrypt()
75 skcipher_enqueue_givcrypt( struct crypto_queue *queue, struct skcipher_givcrypt_request *request) skcipher_enqueue_givcrypt() argument
81 skcipher_dequeue_givcrypt( struct crypto_queue *queue) skcipher_dequeue_givcrypt() argument
/linux-4.1.27/drivers/media/platform/s5p-mfc/
H A Ds5p_mfc_intr.c28 ret = wait_event_interruptible_timeout(dev->queue, s5p_mfc_wait_for_done_dev()
60 ret = wait_event_interruptible_timeout(ctx->queue, s5p_mfc_wait_for_done_ctx()
65 ret = wait_event_timeout(ctx->queue, s5p_mfc_wait_for_done_ctx()
/linux-4.1.27/drivers/s390/crypto/
H A Dap_bus.h43 * The ap_qid_t identifier of an ap queue. It contains a
44 * 6 bit device index and a 4 bit queue index (domain).
53 * struct ap_queue_status - Holds the AP queue status.
54 * @queue_empty: Shows if queue is empty
56 * @queue_full: Is 1 if the queue is full
62 * The ap queue status word is returned by all three AP functions
162 ap_qid_t qid; /* AP queue id. */
163 int queue_depth; /* AP queue depth.*/
171 int queue_count; /* # messages currently on AP queue. */
173 struct list_head pendingq; /* List of message sent to AP queue. */
205 unsigned int aqm[8]; /* AP queue mask */
229 * for the first time. Otherwise the ap message queue will get
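
The ap_bus.h excerpt says an ap_qid_t packs a 6-bit device index and a 4-bit queue (domain) index. A tiny compose/split sketch follows; the bit positions used here are an assumption for illustration, not the layout defined by the header.

/* Illustrative qid packing: 6-bit device index + 4-bit queue/domain index.
 * The bit positions are an assumption for this sketch, not ap_bus.h's layout. */
#include <stdio.h>

typedef unsigned int ap_qid_t;

static ap_qid_t mk_qid(unsigned int device, unsigned int domain)
{
    return ((device & 0x3f) << 4) | (domain & 0x0f);
}

static unsigned int qid_device(ap_qid_t qid) { return (qid >> 4) & 0x3f; }
static unsigned int qid_domain(ap_qid_t qid) { return qid & 0x0f; }

int main(void)
{
    ap_qid_t qid = mk_qid(17, 3);

    printf("qid=0x%03x device=%u domain=%u\n", qid, qid_device(qid), qid_domain(qid));
    return 0;
}
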
/linux-4.1.27/arch/xtensa/include/uapi/asm/
H A Dmsgbuf.h39 unsigned long msg_cbytes; /* current number of bytes on queue */
40 unsigned long msg_qnum; /* number of messages in queue */
41 unsigned long msg_qbytes; /* max number of bytes on queue */
/linux-4.1.27/drivers/atm/
H A Dfore200e.h31 #define QUEUE_SIZE_CMD 16 /* command queue capacity */
32 #define QUEUE_SIZE_RX 64 /* receive queue capacity */
33 #define QUEUE_SIZE_TX 256 /* transmit queue capacity */
34 #define QUEUE_SIZE_BS 32 /* buffer supply queue capacity */
212 /* cp resident transmit queue entry */
220 /* cp resident receive queue entry */
228 /* cp resident buffer supply queue entry */
509 /* cp resident command queue */
518 /* host resident transmit queue entry */
521 struct cp_txq_entry __iomem *cp_entry; /* addr of cp resident tx queue entry */
533 /* host resident receive queue entry */
536 struct cp_rxq_entry __iomem *cp_entry; /* addr of cp resident rx queue entry */
543 /* host resident buffer supply queue entry */
546 struct cp_bsq_entry __iomem *cp_entry; /* addr of cp resident buffer supply queue entry */
553 /* host resident command queue entry */
556 struct cp_cmdq_entry __iomem *cp_entry; /* addr of cp resident cmd queue entry */
583 unsigned long index; /* buffer # in queue */
598 /* host resident command queue */
601 struct host_cmdq_entry host_entry[ QUEUE_SIZE_CMD ]; /* host resident cmd queue entries */
602 int head; /* head of cmd queue */
607 /* host resident transmit queue */
610 struct host_txq_entry host_entry[ QUEUE_SIZE_TX ]; /* host resident tx queue entries */
611 int head; /* head of tx queue */
612 int tail; /* tail of tx queue */
615 int txing; /* number of pending PDUs in tx queue */
619 /* host resident receive queue */
622 struct host_rxq_entry host_entry[ QUEUE_SIZE_RX ]; /* host resident rx queue entries */
623 int head; /* head of rx queue */
632 struct host_bsq_entry host_entry[ QUEUE_SIZE_BS ]; /* host resident buffer supply queue entries */
633 int head; /* head of buffer supply queue */
657 u32 queue_length; /* queue capacity */
665 /* initialization command block (one-time command, not in cmd queue) */
672 u32 cmd_queue_len; /* length of command queue */
673 u32 tx_queue_len; /* length of transmit queue */
674 u32 rx_queue_len; /* length of receive queue */
697 u32 cp_cmdq; /* command queue */
698 u32 cp_txq; /* transmit queue */
699 u32 cp_rxq; /* receive queue */
711 struct init_block init; /* one time cmd, not in cmd queue */
759 FORE200E_STATE_INIT_CMDQ, /* command queue initialized */
760 FORE200E_STATE_INIT_TXQ, /* transmit queue initialized */
761 FORE200E_STATE_INIT_RXQ, /* receive queue initialized */
762 FORE200E_STATE_INIT_BSQ, /* buffer supply queue initialized */
855 struct host_cmdq host_cmdq; /* host resident cmd queue */
856 struct host_txq host_txq; /* host resident tx queue */
857 struct host_rxq host_rxq; /* host resident rx queue */
868 spinlock_t q_lock; /* protects queue ops */
873 unsigned long tx_sat; /* tx queue saturation count */
/linux-4.1.27/include/uapi/asm-generic/
H A Dmsgbuf.h38 __kernel_ulong_t msg_cbytes; /* current number of bytes on queue */
39 __kernel_ulong_t msg_qnum; /* number of messages in queue */
40 __kernel_ulong_t msg_qbytes; /* max number of bytes on queue */
/linux-4.1.27/include/linux/platform_data/
H A Dmailbox-omap.h26 * @tx_id: mailbox queue id used for transmitting messages
27 * @rx_id: mailbox queue id on which messages are received
43 while programming mailbox queue interrupts
/linux-4.1.27/arch/mips/include/uapi/asm/
H A Dmsgbuf.h38 unsigned long msg_cbytes; /* current number of bytes on queue */
39 unsigned long msg_qnum; /* number of messages in queue */
40 unsigned long msg_qbytes; /* max number of bytes on queue */
/linux-4.1.27/drivers/block/
H A Dps3disk.c43 spinlock_t lock; /* Request queue spinlock */
44 struct request_queue *queue; member in struct:ps3disk_private
281 ps3disk_do_request(dev, priv->queue); ps3disk_interrupt()
409 struct request_queue *queue; ps3disk_probe() local
453 queue = blk_init_queue(ps3disk_request, &priv->lock); ps3disk_probe()
454 if (!queue) { ps3disk_probe()
461 priv->queue = queue; ps3disk_probe()
462 queue->queuedata = dev; ps3disk_probe()
464 blk_queue_bounce_limit(queue, BLK_BOUNCE_HIGH); ps3disk_probe()
466 blk_queue_max_hw_sectors(queue, dev->bounce_size >> 9); ps3disk_probe()
467 blk_queue_segment_boundary(queue, -1UL); ps3disk_probe()
468 blk_queue_dma_alignment(queue, dev->blk_size-1); ps3disk_probe()
469 blk_queue_logical_block_size(queue, dev->blk_size); ps3disk_probe()
471 blk_queue_flush(queue, REQ_FLUSH); ps3disk_probe()
473 blk_queue_max_segments(queue, -1); ps3disk_probe()
474 blk_queue_max_segment_size(queue, dev->bounce_size); ps3disk_probe()
488 gendisk->queue = queue; ps3disk_probe()
506 blk_cleanup_queue(queue); ps3disk_probe()
531 blk_cleanup_queue(priv->queue); ps3disk_remove()
/linux-4.1.27/net/irda/
H A Dirqueue.c5 * Description: General queue implementation
228 * Function enqueue_first (queue, proc)
230 * Insert item first in queue.
233 static void enqueue_first(irda_queue_t **queue, irda_queue_t* element) enqueue_first() argument
237 * Check if queue is empty. enqueue_first()
239 if ( *queue == NULL ) { enqueue_first()
241 * Queue is empty. Insert one element into the queue. enqueue_first()
243 element->q_next = element->q_prev = *queue = element; enqueue_first()
247 * Queue is not empty. Insert element into front of queue. enqueue_first()
249 element->q_next = (*queue); enqueue_first()
250 (*queue)->q_prev->q_next = element; enqueue_first()
251 element->q_prev = (*queue)->q_prev; enqueue_first()
252 (*queue)->q_prev = element; enqueue_first()
253 (*queue) = element; enqueue_first()
259 * Function dequeue (queue)
261 * Remove first entry in queue
264 static irda_queue_t *dequeue_first(irda_queue_t **queue) dequeue_first() argument
273 ret = *queue; dequeue_first()
275 if ( *queue == NULL ) { dequeue_first()
279 } else if ( (*queue)->q_next == *queue ) { dequeue_first()
284 *queue = NULL; dequeue_first()
289 (*queue)->q_prev->q_next = (*queue)->q_next; dequeue_first()
290 (*queue)->q_next->q_prev = (*queue)->q_prev; dequeue_first()
291 *queue = (*queue)->q_next; dequeue_first()
295 * Return the removed entry (or NULL if the queue was empty). dequeue_first()
301 * Function dequeue_general (queue, element)
305 static irda_queue_t *dequeue_general(irda_queue_t **queue, irda_queue_t* element) dequeue_general() argument
314 ret = *queue; dequeue_general()
316 if ( *queue == NULL ) { dequeue_general()
320 } else if ( (*queue)->q_next == *queue ) { dequeue_general()
325 *queue = NULL; dequeue_general()
333 if ( (*queue) == element) dequeue_general()
334 (*queue) = element->q_next; dequeue_general()
338 * Return the removed entry (or NULL if the queue was empty). dequeue_general()
391 irda_queue_t* queue; hashbin_delete() local
409 queue = dequeue_first((irda_queue_t**) &hashbin->hb_queue[i]); hashbin_delete()
410 while (queue ) { hashbin_delete()
412 (*free_func)(queue); hashbin_delete()
413 queue = dequeue_first( hashbin_delete()
884 * Make sure that we are not back at the beginning of the queue hashbin_get_next()
894 * Check that this is not the last queue in hashbin hashbin_get_next()
900 * Move to next queue in hashbin hashbin_get_next()
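
The irqueue.c hits implement a circular, doubly linked queue: enqueue_first() links a new element in front of the current head, and dequeue_first() unlinks the head, returning NULL when the queue was empty. The same pattern in self-contained form (generic node type, not IrDA's irda_queue_t):

/* Illustrative circular doubly linked queue, mirroring the enqueue_first()/
 * dequeue_first() pattern from the excerpt. */
#include <stdio.h>

struct qnode {
    struct qnode *next, *prev;
    int value;
};

static void enqueue_first(struct qnode **queue, struct qnode *e)
{
    if (*queue == NULL) {
        /* empty queue: the single element points at itself */
        e->next = e->prev = *queue = e;
        return;
    }
    /* insert in front of the current head */
    e->next = *queue;
    e->prev = (*queue)->prev;
    (*queue)->prev->next = e;
    (*queue)->prev = e;
    *queue = e;
}

static struct qnode *dequeue_first(struct qnode **queue)
{
    struct qnode *ret = *queue;

    if (ret == NULL)
        return NULL;                    /* queue was empty */
    if (ret->next == ret) {
        *queue = NULL;                  /* last element removed */
    } else {
        ret->prev->next = ret->next;
        ret->next->prev = ret->prev;
        *queue = ret->next;
    }
    return ret;
}

int main(void)
{
    struct qnode a = { .value = 1 }, b = { .value = 2 };
    struct qnode *head = NULL, *e;

    enqueue_first(&head, &a);
    enqueue_first(&head, &b);
    while ((e = dequeue_first(&head)) != NULL)
        printf("%d\n", e->value);       /* prints 2 then 1 */
    return 0;
}
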
/linux-4.1.27/arch/s390/include/asm/
H A Dqdio.h26 * struct qdesfmt0 - queue descriptor, format 0
50 * struct qdr - queue description record (QDR)
51 * @qfmt: queue format
54 * @iqdcnt: input queue descriptor count
55 * @oqdcnt: output queue descriptor count
56 * @iqdsz: input queue descriptor size
57 * @oqdsz: output queue descriptor size
58 * @qiba: queue information block address
59 * @qkey: queue information block key
60 * @qdf0: queue descriptions
90 * struct qib - queue information block (QIB)
91 * @qfmt: queue format
126 * struct qaob - queue asynchronous operation block
331 * @q_format: queue format
342 * @queue_start_poll_array: polling handlers (one per input queue or NULL)
/linux-4.1.27/include/crypto/
H A Dalgapi.h178 void crypto_init_queue(struct crypto_queue *queue, unsigned int max_qlen);
179 int crypto_enqueue_request(struct crypto_queue *queue,
181 void *__crypto_dequeue_request(struct crypto_queue *queue, unsigned int offset);
182 struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue);
183 int crypto_tfm_in_queue(struct crypto_queue *queue, struct crypto_tfm *tfm);
337 struct crypto_queue *queue) crypto_get_backlog()
339 return queue->backlog == &queue->list ? NULL : crypto_get_backlog()
340 container_of(queue->backlog, struct crypto_async_request, list); crypto_get_backlog()
343 static inline int ablkcipher_enqueue_request(struct crypto_queue *queue, ablkcipher_enqueue_request() argument
346 return crypto_enqueue_request(queue, &request->base); ablkcipher_enqueue_request()
350 struct crypto_queue *queue) ablkcipher_dequeue_request()
352 return ablkcipher_request_cast(crypto_dequeue_request(queue)); ablkcipher_dequeue_request()
360 static inline int ablkcipher_tfm_in_queue(struct crypto_queue *queue, ablkcipher_tfm_in_queue() argument
363 return crypto_tfm_in_queue(queue, crypto_ablkcipher_tfm(tfm)); ablkcipher_tfm_in_queue()
336 crypto_get_backlog( struct crypto_queue *queue) crypto_get_backlog() argument
349 ablkcipher_dequeue_request( struct crypto_queue *queue) ablkcipher_dequeue_request() argument
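
The algapi.h hits outline the crypto request queue API: a bounded FIFO that, once max_qlen is reached, may park further requests on a backlog (roughly the -EINPROGRESS vs. -EBUSY distinction). A loose, generic analogue of that bounded-queue-plus-backlog idea, not the kernel implementation:

/* Illustrative bounded request queue with a backlog, not crypto/algapi.c. */
#include <stdio.h>

#define MAX_QLEN    2
#define BACKLOG_LEN 4

struct req_queue {
    int fifo[MAX_QLEN + BACKLOG_LEN];
    int qlen;                   /* requests accepted into the normal queue */
    int backlog;                /* requests parked beyond max_qlen */
};

/* Returns 0 if queued, 1 if accepted onto the backlog, -1 if rejected. */
static int enqueue_request(struct req_queue *q, int req, int may_backlog)
{
    if (q->qlen < MAX_QLEN) {
        q->fifo[q->qlen++] = req;
        return 0;
    }
    if (may_backlog && q->backlog < BACKLOG_LEN) {
        q->fifo[MAX_QLEN + q->backlog++] = req;
        return 1;               /* caller must wait for the backlog to drain */
    }
    return -1;                  /* queue full, request dropped */
}

int main(void)
{
    struct req_queue q = { .qlen = 0, .backlog = 0 };

    for (int i = 0; i < 4; i++)
        printf("req %d -> %d\n", i, enqueue_request(&q, i, 1));
    return 0;
}
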
/linux-4.1.27/drivers/media/platform/xilinx/
H A Dxilinx-dma.h64 * @lock: protects the @format, @fmtinfo and @queue fields
67 * @queue: vb2 buffers queue
68 * @alloc_ctx: allocation context for the vb2 @queue
90 struct vb2_queue queue; member in struct:xvip_dma
H A Dxilinx-dma.c278 * videobuf2 queue operations
284 * @queue: buffer list entry in the DMA engine queued buffers list
289 struct list_head queue; member in struct:xvip_dma_buffer
301 list_del(&buf->queue); xvip_dma_complete()
348 if (dma->queue.type == V4L2_BUF_TYPE_VIDEO_CAPTURE) { xvip_dma_buffer_queue()
377 list_add_tail(&buf->queue, &dma->queued_bufs); xvip_dma_buffer_queue()
382 if (vb2_is_streaming(&dma->queue)) xvip_dma_buffer_queue()
436 list_for_each_entry_safe(buf, nbuf, &dma->queued_bufs, queue) { xvip_dma_start_streaming()
438 list_del(&buf->queue); xvip_dma_start_streaming()
463 list_for_each_entry_safe(buf, nbuf, &dma->queued_bufs, queue) { xvip_dma_stop_streaming()
465 list_del(&buf->queue); xvip_dma_stop_streaming()
493 if (dma->queue.type == V4L2_BUF_TYPE_VIDEO_CAPTURE) xvip_dma_querycap()
608 if (vb2_is_busy(&dma->queue)) xvip_dma_set_format()
686 dma->video.queue = &dma->queue; xvip_dma_init()
700 /* ... and the buffers queue... */ xvip_dma_init()
712 dma->queue.type = type; xvip_dma_init()
713 dma->queue.io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF; xvip_dma_init()
714 dma->queue.lock = &dma->lock; xvip_dma_init()
715 dma->queue.drv_priv = dma; xvip_dma_init()
716 dma->queue.buf_struct_size = sizeof(struct xvip_dma_buffer); xvip_dma_init()
717 dma->queue.ops = &xvip_dma_queue_qops; xvip_dma_init()
718 dma->queue.mem_ops = &vb2_dma_contig_memops; xvip_dma_init()
719 dma->queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC xvip_dma_init()
721 ret = vb2_queue_init(&dma->queue); xvip_dma_init()
723 dev_err(dma->xdev->dev, "failed to initialize VB2 queue\n"); xvip_dma_init()
/linux-4.1.27/drivers/scsi/bnx2i/
H A Dbnx2i_sysfs.c31 * bnx2i_show_sq_info - return(s currently configured send queue (SQ) size
48 * bnx2i_set_sq_info - update send queue (SQ) size parameter
53 * Interface for user to change shared queue size allocated for each conn
92 * returns per-connection TCP history queue size parameter
109 * updates per-connection TCP history queue size parameter
/linux-4.1.27/drivers/s390/net/
H A Dqeth_core_main.c64 static void qeth_notify_skbs(struct qeth_qdio_out_q *queue,
68 static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
394 dev_err(&card->gdev->dev, "Failed to create completion queue\n"); qeth_alloc_cq()
512 static inline int qeth_is_cq(struct qeth_card *card, unsigned int queue) qeth_is_cq() argument
516 queue != 0 && qeth_is_cq()
517 queue == card->qdio.no_in_queues - 1; qeth_is_cq()
1247 static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue, qeth_clear_output_buffer() argument
1255 atomic_dec(&queue->set_pci_flags_count); qeth_clear_output_buffer()
1260 for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(queue->card); ++i) { qeth_clear_output_buffer()
1367 /* CHPP field bit 6 == 1 -> single queue */ qeth_update_from_chp_desc()
2868 /* inbound queue */ qeth_init_qdio_queues()
2890 /* outbound queue */ qeth_init_qdio_queues()
3311 struct qeth_qdio_q *queue = card->qdio.in_q; qeth_queue_input_buffer() local
3318 count = (index < queue->next_buf_to_init)? qeth_queue_input_buffer()
3320 (queue->next_buf_to_init - index) : qeth_queue_input_buffer()
3322 (queue->next_buf_to_init + QDIO_MAX_BUFFERS_PER_Q - index); qeth_queue_input_buffer()
3325 for (i = queue->next_buf_to_init; qeth_queue_input_buffer()
3326 i < queue->next_buf_to_init + count; ++i) { qeth_queue_input_buffer()
3328 &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q])) { qeth_queue_input_buffer()
3371 queue->next_buf_to_init, count); qeth_queue_input_buffer()
3379 queue->next_buf_to_init = (queue->next_buf_to_init + count) % qeth_queue_input_buffer()
3413 * Switched to packing state if the number of used buffers on a queue
3416 static void qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue) qeth_switch_to_packing_if_needed() argument
3418 if (!queue->do_pack) { qeth_switch_to_packing_if_needed()
3419 if (atomic_read(&queue->used_buffers) qeth_switch_to_packing_if_needed()
3422 QETH_CARD_TEXT(queue->card, 6, "np->pack"); qeth_switch_to_packing_if_needed()
3423 if (queue->card->options.performance_stats) qeth_switch_to_packing_if_needed()
3424 queue->card->perf_stats.sc_dp_p++; qeth_switch_to_packing_if_needed()
3425 queue->do_pack = 1; qeth_switch_to_packing_if_needed()
3432 * buffer on the queue this buffer will be prepared to be flushed.
3436 static int qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue) qeth_switch_to_nonpacking_if_needed() argument
3441 if (queue->do_pack) { qeth_switch_to_nonpacking_if_needed()
3442 if (atomic_read(&queue->used_buffers) qeth_switch_to_nonpacking_if_needed()
3445 QETH_CARD_TEXT(queue->card, 6, "pack->np"); qeth_switch_to_nonpacking_if_needed()
3446 if (queue->card->options.performance_stats) qeth_switch_to_nonpacking_if_needed()
3447 queue->card->perf_stats.sc_p_dp++; qeth_switch_to_nonpacking_if_needed()
3448 queue->do_pack = 0; qeth_switch_to_nonpacking_if_needed()
3450 buffer = queue->bufs[queue->next_buf_to_fill]; qeth_switch_to_nonpacking_if_needed()
3457 queue->next_buf_to_fill = qeth_switch_to_nonpacking_if_needed()
3458 (queue->next_buf_to_fill + 1) % qeth_switch_to_nonpacking_if_needed()
3468 * Called to flush a packing buffer if no more pci flags are on the queue.
3472 static int qeth_flush_buffers_on_no_pci(struct qeth_qdio_out_q *queue) qeth_flush_buffers_on_no_pci() argument
3476 buffer = queue->bufs[queue->next_buf_to_fill]; qeth_flush_buffers_on_no_pci()
3481 queue->next_buf_to_fill = qeth_flush_buffers_on_no_pci()
3482 (queue->next_buf_to_fill + 1) % QDIO_MAX_BUFFERS_PER_Q; qeth_flush_buffers_on_no_pci()
3488 static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index, qeth_flush_buffers() argument
3498 buf = queue->bufs[bidx]; qeth_flush_buffers()
3502 if (queue->bufstates) qeth_flush_buffers()
3503 queue->bufstates[bidx].user = buf; qeth_flush_buffers()
3505 if (queue->card->info.type == QETH_CARD_TYPE_IQD) qeth_flush_buffers()
3508 if (!queue->do_pack) { qeth_flush_buffers()
3509 if ((atomic_read(&queue->used_buffers) >= qeth_flush_buffers()
3512 !atomic_read(&queue->set_pci_flags_count)) { qeth_flush_buffers()
3515 atomic_inc(&queue->set_pci_flags_count); qeth_flush_buffers()
3519 if (!atomic_read(&queue->set_pci_flags_count)) { qeth_flush_buffers()
3528 atomic_inc(&queue->set_pci_flags_count); qeth_flush_buffers()
3534 queue->card->dev->trans_start = jiffies; qeth_flush_buffers()
3535 if (queue->card->options.performance_stats) { qeth_flush_buffers()
3536 queue->card->perf_stats.outbound_do_qdio_cnt++; qeth_flush_buffers()
3537 queue->card->perf_stats.outbound_do_qdio_start_time = qeth_flush_buffers()
3541 if (atomic_read(&queue->set_pci_flags_count)) qeth_flush_buffers()
3543 rc = do_QDIO(CARD_DDEV(queue->card), qdio_flags, qeth_flush_buffers()
3544 queue->queue_no, index, count); qeth_flush_buffers()
3545 if (queue->card->options.performance_stats) qeth_flush_buffers()
3546 queue->card->perf_stats.outbound_do_qdio_time += qeth_flush_buffers()
3548 queue->card->perf_stats.outbound_do_qdio_start_time; qeth_flush_buffers()
3549 atomic_add(count, &queue->used_buffers); qeth_flush_buffers()
3551 queue->card->stats.tx_errors += count; qeth_flush_buffers()
3555 QETH_CARD_TEXT(queue->card, 2, "flushbuf"); qeth_flush_buffers()
3556 QETH_CARD_TEXT_(queue->card, 2, " q%d", queue->queue_no); qeth_flush_buffers()
3557 QETH_CARD_TEXT_(queue->card, 2, " idx%d", index); qeth_flush_buffers()
3558 QETH_CARD_TEXT_(queue->card, 2, " c%d", count); qeth_flush_buffers()
3559 QETH_CARD_TEXT_(queue->card, 2, " err%d", rc); qeth_flush_buffers()
3563 qeth_schedule_recovery(queue->card); qeth_flush_buffers()
3566 if (queue->card->options.performance_stats) qeth_flush_buffers()
3567 queue->card->perf_stats.bufs_sent += count; qeth_flush_buffers()
3570 static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue) qeth_check_outbound_queue() argument
3578 * we have to get a pci flag out on the queue qeth_check_outbound_queue()
3580 if ((atomic_read(&queue->used_buffers) <= QETH_LOW_WATERMARK_PACK) || qeth_check_outbound_queue()
3581 !atomic_read(&queue->set_pci_flags_count)) { qeth_check_outbound_queue()
3582 if (atomic_xchg(&queue->state, QETH_OUT_Q_LOCKED_FLUSH) == qeth_check_outbound_queue()
3589 netif_stop_queue(queue->card->dev); qeth_check_outbound_queue()
3590 index = queue->next_buf_to_fill; qeth_check_outbound_queue()
3591 q_was_packing = queue->do_pack; qeth_check_outbound_queue()
3592 /* queue->do_pack may change */ qeth_check_outbound_queue()
3594 flush_cnt += qeth_switch_to_nonpacking_if_needed(queue); qeth_check_outbound_queue()
3596 !atomic_read(&queue->set_pci_flags_count)) qeth_check_outbound_queue()
3598 qeth_flush_buffers_on_no_pci(queue); qeth_check_outbound_queue()
3599 if (queue->card->options.performance_stats && qeth_check_outbound_queue()
3601 queue->card->perf_stats.bufs_sent_pack += qeth_check_outbound_queue()
3604 qeth_flush_buffers(queue, index, flush_cnt); qeth_check_outbound_queue()
3605 atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED); qeth_check_outbound_queue()
3610 void qeth_qdio_start_poll(struct ccw_device *ccwdev, int queue, qeth_qdio_start_poll() argument
3652 unsigned int queue, int first_element, int count) { qeth_qdio_cq_handler()
3657 if (!qeth_is_cq(card, queue)) qeth_qdio_cq_handler()
3697 rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, queue, qeth_qdio_cq_handler()
3720 unsigned int queue, int first_elem, int count, qeth_qdio_input_handler()
3725 QETH_CARD_TEXT_(card, 2, "qihq%d", queue); qeth_qdio_input_handler()
3728 if (qeth_is_cq(card, queue)) qeth_qdio_input_handler()
3729 qeth_qdio_cq_handler(card, qdio_err, queue, first_elem, count); qeth_qdio_input_handler()
3742 struct qeth_qdio_out_q *queue = card->qdio.out_qs[__queue]; qeth_qdio_output_handler() local
3760 buffer = queue->bufs[bidx]; qeth_qdio_output_handler()
3763 if (queue->bufstates && qeth_qdio_output_handler()
3764 (queue->bufstates[bidx].flags & qeth_qdio_output_handler()
3772 qeth_notify_skbs(queue, buffer, qeth_qdio_output_handler()
3775 buffer->aob = queue->bufstates[bidx].aob; qeth_qdio_output_handler()
3776 QETH_CARD_TEXT_(queue->card, 5, "pel%d", bidx); qeth_qdio_output_handler()
3777 QETH_CARD_TEXT(queue->card, 5, "aob"); qeth_qdio_output_handler()
3778 QETH_CARD_TEXT_(queue->card, 5, "%lx", qeth_qdio_output_handler()
3780 if (qeth_init_qdio_out_buf(queue, bidx)) { qeth_qdio_output_handler()
3790 qeth_notify_skbs(queue, buffer, n); qeth_qdio_output_handler()
3793 qeth_clear_output_buffer(queue, buffer, qeth_qdio_output_handler()
3796 qeth_cleanup_handled_pending(queue, bidx, 0); qeth_qdio_output_handler()
3798 atomic_sub(count, &queue->used_buffers); qeth_qdio_output_handler()
3799 /* check if we need to do something on this outbound queue */ qeth_qdio_output_handler()
3801 qeth_check_outbound_queue(queue); qeth_qdio_output_handler()
3803 netif_wake_queue(queue->card->dev); qeth_qdio_output_handler()
4001 static inline int qeth_fill_buffer(struct qeth_qdio_out_q *queue, qeth_fill_buffer() argument
4041 if (!queue->do_pack) { qeth_fill_buffer()
4042 QETH_CARD_TEXT(queue->card, 6, "fillbfnp"); qeth_fill_buffer()
4047 QETH_CARD_TEXT(queue->card, 6, "fillbfpa"); qeth_fill_buffer()
4048 if (queue->card->options.performance_stats) qeth_fill_buffer()
4049 queue->card->perf_stats.skbs_sent_pack++; qeth_fill_buffer()
4051 QETH_MAX_BUFFER_ELEMENTS(queue->card)) { qeth_fill_buffer()
4064 struct qeth_qdio_out_q *queue, struct sk_buff *skb, qeth_do_send_packet_fast()
4071 /* spin until we get the queue ... */ qeth_do_send_packet_fast()
4072 while (atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED, qeth_do_send_packet_fast()
4074 /* ... now we've got the queue */ qeth_do_send_packet_fast()
4075 index = queue->next_buf_to_fill; qeth_do_send_packet_fast()
4076 buffer = queue->bufs[queue->next_buf_to_fill]; qeth_do_send_packet_fast()
4083 queue->next_buf_to_fill = (queue->next_buf_to_fill + 1) % qeth_do_send_packet_fast()
4085 atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED); qeth_do_send_packet_fast()
4086 qeth_fill_buffer(queue, buffer, skb, hdr, offset, hd_len); qeth_do_send_packet_fast()
4087 qeth_flush_buffers(queue, index, 1); qeth_do_send_packet_fast()
4090 atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED); qeth_do_send_packet_fast()
4095 int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue, qeth_do_send_packet() argument
4106 /* spin until we get the queue ... */ qeth_do_send_packet()
4107 while (atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED, qeth_do_send_packet()
4109 start_index = queue->next_buf_to_fill; qeth_do_send_packet()
4110 buffer = queue->bufs[queue->next_buf_to_fill]; qeth_do_send_packet()
4116 atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED); qeth_do_send_packet()
4119 /* check if we need to switch packing state of this queue */ qeth_do_send_packet()
4120 qeth_switch_to_packing_if_needed(queue); qeth_do_send_packet()
4121 if (queue->do_pack) { qeth_do_send_packet()
4129 queue->next_buf_to_fill = qeth_do_send_packet()
4130 (queue->next_buf_to_fill + 1) % qeth_do_send_packet()
4132 buffer = queue->bufs[queue->next_buf_to_fill]; qeth_do_send_packet()
4137 qeth_flush_buffers(queue, start_index, qeth_do_send_packet()
4139 atomic_set(&queue->state, qeth_do_send_packet()
4145 tmp = qeth_fill_buffer(queue, buffer, skb, hdr, -1, 0); qeth_do_send_packet()
4146 queue->next_buf_to_fill = (queue->next_buf_to_fill + tmp) % qeth_do_send_packet()
4150 qeth_flush_buffers(queue, start_index, flush_count); qeth_do_send_packet()
4151 else if (!atomic_read(&queue->set_pci_flags_count)) qeth_do_send_packet()
4152 atomic_xchg(&queue->state, QETH_OUT_Q_LOCKED_FLUSH); qeth_do_send_packet()
4154 * queue->state will go from LOCKED -> UNLOCKED or from qeth_do_send_packet()
4159 while (atomic_dec_return(&queue->state)) { qeth_do_send_packet()
4161 start_index = queue->next_buf_to_fill; qeth_do_send_packet()
4163 flush_count += qeth_switch_to_nonpacking_if_needed(queue); qeth_do_send_packet()
4166 * flag out on the queue qeth_do_send_packet()
4168 if (!flush_count && !atomic_read(&queue->set_pci_flags_count)) qeth_do_send_packet()
4169 flush_count += qeth_flush_buffers_on_no_pci(queue); qeth_do_send_packet()
4171 qeth_flush_buffers(queue, start_index, flush_count); qeth_do_send_packet()
4173 /* at this point the queue is UNLOCKED again */ qeth_do_send_packet()
4174 if (queue->card->options.performance_stats && do_pack) qeth_do_send_packet()
4175 queue->card->perf_stats.bufs_sent_pack += flush_count; qeth_do_send_packet()
5729 {"queue 0 buffer usage"},
5730 /* 20 */{"queue 1 buffer usage"},
5731 {"queue 2 buffer usage"},
5732 {"queue 3 buffer usage"},
3650 qeth_qdio_cq_handler(struct qeth_card *card, unsigned int qdio_err, unsigned int queue, int first_element, int count) qeth_qdio_cq_handler() argument
3719 qeth_qdio_input_handler(struct ccw_device *ccwdev, unsigned int qdio_err, unsigned int queue, int first_elem, int count, unsigned long card_ptr) qeth_qdio_input_handler() argument
4063 qeth_do_send_packet_fast(struct qeth_card *card, struct qeth_qdio_out_q *queue, struct sk_buff *skb, struct qeth_hdr *hdr, int elements_needed, int offset, int hd_len) qeth_do_send_packet_fast() argument
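
The qeth excerpts describe packing hysteresis on an output queue: switch to packing (several small frames per buffer) once the number of used buffers passes a high watermark, and switch back, flushing the partly filled buffer, once usage drops below a low watermark. A compact sketch of that state machine; thresholds and names are invented:

/* Illustrative packing/non-packing hysteresis, not the qeth driver. */
#include <stdbool.h>
#include <stdio.h>

#define HIGH_WATERMARK 6        /* switch to packing at or above this usage */
#define LOW_WATERMARK  2        /* switch back to non-packing at or below this usage */

struct out_queue {
    int used_buffers;
    bool do_pack;
};

static void update_packing(struct out_queue *q)
{
    if (!q->do_pack && q->used_buffers >= HIGH_WATERMARK)
        q->do_pack = true;      /* start packing several frames per buffer */
    else if (q->do_pack && q->used_buffers <= LOW_WATERMARK)
        q->do_pack = false;     /* flush the partial buffer, stop packing */
}

int main(void)
{
    struct out_queue q = { 0, false };
    int usage[] = { 1, 4, 7, 5, 3, 1 };

    for (unsigned i = 0; i < sizeof(usage) / sizeof(usage[0]); i++) {
        q.used_buffers = usage[i];
        update_packing(&q);
        printf("used=%d packing=%d\n", q.used_buffers, q.do_pack);
    }
    return 0;
}
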
/linux-4.1.27/drivers/net/ethernet/chelsio/cxgb4/
H A Dt4_hw.h88 SGE_EQ_IDXSIZE = 64, /* egress queue pidx/cidx unit size */
91 SGE_INTRDST_IQ = 1, /* destination is an ingress queue */
93 SGE_UPDATEDEL_NONE = 0, /* ingress queue pidx update delivery */
98 SGE_HOSTFCMODE_NONE = 0, /* egress queue cidx updates */
99 SGE_HOSTFCMODE_IQ = 1, /* sent to ingress queue */
101 SGE_HOSTFCMODE_BOTH = 3, /* ingress queue and status page */
103 SGE_FETCHBURSTMIN_16B = 0,/* egress queue descriptor fetch minimum */
108 SGE_FETCHBURSTMAX_64B = 0,/* egress queue descriptor fetch maximum */
113 SGE_CIDXFLUSHTHRESH_1 = 0,/* egress queue cidx flush threshold */
122 SGE_INGPADBOUNDARY_SHIFT = 5,/* ingress queue pad boundary */
137 struct sge_qstat { /* data written to SGE queue status entries */
/linux-4.1.27/drivers/media/platform/omap3isp/
H A Dispvideo.h87 spinlock_t lock; /* Pipeline state and queue flags */
121 * @irqlist: List head for insertion into IRQ queue
133 /* Set if DMA queue becomes empty when ISP_PIPELINE_STREAM_CONTINUOUS */
135 /* Set when queuing buffer to an empty DMA queue */
144 * @queue: Resume streaming when a buffer is queued. Called on VIDIOC_QBUF
148 int(*queue)(struct isp_video *video, struct isp_buffer *buffer); member in struct:isp_video_operations
173 /* Video buffers queue */
175 struct vb2_queue *queue; member in struct:isp_video
176 struct mutex queue_lock; /* protects the queue */
189 struct vb2_queue queue; member in struct:isp_video_fh
196 container_of(q, struct isp_video_fh, queue)
/linux-4.1.27/drivers/net/wireless/prism54/
H A Disl_38xx.c57 /* data tx queue not empty */ isl38xx_handle_sleep_request()
61 /* management tx queue not empty */ isl38xx_handle_sleep_request()
66 /* data rx queue not empty */ isl38xx_handle_sleep_request()
70 /* management rx queue not empty */ isl38xx_handle_sleep_request()
102 /* either data or management transmit queue has a frame pending isl38xx_handle_wakeup()
223 isl38xx_in_queue(isl38xx_control_block *cb, int queue) isl38xx_in_queue() argument
225 const s32 delta = (le32_to_cpu(cb->driver_curr_frag[queue]) - isl38xx_in_queue()
226 le32_to_cpu(cb->device_curr_frag[queue])); isl38xx_in_queue()
228 /* determine the amount of fragments in the queue depending on the type isl38xx_in_queue()
229 * of the queue, either transmit or receive */ isl38xx_in_queue()
233 switch (queue) { isl38xx_in_queue()
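
isl38xx_in_queue() above derives the number of pending fragments from the difference between the driver's and the device's current fragment counters, interpreted per queue type. The counter-difference core, stripped to a few lines (illustrative only):

/* Illustrative producer/consumer counter difference, not the prism54 code. */
#include <stdio.h>

struct frag_queue {
    unsigned int driver_curr_frag;      /* producer counter (driver) */
    unsigned int device_curr_frag;      /* consumer counter (device) */
};

/* Number of fragments the device has not consumed yet. */
static int in_queue(const struct frag_queue *q)
{
    return (int)(q->driver_curr_frag - q->device_curr_frag);
}

int main(void)
{
    struct frag_queue q = { .driver_curr_frag = 10, .device_curr_frag = 7 };

    printf("%d fragments pending\n", in_queue(&q));     /* prints 3 */
    return 0;
}
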
/linux-4.1.27/drivers/ptp/
H A Dptp_clock.c46 /* time stamp event queue operations */
53 static void enqueue_external_timestamp(struct timestamp_event_queue *queue, enqueue_external_timestamp() argument
63 spin_lock_irqsave(&queue->lock, flags); enqueue_external_timestamp()
65 dst = &queue->buf[queue->tail]; enqueue_external_timestamp()
70 if (!queue_free(queue)) enqueue_external_timestamp()
71 queue->head = (queue->head + 1) % PTP_MAX_TIMESTAMPS; enqueue_external_timestamp()
73 queue->tail = (queue->tail + 1) % PTP_MAX_TIMESTAMPS; enqueue_external_timestamp()
75 spin_unlock_irqrestore(&queue->lock, flags); enqueue_external_timestamp()
/linux-4.1.27/arch/xtensa/platforms/iss/
H A Dsimdisk.c31 struct request_queue *queue; member in struct:simdisk
281 dev->queue = blk_alloc_queue(GFP_KERNEL); simdisk_setup()
282 if (dev->queue == NULL) { simdisk_setup()
287 blk_queue_make_request(dev->queue, simdisk_make_request); simdisk_setup()
288 dev->queue->queuedata = dev; simdisk_setup()
298 dev->gd->queue = dev->queue; simdisk_setup()
308 blk_cleanup_queue(dev->queue); simdisk_setup()
309 dev->queue = NULL; simdisk_setup()
365 if (dev->queue) simdisk_teardown()
366 blk_cleanup_queue(dev->queue); simdisk_teardown()
/linux-4.1.27/net/sched/
H A Dsch_plug.c48 * State of the queue, when used for network output buffering:
62 * from head to end of the queue. The queue turns into
63 * a pass-through queue for newly arriving packets.
83 * Number of packets from the head of the queue, that can
111 /* No more packets to dequeue. Block the queue plug_dequeue()
153 * TCQ_PLUG_BUFFER: Inset a plug into the queue and
155 * TCQ_PLUG_RELEASE_ONE: Dequeue packets from queue head
157 * TCQ_PLUG_RELEASE_INDEFINITE: Dequeue all packets from queue.
159 * command is received (just act as a pass-thru queue).
160 * TCQ_PLUG_LIMIT: Increase/decrease queue size
/linux-4.1.27/drivers/md/
H A Ddm-queue-length.c5 * dm-queue-length.c
12 * queue-length path selector - choose a path with the least number of
25 #define DM_MSG_PREFIX "multipath queue-length"
123 *error = "queue-length ps: incorrect number of arguments"; ql_add_path()
128 *error = "queue-length ps: invalid repeat count"; ql_add_path()
135 *error = "queue-length ps: Error allocating path information"; ql_add_path()
221 .name = "queue-length",
/linux-4.1.27/drivers/gpu/drm/
H A Ddrm_flip_work.c47 * drm_flip_work_queue_task - queue a specific task
52 * func) on a work queue after drm_flip_work_commit() is called.
66 * drm_flip_work_queue - queue work
68 * @val: the value to queue
71 * func) on a work queue after drm_flip_work_commit() is called.
91 * @wq: the work-queue to run the queued work on
94 * on a workqueue. The typical usage would be to queue work (via
/linux-4.1.27/drivers/net/ethernet/sfc/
H A Dtx.c83 "TX queue %d transmission id %x complete\n", efx_dequeue_buffer()
84 tx_queue->queue, tx_queue->read_count); efx_dequeue_buffer()
153 * queue, it is possible for the completion path to race with efx_tx_maybe_stop_queue()
156 * Therefore we stop the queue first, then read read_count efx_tx_maybe_stop_queue()
158 * restart the queue if the fill level turns out to be low efx_tx_maybe_stop_queue()
318 * Add a socket buffer to a TX queue
321 * the TX queue. The queue's insert pointer will be incremented by
325 * the queue's insert pointer will be restored to its original value.
387 /* Add to TX queue, splitting across DMA boundaries */ efx_enqueue_skb()
437 /* There could be packets left on the partner queue if those efx_enqueue_skb()
455 " TX queue %d could not map skb with %d bytes %d " efx_enqueue_skb()
456 "fragments for DMA\n", tx_queue->queue, skb->len, efx_enqueue_skb()
483 /* Remove packets from the TX queue
485 * This removes packets from the TX queue, up to and including the
505 "TX queue %d spurious TX completion id %x\n", efx_dequeue_buffers()
506 tx_queue->queue, read_ptr); efx_dequeue_buffers()
557 /* Must be inverse of queue lookup in efx_hard_start_xmit() */ efx_init_tx_queue_core_txq()
560 tx_queue->queue / EFX_TXQ_TYPES + efx_init_tx_queue_core_txq()
561 ((tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ? efx_init_tx_queue_core_txq()
589 if (!(tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI)) efx_for_each_possible_channel_tx_queue()
637 /* See if we need to restart the netif queue. This memory efx_xmit_done()
639 * efx_dequeue_buffers()) before reading the queue status. efx_xmit_done()
652 /* Check whether the hardware queue is now empty */ efx_xmit_done()
669 /* At most half the descriptors in the queue at any time will refer to
690 "creating TX queue %d size %#x mask %#x\n", efx_probe_tx_queue()
691 tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask); efx_probe_tx_queue()
699 if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD) { efx_probe_tx_queue()
728 "initialising TX queue %d\n", tx_queue->queue); efx_init_tx_queue()
749 "shutting down TX queue %d\n", tx_queue->queue); efx_fini_tx_queue()
774 "destroying TX queue %d\n", tx_queue->queue); efx_remove_tx_queue()
916 * efx_tx_queue_insert - push descriptors onto the TX queue
917 * @tx_queue: Efx TX queue
920 * @final_buffer: The final buffer inserted into the queue
922 * Push descriptors onto the TX queue.
963 * Put a TSO header into the TX queue.
1086 * @tx_queue: Efx TX queue
1138 * @tx_queue: Efx TX queue
1253 * @tx_queue: Efx TX queue
1321 /* There could be packets left on the partner queue if those efx_enqueue_skb_tso()
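
The efx_tx_maybe_stop_queue() comment above spells out the stop-then-recheck rule: stop the queue first, then re-read the completion count (with memory barriers in the real driver), and restart immediately if the fill level turned out to be low, so a racing completion cannot leave the queue stopped forever. A single-threaded sketch of that decision order; barriers are elided and all names are invented:

/* Illustrative stop-then-recheck pattern; the real driver places memory
 * barriers between these steps, omitted in this sketch. */
#include <stdbool.h>
#include <stdio.h>

#define RING_SIZE      16
#define STOP_THRESHOLD 2        /* stop when fewer than this many slots are free */

struct tx_queue {
    unsigned int insert_count;  /* descriptors queued by the driver */
    unsigned int read_count;    /* descriptors completed by the hardware */
    bool stopped;
};

static unsigned int fill_level(const struct tx_queue *q)
{
    return q->insert_count - q->read_count;
}

static void maybe_stop_queue(struct tx_queue *q)
{
    if (RING_SIZE - fill_level(q) >= STOP_THRESHOLD)
        return;
    q->stopped = true;          /* stop first... */
    /* ...then re-read the fill level; a completion may have freed slots
     * between the check above and the stop. */
    if (RING_SIZE - fill_level(q) >= STOP_THRESHOLD)
        q->stopped = false;     /* restart: fill level is low enough after all */
}

int main(void)
{
    struct tx_queue q = { .insert_count = 15, .read_count = 0, .stopped = false };

    maybe_stop_queue(&q);
    printf("stopped=%d\n", q.stopped);  /* 1: ring nearly full */

    q.read_count = 8;                   /* completions arrive... */
    if (q.stopped && RING_SIZE - fill_level(&q) >= STOP_THRESHOLD)
        q.stopped = false;              /* ...and the completion path restarts the queue */
    printf("stopped=%d\n", q.stopped);  /* 0 */
    return 0;
}
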
H A Dvfdi.h33 * follow the same form and be sent to the first event queue assigned
34 * to the VF while that queue is enabled by the VF driver.
132 * @u.init_evq.index: Index of event queue to create.
133 * @u.init_evq.buf_count: Number of 4k buffers backing event queue.
135 * address of each page backing the event queue.
136 * @u.init_rxq.index: Index of receive queue to create.
137 * @u.init_rxq.buf_count: Number of 4k buffers backing receive queue.
138 * @u.init_rxq.evq: Instance of event queue to target receive events at.
142 * address of each page backing the receive queue.
143 * @u.init_txq.index: Index of transmit queue to create.
144 * @u.init_txq.buf_count: Number of 4k buffers backing transmit queue.
145 * @u.init_txq.evq: Instance of event queue to target transmit completion
150 * address of each page backing the transmit queue.
152 * all traffic at this receive queue.
/linux-4.1.27/arch/sparc/kernel/
H A Dsun4v_ivec.S29 /* Get CPU mondo queue base phys address into %g7. */
49 /* Update queue head pointer. */
77 /* Get DEV mondo queue base phys address into %g5. */
91 /* Update queue head pointer, this frees up some registers. */
135 /* Get RES mondo queue base phys address into %g5. */
141 /* If the first word is non-zero, queue is full. */
151 /* Copy 64-byte queue entry into kernel buffer. */
177 /* Update queue head pointer. */
210 /* The queue is full, consolidate our damage by setting
246 /* Get RES mondo queue base phys address into %g5. */
252 /* If the first word is non-zero, queue is full. */
262 /* Copy 64-byte queue entry into kernel buffer. */
288 /* Update queue head pointer. */
321 /* The queue is full, consolidate our damage by setting
/linux-4.1.27/drivers/crypto/ccp/
H A Dccp-dev.c71 * ccp_enqueue_cmd - queue an operation for processing by the CCP
76 * would exceed the defined length of the cmd queue the cmd will
120 /* Find an idle queue */ ccp_enqueue_cmd()
133 /* If we found an idle queue, wake it up */ ccp_enqueue_cmd()
155 /* Find an idle queue */ ccp_do_cmd_backlog()
165 /* If we found an idle queue, wake it up */ ccp_do_cmd_backlog()
336 /* Allocate a dma pool for this queue */ ccp_init()
354 /* Reserve 2 KSB regions for the queue */ ccp_init()
359 /* Preset some register values and masks that are queue ccp_init()
373 /* Build queue interrupt mask (two interrupts per queue) */ ccp_init()
377 /* For arm64 set the recommended queue cache settings */ ccp_init()
382 dev_dbg(dev, "queue #%u available\n", i); ccp_init()
412 /* Create a kthread for each queue */ ccp_init()
421 dev_err(dev, "error creating queue thread (%ld)\n", ccp_init()
479 /* Stop the queue kthreads */ ccp_destroy()
484 /* Build queue interrupt mask (two interrupt masks per queue) */ ccp_destroy()
506 /* Flush the cmd and backlog queue */ ccp_destroy()
/linux-4.1.27/drivers/net/ethernet/chelsio/cxgb3/
H A Dsge.c88 * Types of Tx queues in each queue set. Order here matters, do not change.
125 struct rsp_desc { /* response queue descriptor */
183 * refill_rspq - replenish an SGE response queue
185 * @q: the response queue to replenish
188 * Replenishes a response queue by making the supplied number of responses
217 * @q: the Tx queue containing Tx descriptors for the packet
279 * @q: the Tx queue to reclaim descriptors from
282 * Reclaims Tx descriptors from an SGE Tx queue and frees the associated
283 * Tx buffers. Called with the Tx queue lock held.
317 * @q: the Tx queue to reclaim completed descriptors from
322 * queue's lock held.
340 * should_restart_tx - are there enough resources to restart a Tx queue?
341 * @q: the Tx queue
343 * Checks if there are enough descriptors to restart a suspended Tx queue.
377 * Release the buffers on an SGE free-buffer Rx queue. HW fetching from
378 * this queue should be stopped before calling this function.
498 * @n does not exceed the queue's capacity.
637 * @q: the queue set
664 * free_qset - free the resources of an SGE queue set
665 * @adapter: the adapter owning the queue set
666 * @q: the queue set
668 * Release the HW and SW resources associated with an SGE queue set, such
670 * queue set must be quiesced prior to calling this.
720 * init_qset_cntxt - initialize an SGE queue set context info
721 * @qs: the queue set
722 * @id: the queue set id
724 * Initializes the TIDs and context ids for the queues of a queue set.
996 * check_ring_tx_db - check and potentially ring a Tx queue's doorbell
998 * @q: the Tx queue
1000 * Ring the doorbell if a Tx queue is asleep. There is a natural race,
1036 * @q: the SGE Tx queue
1131 * @q: the Tx queue
1215 * eth_xmit - add a packet to the Ethernet Tx queue
1219 * Add a packet to an SGE Tx queue. Runs with softirqs disabled.
1253 "%s: Tx ring %u full while queue awake!\n", t3_eth_xmit()
1306 * good thing. We also run them without holding our Tx queue lock, t3_eth_xmit()
1353 * check_desc_avail - check descriptor availability on a send queue
1355 * @q: the send queue
1358 * @qid: the Tx queue number in its queue set (TXQ_OFLD or TXQ_CTRL)
1361 * SGE send queue. If the queue is already suspended or not enough
1363 * Must be called with the Tx queue locked.
1395 * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
1396 * @q: the SGE control Tx queue
1416 * ctrl_xmit - send a packet through an SGE control Tx queue
1418 * @q: the control queue
1421 * Send a packet through an SGE control Tx queue. Packets sent through
1422 * a control queue must fit entirely as immediate data in a single Tx
1467 * restart_ctrlq - restart a suspended control queue
1468 * @qs: the queue set containing the control queue
1470 * Resumes transmission on a suspended Tx control queue.
1510 * Send a management message through control queue 0
1570 * @q: the Tx queue
1636 * ofld_xmit - send a packet through an offload queue
1638 * @q: the Tx offload queue
1641 * Send an offload packet through an SGE offload queue.
1678 * restart_offloadq - restart a suspended offload queue
1679 * @qs: the queue set containing the offload queue
1681 * Resumes transmission on a suspended Tx offload queue.
1735 * queue_set - return the queue set a packet should use
1738 * Maps a packet to the SGE queue set it should use. The desired queue
1751 * Tx queue. This is indicated by bit 0 in the packet's priority.
1764 * appropriate Tx queue as follows: bit 0 indicates whether the packet
1765 * should be sent as regular or control, bits 1-3 select the queue set.
1779 * offload_enqueue - add an offload packet to an SGE offload receive queue
1780 * @q: the SGE response queue
1783 * Add a new offload packet to an SGE response queue's offload packet
1784 * queue. If the packet is the first on the queue it schedules the RX
1785 * softirq to process the queue.
1803 * @q: the SGE response queue that assembled the bundle
1824 * The NAPI handler for offload packets when a response queue is serviced
1839 struct sk_buff_head queue; ofld_poll() local
1843 __skb_queue_head_init(&queue); ofld_poll()
1844 skb_queue_splice_init(&q->rx_queue, &queue); ofld_poll()
1845 if (skb_queue_empty(&queue)) { ofld_poll()
1853 skb_queue_walk_safe(&queue, skb, tmp) { ofld_poll()
1858 __skb_unlink(skb, &queue); ofld_poll()
1868 if (!skb_queue_empty(&queue)) { ofld_poll()
1869 /* splice remaining packets back onto Rx queue */ ofld_poll()
1871 skb_queue_splice(&queue, &q->rx_queue); ofld_poll()
1883 * @rq: the response queue that received the packet
1889 * queue. Returns the index of the next available slot in the bundle.
1914 * @qs: the queue set to resume
1916 * Restarts suspended Tx queues of an SGE queue set if they have enough
2005 * @rq: the response queue that received the packet
2055 * @qs: the associated queue set
2142 * @qs: the queue set corresponding to the response
2146 * indications and completion credits for the queue set's Tx queues.
2178 * @qs: the queue set whose Tx queues are to be examined
2179 * @sleeping: indicates which Tx queue sent GTS
2181 * Checks if some of a queue set's Tx queues need to ring their doorbells
2214 * @q: the response queue
2241 * process_responses - process responses from an SGE response queue
2243 * @qs: the queue set to which the response queue belongs
2246 * Process responses from an SGE response queue up to the supplied budget.
2248 * for the queues that belong to the response queue's queue set.
2252 * on this queue. If the system is under memory shortage use a fairly
2388 smp_mb(); /* commit Tx queue .processed updates */ process_responses()
2449 * process_pure_responses - process pure responses from a response queue
2451 * @qs: the queue set owning the response queue
2497 smp_mb(); /* commit Tx queue .processed updates */ process_pure_responses()
2507 * @q: the response queue
2537 * The MSI-X interrupt handler for an SGE response queue for the non-NAPI case
2538 * (i.e., response queue serviced in hard interrupt).
2556 * The MSI-X interrupt handler for an SGE response queue for the NAPI case
2557 * (i.e., response queue serviced by NAPI polling).
2575 * the same MSI vector. We use one SGE response queue per port in this mode
2576 * and protect all response queues with queue 0's lock.
2625 * one SGE response queue per port in this mode and protect all response
2626 * queues with queue 0's lock.
2663 * the same interrupt pin. We use one SGE response queue per port in this mode
2664 * and protect all response queues with queue 0's lock.
2701 * response queue per port in this mode and protect all response queues with
2702 * queue 0's lock.
2735 * response queue per port in this mode and protect all response queues with
2736 * queue 0's lock.
2814 CH_ALERT(adapter, "SGE response queue credit overflow\n"); t3_sge_err_intr_handler()
2820 "packet delivered to disabled response queue " t3_sge_err_intr_handler()
2840 * @data: the SGE queue set to maintain
2842 * Runs periodically from a timer to perform maintenance of an SGE queue
2847 * queue so this timer is relatively infrequent and does any cleanup only
2848 * if the Tx queue has not seen any new packets in a while. We make a
2850 * around if we cannot get a queue's lock (which most likely is because
2884 * @data: the SGE queue set to maintain
2888 * when out of memory a queue can become empty. We try to add only a few
2889 * buffers here, the queue will be replenished fully as these new buffers
2892 * b) Return coalesced response queue credits in case a response queue is
2940 * t3_update_qset_coalesce - update coalescing settings for a queue set
2941 * @qs: the SGE queue set
2942 * @p: new queue set parameters
2944 * Update the coalescing settings for an SGE queue set. Nothing is done
2945 * if the queue set is not initialized yet.
2955 * t3_sge_alloc_qset - initialize an SGE queue set
2957 * @id: the queue set id
2958 * @nports: how many Ethernet ports will be using this queue set
2959 * @irq_vec_idx: the IRQ vector index for response queue interrupts
2960 * @p: configuration parameters for this queue set
2961 * @ntxq: number of Tx queues for the queue set
2962 * @netdev: net device associated with this queue set
2963 * @netdevq: net device TX queue associated with this queue set
2965 * Allocate resources and initialize an SGE queue set. A queue set
2966 * comprises a response queue, two Rx free-buffer queues, and up to 3
2968 * queue, offload queue, and control queue.
3004 * The control queue always uses immediate data so does not t3_sge_alloc_qset()
3113 CH_ALERT(adapter, "free list queue 0 initialization failed\n"); t3_sge_alloc_qset()
3117 CH_WARN(adapter, "free list queue 0 enabled with %d credits\n", t3_sge_alloc_qset()
3123 CH_WARN(adapter, "free list queue 1 enabled with %d credits\n", t3_sge_alloc_qset()
3143 * Starts each SGE queue set's timer callback
3164 * Stops each SGE queue set's timer callback
3184 * Frees resources used by the SGE queue sets.
3212 * case it also disables any pending queue restart tasklets. Note that
3240 * We do not initialize any of the queue sets here, instead the driver
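   [Editor's note] The t3_sge_alloc_qset() comment above spells out what an SGE queue set consists of: one response queue, two Rx free-buffer queues, and up to three Tx queues (Ethernet, offload, control). A hypothetical layout mirroring that description; the types and fields are placeholders, not the driver's real sge_rspq/sge_fl/sge_txq structures:

	/* Illustrative only: mirrors the queue-set composition described above. */
	struct example_rspq { unsigned int cidx; };		/* response queue state */
	struct example_fl   { unsigned int credits; };		/* Rx free-buffer list state */
	struct example_txq  { unsigned int cidx, pidx; };	/* Tx descriptor ring state */

	enum { EX_TXQ_ETH, EX_TXQ_OFLD, EX_TXQ_CTRL };		/* up to 3 Tx queues */

	struct example_qset {
		struct example_rspq rspq;	/* one response queue */
		struct example_fl   fl[2];	/* two Rx free-buffer queues */
		struct example_txq  txq[3];	/* Ethernet, offload, control */
	};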
/linux-4.1.27/drivers/media/v4l2-core/
H A Dv4l2-mem2mem.c48 /* Offset base for buffers on the destination queue - used to distinguish
184 struct v4l2_m2m_ctx, queue); v4l2_m2m_try_run()
193 * the pending job queue and add it if so.
201 * If a queue is buffered (for example a decoder hardware ringbuffer that has
203 * on that queue.
236 dprintk("On job queue already\n"); v4l2_m2m_try_schedule()
270 list_add_tail(&m2m_ctx->queue, &m2m_dev->job_queue); v4l2_m2m_try_schedule()
303 list_del(&m2m_ctx->queue); v4l2_m2m_cancel_job()
306 dprintk("m2m_ctx: %p had been on queue and was removed\n", v4l2_m2m_cancel_job()
309 /* Do nothing, was not on queue/running */ v4l2_m2m_cancel_job()
338 list_del(&m2m_dev->curr_ctx->queue); v4l2_m2m_job_finish()
354 * v4l2_m2m_reqbufs() - multi-queue-aware REQBUFS multiplexer
367 * v4l2_m2m_querybuf() - multi-queue-aware QUERYBUF multiplexer
381 /* Adjust MMAP memory offsets for the CAPTURE queue */ v4l2_m2m_querybuf()
457 * v4l2_m2m_streamon() - turn on streaming for a video queue
475 * v4l2_m2m_streamoff() - turn off streaming for a video queue
495 /* We should not be scheduled anymore, since we're dropping a queue. */ v4l2_m2m_streamoff()
497 list_del(&m2m_ctx->queue); v4l2_m2m_streamoff()
501 /* Drop queue, since streamoff returns device to the same state as after v4l2_m2m_streamoff()
521 * is available to dequeue (with dqbuf) from the source queue, this will
523 * returned in case of the destination queue.
606 * seamlessly for videobuffer, which will receive normal per-queue offsets and
607 * proper videobuf queue pointers. The differentiation is made outside videobuf
670 * @vq_init - a callback for queue type-specific initialization function to be
699 INIT_LIST_HEAD(&m2m_ctx->queue); v4l2_m2m_ctx_init()
835 * for the output and the capture buffer queue.
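   [Editor's note] The v4l2_m2m_try_schedule()/v4l2_m2m_job_finish() hits above revolve around one idea: a context is linked onto the device's job queue at most once, and unlinked when its job completes or is cancelled. A stripped-down sketch of that bookkeeping; the structure and flag names are placeholders, not the m2m framework's:

	#include <linux/list.h>
	#include <linux/spinlock.h>
	#include <linux/types.h>

	struct example_m2m_dev {
		spinlock_t		job_lock;
		struct list_head	job_queue;	/* contexts waiting to run */
	};

	struct example_m2m_ctx {
		struct list_head	queue;		/* link in dev->job_queue */
		bool			queued;
	};

	/* Queue a context for execution unless it is already pending. */
	static void example_try_schedule(struct example_m2m_dev *dev,
					 struct example_m2m_ctx *ctx)
	{
		unsigned long flags;

		spin_lock_irqsave(&dev->job_lock, flags);
		if (!ctx->queued) {			/* "On job queue already" check */
			list_add_tail(&ctx->queue, &dev->job_queue);
			ctx->queued = true;
		}
		spin_unlock_irqrestore(&dev->job_lock, flags);
	}

	/* Drop a context from the job queue when its job finishes or is cancelled. */
	static void example_job_done(struct example_m2m_dev *dev,
				     struct example_m2m_ctx *ctx)
	{
		unsigned long flags;

		spin_lock_irqsave(&dev->job_lock, flags);
		if (ctx->queued) {
			list_del(&ctx->queue);
			ctx->queued = false;
		}
		spin_unlock_irqrestore(&dev->job_lock, flags);
	}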
/linux-4.1.27/drivers/s390/scsi/
H A Dzfcp_qdio.h21 * @res_q: response queue
22 * @req_q: request queue
24 * @req_q_free: number of free buffers in queue
26 * @req_q_lock: lock to serialize access to request queue
29 * @req_q_full: queue full incidents
50 * struct zfcp_qdio_req - qdio queue related values for a request
58 * @qdio_outb_usage: usage of outbound queue
97 * @qdio: request queue where to start putting the request
104 * This is the start of putting the request into the queue, the last
105 * step is passing the request to zfcp_qdio_send. The request queue
/linux-4.1.27/drivers/net/wireless/ti/wlcore/
H A Dtx.h198 static inline int wl1271_tx_get_queue(int queue) wl1271_tx_get_queue() argument
200 switch (queue) { wl1271_tx_get_queue()
215 int wlcore_tx_get_mac80211_queue(struct wl12xx_vif *wlvif, int queue) wlcore_tx_get_mac80211_queue() argument
219 switch (queue) { wlcore_tx_get_mac80211_queue()
263 u8 queue, enum wlcore_queue_stop_reason reason);
264 void wlcore_stop_queue(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 queue,
266 void wlcore_wake_queue(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 queue,
273 struct wl12xx_vif *wlvif, u8 queue,
278 u8 queue,
281 u8 queue);
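   [Editor's note] wl1271_tx_get_queue() and wlcore_tx_get_mac80211_queue() above translate between mac80211's queue numbering and the firmware's access-category numbering with simple switch statements. A sketch of the same shape; the EX_AC_* values stand in for the driver's own constants and the exact mapping is illustrative:

	#include <net/mac80211.h>

	/* Illustrative firmware access-category numbering. */
	enum { EX_AC_BE, EX_AC_BK, EX_AC_VI, EX_AC_VO };

	static int example_tx_get_queue(int queue)
	{
		switch (queue) {
		case IEEE80211_AC_VO: return EX_AC_VO;
		case IEEE80211_AC_VI: return EX_AC_VI;
		case IEEE80211_AC_BE: return EX_AC_BE;
		case IEEE80211_AC_BK: return EX_AC_BK;
		default:              return EX_AC_BE;	/* sane fallback */
		}
	}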
/linux-4.1.27/drivers/virtio/
H A Dvirtio_pci_legacy.c126 /* Select the queue we're interested in */ setup_vq()
129 /* Check if queue is either not available or already active. */ setup_vq()
138 info->queue = alloc_pages_exact(size, GFP_KERNEL|__GFP_ZERO); setup_vq()
139 if (info->queue == NULL) setup_vq()
142 /* activate the queue */ setup_vq()
143 iowrite32(virt_to_phys(info->queue) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT, setup_vq()
149 true, info->queue, vp_notify, callback, name); setup_vq()
172 free_pages_exact(info->queue, size); setup_vq()
193 /* Select and deactivate the queue */ del_vq()
197 free_pages_exact(info->queue, size); del_vq()
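   [Editor's note] The setup_vq()/del_vq() hits above follow the legacy virtio-PCI activation sequence: select the queue, read its size, allocate page-aligned ring memory, and tell the device where it lives by writing the page frame number. A condensed sketch of that sequence with error handling trimmed; ioaddr is assumed to be the mapped legacy I/O window and the function name is hypothetical:

	#include <linux/io.h>
	#include <linux/mm.h>
	#include <linux/virtio_ring.h>
	#include <linux/virtio_pci.h>

	/* Condensed legacy virtio-PCI queue activation, as described above. */
	static void *example_activate_vq(void __iomem *ioaddr, u16 index, u16 *num_out)
	{
		u16 num;
		unsigned long size;
		void *queue;

		/* Select the queue we're interested in. */
		iowrite16(index, ioaddr + VIRTIO_PCI_QUEUE_SEL);

		/* Queue not available, or a ring already programmed? */
		num = ioread16(ioaddr + VIRTIO_PCI_QUEUE_NUM);
		if (!num || ioread32(ioaddr + VIRTIO_PCI_QUEUE_PFN))
			return NULL;

		size = PAGE_ALIGN(vring_size(num, VIRTIO_PCI_VRING_ALIGN));
		queue = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);
		if (!queue)
			return NULL;

		/* Activate the queue: hand the device the ring's page frame number. */
		iowrite32(virt_to_phys(queue) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT,
			  ioaddr + VIRTIO_PCI_QUEUE_PFN);

		*num_out = num;
		return queue;
	}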
/linux-4.1.27/drivers/net/wireless/ath/carl9170/
H A Dtx.c49 unsigned int queue) __carl9170_get_queue()
52 return queue; __carl9170_get_queue()
78 int queue, i; carl9170_tx_accounting() local
83 queue = skb_get_queue_mapping(skb); carl9170_tx_accounting()
87 * The driver has to accept the frame, regardless if the queue is carl9170_tx_accounting()
92 ar->tx_stats[queue].len++; carl9170_tx_accounting()
93 ar->tx_stats[queue].count++; carl9170_tx_accounting()
158 int queue; carl9170_tx_accounting_free() local
160 queue = skb_get_queue_mapping(skb); carl9170_tx_accounting_free()
164 ar->tx_stats[queue].len--; carl9170_tx_accounting_free()
455 int queue = skb_get_queue_mapping(skb); carl9170_tx_bar_status() local
458 list_for_each_entry_rcu(entry, &ar->bar_list[queue], list) { carl9170_tx_bar_status()
460 spin_lock_bh(&ar->bar_list_lock[queue]); carl9170_tx_bar_status()
462 spin_unlock_bh(&ar->bar_list_lock[queue]); carl9170_tx_bar_status()
469 queue, bar->ra, bar->ta, bar->control, carl9170_tx_bar_status()
514 struct sk_buff_head *queue) carl9170_get_queued_skb()
518 spin_lock_bh(&queue->lock); skb_queue_walk()
519 skb_queue_walk(queue, skb) { skb_queue_walk()
525 __skb_unlink(skb, queue); skb_queue_walk()
526 spin_unlock_bh(&queue->lock); skb_queue_walk()
531 spin_unlock_bh(&queue->lock);
587 * At least one queue has been stuck for long enough. carl9170_check_queue_stop_timeout()
617 skb = skb_peek(&iter->queue); carl9170_tx_ampdu_timeout()
1131 u16 seq, queue, tmpssn; carl9170_tx_ampdu() local
1158 queue = TID_TO_WME_AC(tid_info->tid); carl9170_tx_ampdu()
1165 first = skb_peek(&tid_info->queue); carl9170_tx_ampdu()
1176 while ((skb = skb_peek(&tid_info->queue))) { carl9170_tx_ampdu()
1196 __skb_unlink(skb, &tid_info->queue); carl9170_tx_ampdu()
1204 if (skb_queue_empty(&tid_info->queue) || carl9170_tx_ampdu()
1205 carl9170_get_seq(skb_peek(&tid_info->queue)) != carl9170_tx_ampdu()
1208 * or whenever the queue is empty. carl9170_tx_ampdu()
1227 spin_lock_bh(&ar->tx_pending[queue].lock); carl9170_tx_ampdu()
1228 skb_queue_splice_tail_init(&agg, &ar->tx_pending[queue]); carl9170_tx_ampdu()
1229 spin_unlock_bh(&ar->tx_pending[queue].lock); carl9170_tx_ampdu()
1240 struct sk_buff_head *queue) carl9170_tx_pick_skb()
1248 spin_lock_bh(&queue->lock); carl9170_tx_pick_skb()
1249 skb = skb_peek(queue); carl9170_tx_pick_skb()
1256 __skb_unlink(skb, queue); carl9170_tx_pick_skb()
1257 spin_unlock_bh(&queue->lock); carl9170_tx_pick_skb()
1266 spin_unlock_bh(&queue->lock); carl9170_tx_pick_skb()
1324 unsigned int queue = skb_get_queue_mapping(skb); carl9170_bar_check() local
1329 spin_lock_bh(&ar->bar_list_lock[queue]); carl9170_bar_check()
1330 list_add_tail_rcu(&entry->list, &ar->bar_list[queue]); carl9170_bar_check()
1331 spin_unlock_bh(&ar->bar_list_lock[queue]); carl9170_bar_check()
1427 __skb_queue_tail(&agg->queue, skb); carl9170_tx_ampdu_queue()
1432 skb_queue_reverse_walk(&agg->queue, iter) { carl9170_tx_ampdu_queue()
1436 __skb_queue_after(&agg->queue, iter, skb); carl9170_tx_ampdu_queue()
1441 __skb_queue_head(&agg->queue, skb); carl9170_tx_ampdu_queue()
1445 if (agg->snx == carl9170_get_seq(skb_peek(&agg->queue))) { carl9170_tx_ampdu_queue()
1510 unsigned int queue = skb_get_queue_mapping(skb); carl9170_op_tx() local
1514 skb_queue_tail(&ar->tx_pending[queue], skb); carl9170_op_tx()
1541 /* The AR9170 hardware has no fancy beacon queue or some carl9170_pick_beaconing_vif()
48 __carl9170_get_queue(struct ar9170 *ar, unsigned int queue) __carl9170_get_queue() argument
513 carl9170_get_queued_skb(struct ar9170 *ar, u8 cookie, struct sk_buff_head *queue) carl9170_get_queued_skb() argument
1239 carl9170_tx_pick_skb(struct ar9170 *ar, struct sk_buff_head *queue) carl9170_tx_pick_skb() argument
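   [Editor's note] carl9170_tx_pick_skb() and carl9170_get_queued_skb() above both use the same idiom: take the queue's built-in lock, peek at (or walk) the list, unlink the chosen skb while still holding the lock, then release it. A minimal sketch of that idiom; the "fits" callback is a placeholder for the driver's real ring-space test:

	#include <linux/skbuff.h>

	/* Pop the head of a shared skb queue under its built-in lock. */
	static struct sk_buff *example_pick_skb(struct sk_buff_head *queue,
						bool (*fits)(struct sk_buff *skb))
	{
		struct sk_buff *skb;

		spin_lock_bh(&queue->lock);
		skb = skb_peek(queue);
		if (!skb || !fits(skb)) {	/* e.g. no room left in the hw ring */
			spin_unlock_bh(&queue->lock);
			return NULL;
		}
		__skb_unlink(skb, queue);
		spin_unlock_bh(&queue->lock);

		return skb;
	}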
/linux-4.1.27/net/mac80211/
H A Dtkip.c245 u8 *ra, int only_iv, int queue, ieee80211_tkip_decrypt_data()
268 if (key->u.tkip.rx[queue].state != TKIP_STATE_NOT_INIT && ieee80211_tkip_decrypt_data()
269 (iv32 < key->u.tkip.rx[queue].iv32 || ieee80211_tkip_decrypt_data()
270 (iv32 == key->u.tkip.rx[queue].iv32 && ieee80211_tkip_decrypt_data()
271 iv16 <= key->u.tkip.rx[queue].iv16))) ieee80211_tkip_decrypt_data()
276 key->u.tkip.rx[queue].state = TKIP_STATE_PHASE1_HW_UPLOADED; ieee80211_tkip_decrypt_data()
280 if (key->u.tkip.rx[queue].state == TKIP_STATE_NOT_INIT || ieee80211_tkip_decrypt_data()
281 key->u.tkip.rx[queue].iv32 != iv32) { ieee80211_tkip_decrypt_data()
283 tkip_mixing_phase1(tk, &key->u.tkip.rx[queue], ta, iv32); ieee80211_tkip_decrypt_data()
287 key->u.tkip.rx[queue].state != TKIP_STATE_PHASE1_HW_UPLOADED) { ieee80211_tkip_decrypt_data()
294 iv32, key->u.tkip.rx[queue].p1k); ieee80211_tkip_decrypt_data()
295 key->u.tkip.rx[queue].state = TKIP_STATE_PHASE1_HW_UPLOADED; ieee80211_tkip_decrypt_data()
298 tkip_mixing_phase2(tk, &key->u.tkip.rx[queue], iv16, rc4key); ieee80211_tkip_decrypt_data()
242 ieee80211_tkip_decrypt_data(struct crypto_cipher *tfm, struct ieee80211_key *key, u8 *payload, size_t payload_len, u8 *ta, u8 *ra, int only_iv, int queue, u32 *out_iv32, u16 *out_iv16) ieee80211_tkip_decrypt_data() argument
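   [Editor's note] The ieee80211_tkip_decrypt_data() hits show the per-queue TKIP replay check: a frame is rejected when its IV32 is older than the stored receive counter, or equal to it with an IV16 that does not move forward. A standalone sketch of that comparison, with per-RX-queue state and illustrative field names:

	#include <linux/types.h>

	struct example_tkip_rx {
		u32 iv32;	/* last accepted IV32 on this RX queue */
		u16 iv16;	/* last accepted IV16 on this RX queue */
		bool initialised;
	};

	/* Returns true if (iv32, iv16) replays or precedes the stored counter. */
	static bool example_tkip_is_replay(const struct example_tkip_rx *rx,
					   u32 iv32, u16 iv16)
	{
		if (!rx->initialised)
			return false;	/* first frame on this queue: accept */
		return iv32 < rx->iv32 ||
		       (iv32 == rx->iv32 && iv16 <= rx->iv16);
	}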
/linux-4.1.27/drivers/net/wimax/i2400m/
H A Drx.c93 * driver enough information to queue them properly and then at some
101 * - reset queue: send all queued packets to the OS
103 * - queue: queue a packet
105 * - update ws: update the queue's window start and deliver queued
108 * - queue & update ws: queue a packet, update the window start and
389 * they are queued and at some point the queue is i2400m_rx_ctl()
390 * restarted [NOTE: we can't queue SKBs directly, as i2400m_rx_ctl()
464 * Reorder queue data stored on skb->cb while the skb is queued in the
477 * is for this queue
478 * @queue: the skb queue itself
480 * reorder process in this queue that can be displayed in case of
491 struct sk_buff_head queue; member in struct:i2400m_roq
500 skb_queue_head_init(&roq->queue); __i2400m_roq_init()
513 * Normalize a sequence number based on the queue's window start
566 dev_err(dev, "q#%d queue ws %u cnt %u sn %u/%u\n", i2400m_roq_log_entry_print()
638 * @roq: reorder queue where to add
646 * - the queue is empty
647 * - the skb would be appended to the queue
651 * If these fail, then we have to do a sorted insertion in the queue,
676 if (skb_queue_empty(&roq->queue)) { __i2400m_roq_queue()
678 __skb_queue_head(&roq->queue, skb); __i2400m_roq_queue()
682 skb_itr = skb_peek_tail(&roq->queue); __i2400m_roq_queue()
689 __skb_queue_tail(&roq->queue, skb); __i2400m_roq_queue()
693 * right spot where to insert the packet; we know the queue is __i2400m_roq_queue()
698 skb_queue_walk(&roq->queue, skb_itr) { __i2400m_roq_queue()
706 __skb_queue_before(&roq->queue, skb_itr, skb); __i2400m_roq_queue()
715 skb_queue_walk(&roq->queue, skb_itr) { __i2400m_roq_queue()
733 * @roq: Reorder queue
736 * Updates the window start of a queue; when doing so, it must deliver
756 skb_queue_walk_safe(&roq->queue, skb_itr, tmp_itr) { __i2400m_roq_update_ws()
765 __skb_unlink(skb_itr, &roq->queue); __i2400m_roq_update_ws()
777 * Reset a queue
794 roq->ws, skb_queue_len(&roq->queue), i2400m_roq_reset()
796 skb_queue_walk_safe(&roq->queue, skb_itr, tmp_itr) { i2400m_roq_reset()
800 __skb_unlink(skb_itr, &roq->queue); i2400m_roq_reset()
817 * The hardware is asking the driver to queue a packet for later
829 len = skb_queue_len(&roq->queue); i2400m_roq_queue()
832 dev_err(dev, "SW BUG? queue nsn %d (lbn %u ws %u)\n", i2400m_roq_queue()
847 * Update the window start in a reorder queue and deliver all skbs
851 * @roq: Reorder queue
863 len = skb_queue_len(&roq->queue); i2400m_roq_update_ws()
892 len = skb_queue_len(&roq->queue); i2400m_roq_queue_update_ws()
901 /* If the queue is empty, don't bother as we'd queue i2400m_roq_queue_update_ws()
933 __skb_queue_purge(&i2400m->rx_roq[itr].queue); i2400m_rx_roq_destroy()
1334 * Initialize the RX queue and infrastructure
1384 /* Tear down the RX queue and infrastructure */ i2400m_rx_release()
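   [Editor's note] __i2400m_roq_queue()'s comment above describes the insertion policy for the reorder queue: handle the empty-queue and append-at-tail fast paths first, and only fall back to a sorted walk when the new packet lands in the middle. A reduced sketch, using a plain sequence number in place of the driver's window-normalized one; the cb-based helper is an assumption:

	#include <linux/skbuff.h>

	/* Illustrative: sequence number stashed in skb->cb by the caller. */
	static inline u32 example_roq_nsn(const struct sk_buff *skb)
	{
		return *(const u32 *)skb->cb;
	}

	/* Insert @skb into @roq, keeping it sorted by sequence number. */
	static void example_roq_insert(struct sk_buff_head *roq, struct sk_buff *skb)
	{
		struct sk_buff *itr;
		u32 nsn = example_roq_nsn(skb);

		if (skb_queue_empty(roq)) {			/* case 1: empty queue */
			__skb_queue_head(roq, skb);
			return;
		}
		if (nsn >= example_roq_nsn(skb_peek_tail(roq))) {	/* case 2: append */
			__skb_queue_tail(roq, skb);
			return;
		}
		/* case 3: sorted insertion before the first larger entry */
		skb_queue_walk(roq, itr) {
			if (example_roq_nsn(itr) > nsn) {
				__skb_queue_before(roq, itr, skb);
				return;
			}
		}
	}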
/linux-4.1.27/drivers/scsi/qla4xxx/
H A Dql4_iocb.c39 /* Advance request queue pointer */ qla4xxx_advance_req_ring_ptr()
50 * qla4xxx_get_req_pkt - returns a valid entry in request queue.
52 * @queue_entry: Pointer to pointer to queue entry structure
55 * - returns the current request_in pointer (if queue not full)
57 * - checks for queue full
95 /* Get pointer to the queue entry for the marker */ qla4xxx_send_marker_iocb()
102 /* Put the marker in the request queue */ qla4xxx_send_marker_iocb()
212 * queue entries have been placed on the request queue.
229 * queue entries have been processed by the driver.
243 * queue entries have been placed on the request queue.
256 * queue entries have been processed by the driver.
296 * request queue. If a reset occurs and a request is in the queue, qla4xxx_send_command_to_isp()
397 /* Put the IOCB on the request queue */ qla4xxx_send_passthru0()
474 /* Get pointer to the queue entry for the marker */ qla4xxx_send_mbox_iocb()
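   [Editor's note] qla4xxx_advance_req_ring_ptr() and the queue-full check in qla4xxx_get_req_pkt() above are the classic producer-side ring operations: bump the request-in index, wrap it at the end of the ring, and refuse new entries when the next slot would collide with the consumer. A generic sketch of those two steps; the structure and field names are illustrative, not the driver's:

	#include <linux/types.h>

	struct example_req_ring {
		u16	req_in;		/* producer index (driver writes here) */
		u16	req_out;	/* consumer index (firmware reads from here) */
		u16	depth;		/* number of slots in the ring */
	};

	/* True if writing one more entry would overrun the consumer. */
	static bool example_ring_full(const struct example_req_ring *r)
	{
		return ((r->req_in + 1) % r->depth) == r->req_out;
	}

	/* Advance the request-in pointer, wrapping at the end of the ring. */
	static void example_advance_req_ring(struct example_req_ring *r)
	{
		if (++r->req_in == r->depth)
			r->req_in = 0;
	}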
