
Searched refs:queue (Results 1 – 200 of 1138) sorted by relevance


/linux-4.1.27/drivers/media/usb/uvc/
uvc_queue.c
40 uvc_queue_to_stream(struct uvc_video_queue *queue) in uvc_queue_to_stream() argument
42 return container_of(queue, struct uvc_streaming, queue); in uvc_queue_to_stream()
50 static void uvc_queue_return_buffers(struct uvc_video_queue *queue, in uvc_queue_return_buffers() argument
57 while (!list_empty(&queue->irqqueue)) { in uvc_queue_return_buffers()
58 struct uvc_buffer *buf = list_first_entry(&queue->irqqueue, in uvc_queue_return_buffers()
60 queue); in uvc_queue_return_buffers()
61 list_del(&buf->queue); in uvc_queue_return_buffers()
75 struct uvc_video_queue *queue = vb2_get_drv_priv(vq); in uvc_queue_setup() local
76 struct uvc_streaming *stream = uvc_queue_to_stream(queue); in uvc_queue_setup()
92 struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue); in uvc_buffer_prepare() local
[all …]
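
The uvc_queue.c matches above all revolve around one intrusive-list idiom: each uvc_buffer embeds a list_head member named queue, uvc_queue_return_buffers() drains queue->irqqueue with list_empty()/list_first_entry()/list_del(), and uvc_queue_to_stream() recovers the enclosing uvc_streaming with container_of(). Below is a minimal user-space C sketch of that idiom, not the kernel code: the list helpers are cut-down re-implementations, and the buffer/video_queue types and the return_buffers() name are invented for illustration.

    #include <stddef.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Cut-down user-space stand-ins for the kernel's list_head and container_of. */
    struct list_head { struct list_head *next, *prev; };

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    static void list_init(struct list_head *h) { h->next = h->prev = h; }
    static int list_empty(const struct list_head *h) { return h->next == h; }

    static void list_add_tail(struct list_head *n, struct list_head *h)
    {
        n->prev = h->prev; n->next = h;
        h->prev->next = n; h->prev = n;
    }

    static void list_del(struct list_head *n)
    {
        n->prev->next = n->next;
        n->next->prev = n->prev;
    }

    /* Hypothetical analogues of uvc_buffer and uvc_video_queue. */
    struct buffer { int id; struct list_head queue; };
    struct video_queue { struct list_head irqqueue; };

    /* Drain the pending list the way uvc_queue_return_buffers() walks irqqueue. */
    static void return_buffers(struct video_queue *q)
    {
        while (!list_empty(&q->irqqueue)) {
            struct buffer *buf =
                container_of(q->irqqueue.next, struct buffer, queue);
            list_del(&buf->queue);
            printf("returned buffer %d\n", buf->id);
            free(buf);
        }
    }

    int main(void)
    {
        struct video_queue q;
        list_init(&q.irqqueue);
        for (int i = 0; i < 3; i++) {
            struct buffer *b = malloc(sizeof(*b));
            if (!b)
                return 1;
            b->id = i;
            list_add_tail(&b->queue, &q.irqqueue);
        }
        return_buffers(&q);
        return 0;
    }
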
uvcvideo.h
358 struct list_head queue; member
374 struct vb2_queue queue; member
472 struct uvc_video_queue queue; member
627 extern int uvc_queue_init(struct uvc_video_queue *queue,
629 extern void uvc_queue_release(struct uvc_video_queue *queue);
630 extern int uvc_request_buffers(struct uvc_video_queue *queue,
632 extern int uvc_query_buffer(struct uvc_video_queue *queue,
634 extern int uvc_create_buffers(struct uvc_video_queue *queue,
636 extern int uvc_queue_buffer(struct uvc_video_queue *queue,
638 extern int uvc_dequeue_buffer(struct uvc_video_queue *queue,
[all …]
uvc_isight.c
39 static int isight_decode(struct uvc_video_queue *queue, struct uvc_buffer *buf, in isight_decode() argument
123 ret = isight_decode(&stream->queue, buf, in uvc_video_decode_isight()
133 buf = uvc_queue_next_buffer(&stream->queue, in uvc_video_decode_isight()
/linux-4.1.27/drivers/usb/gadget/function/
uvc_queue.c
48 struct uvc_video_queue *queue = vb2_get_drv_priv(vq); in uvc_queue_setup() local
49 struct uvc_video *video = container_of(queue, struct uvc_video, queue); in uvc_queue_setup()
63 struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue); in uvc_buffer_prepare() local
72 if (unlikely(queue->flags & UVC_QUEUE_DISCONNECTED)) in uvc_buffer_prepare()
88 struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue); in uvc_buffer_queue() local
92 spin_lock_irqsave(&queue->irqlock, flags); in uvc_buffer_queue()
94 if (likely(!(queue->flags & UVC_QUEUE_DISCONNECTED))) { in uvc_buffer_queue()
95 list_add_tail(&buf->queue, &queue->irqqueue); in uvc_buffer_queue()
104 spin_unlock_irqrestore(&queue->irqlock, flags); in uvc_buffer_queue()
115 int uvcg_queue_init(struct uvc_video_queue *queue, enum v4l2_buf_type type, in uvcg_queue_init() argument
[all …]
uvc_queue.h
30 struct list_head queue; member
43 struct vb2_queue queue; member
54 static inline int uvc_queue_streaming(struct uvc_video_queue *queue) in uvc_queue_streaming() argument
56 return vb2_is_streaming(&queue->queue); in uvc_queue_streaming()
59 int uvcg_queue_init(struct uvc_video_queue *queue, enum v4l2_buf_type type,
62 void uvcg_free_buffers(struct uvc_video_queue *queue);
64 int uvcg_alloc_buffers(struct uvc_video_queue *queue,
67 int uvcg_query_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf);
69 int uvcg_queue_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf);
71 int uvcg_dequeue_buffer(struct uvc_video_queue *queue,
[all …]
uvc_video.c
37 if (buf->bytesused - video->queue.buf_used <= len - 2) in uvc_video_encode_header()
47 struct uvc_video_queue *queue = &video->queue; in uvc_video_encode_data() local
52 mem = buf->mem + queue->buf_used; in uvc_video_encode_data()
53 nbytes = min((unsigned int)len, buf->bytesused - queue->buf_used); in uvc_video_encode_data()
56 queue->buf_used += nbytes; in uvc_video_encode_data()
87 if (buf->bytesused == video->queue.buf_used) { in uvc_video_encode_bulk()
88 video->queue.buf_used = 0; in uvc_video_encode_bulk()
90 uvcg_queue_next_buffer(&video->queue, buf); in uvc_video_encode_bulk()
97 buf->bytesused == video->queue.buf_used) in uvc_video_encode_bulk()
120 if (buf->bytesused == video->queue.buf_used) { in uvc_video_encode_isoc()
[all …]
uvc_v4l2.c
152 if (b->type != video->queue.queue.type) in uvc_v4l2_reqbufs()
155 return uvcg_alloc_buffers(&video->queue, b); in uvc_v4l2_reqbufs()
165 return uvcg_query_buffer(&video->queue, b); in uvc_v4l2_querybuf()
176 ret = uvcg_queue_buffer(&video->queue, b); in uvc_v4l2_qbuf()
190 return uvcg_dequeue_buffer(&video->queue, b, file->f_flags & O_NONBLOCK); in uvc_v4l2_dqbuf()
201 if (type != video->queue.queue.type) in uvc_v4l2_streamon()
226 if (type != video->queue.queue.type) in uvc_v4l2_streamoff()
317 uvcg_free_buffers(&video->queue); in uvc_v4l2_release()
334 return uvcg_queue_mmap(&uvc->video.queue, vma); in uvc_v4l2_mmap()
343 return uvcg_queue_poll(&uvc->video.queue, file, wait); in uvc_v4l2_poll()
[all …]
/linux-4.1.27/drivers/net/wireless/cw1200/
queue.c
29 static inline void __cw1200_queue_lock(struct cw1200_queue *queue) in __cw1200_queue_lock() argument
31 struct cw1200_queue_stats *stats = queue->stats; in __cw1200_queue_lock()
32 if (queue->tx_locked_cnt++ == 0) { in __cw1200_queue_lock()
34 queue->queue_id); in __cw1200_queue_lock()
35 ieee80211_stop_queue(stats->priv->hw, queue->queue_id); in __cw1200_queue_lock()
39 static inline void __cw1200_queue_unlock(struct cw1200_queue *queue) in __cw1200_queue_unlock() argument
41 struct cw1200_queue_stats *stats = queue->stats; in __cw1200_queue_unlock()
42 BUG_ON(!queue->tx_locked_cnt); in __cw1200_queue_unlock()
43 if (--queue->tx_locked_cnt == 0) { in __cw1200_queue_unlock()
45 queue->queue_id); in __cw1200_queue_unlock()
[all …]
queue.h
36 struct list_head queue; member
71 int cw1200_queue_init(struct cw1200_queue *queue,
76 int cw1200_queue_clear(struct cw1200_queue *queue);
78 void cw1200_queue_deinit(struct cw1200_queue *queue);
80 size_t cw1200_queue_get_num_queued(struct cw1200_queue *queue,
82 int cw1200_queue_put(struct cw1200_queue *queue,
85 int cw1200_queue_get(struct cw1200_queue *queue,
90 int cw1200_queue_requeue(struct cw1200_queue *queue, u32 packet_id);
91 int cw1200_queue_requeue_all(struct cw1200_queue *queue);
92 int cw1200_queue_remove(struct cw1200_queue *queue,
[all …]
/linux-4.1.27/drivers/net/xen-netback/
netback.c
93 static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
96 static void make_tx_response(struct xenvif_queue *queue,
99 static void push_tx_responses(struct xenvif_queue *queue);
101 static inline int tx_work_todo(struct xenvif_queue *queue);
103 static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue,
110 static inline unsigned long idx_to_pfn(struct xenvif_queue *queue, in idx_to_pfn() argument
113 return page_to_pfn(queue->mmap_pages[idx]); in idx_to_pfn()
116 static inline unsigned long idx_to_kaddr(struct xenvif_queue *queue, in idx_to_kaddr() argument
119 return (unsigned long)pfn_to_kaddr(idx_to_pfn(queue, idx)); in idx_to_kaddr()
152 bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue, int needed) in xenvif_rx_ring_slots_available() argument
[all …]
interface.c
54 void xenvif_skb_zerocopy_prepare(struct xenvif_queue *queue, in xenvif_skb_zerocopy_prepare() argument
58 atomic_inc(&queue->inflight_packets); in xenvif_skb_zerocopy_prepare()
61 void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue) in xenvif_skb_zerocopy_complete() argument
63 atomic_dec(&queue->inflight_packets); in xenvif_skb_zerocopy_complete()
75 struct xenvif_queue *queue = dev_id; in xenvif_tx_interrupt() local
77 if (RING_HAS_UNCONSUMED_REQUESTS(&queue->tx)) in xenvif_tx_interrupt()
78 napi_schedule(&queue->napi); in xenvif_tx_interrupt()
85 struct xenvif_queue *queue = in xenvif_poll() local
93 if (unlikely(queue->vif->disabled)) { in xenvif_poll()
98 work_done = xenvif_tx_action(queue, budget); in xenvif_poll()
[all …]
common.h
271 int xenvif_init_queue(struct xenvif_queue *queue);
272 void xenvif_deinit_queue(struct xenvif_queue *queue);
274 int xenvif_connect(struct xenvif_queue *queue, unsigned long tx_ring_ref,
285 int xenvif_queue_stopped(struct xenvif_queue *queue);
286 void xenvif_wake_queue(struct xenvif_queue *queue);
289 void xenvif_unmap_frontend_rings(struct xenvif_queue *queue);
290 int xenvif_map_frontend_rings(struct xenvif_queue *queue,
295 void xenvif_napi_schedule_or_enable_events(struct xenvif_queue *queue);
300 int xenvif_tx_action(struct xenvif_queue *queue, int budget);
303 void xenvif_kick_thread(struct xenvif_queue *queue);
[all …]
xenbus.c
41 static int connect_rings(struct backend_info *be, struct xenvif_queue *queue);
55 struct xenvif_queue *queue = m->private; in xenvif_read_io_ring() local
56 struct xen_netif_tx_back_ring *tx_ring = &queue->tx; in xenvif_read_io_ring()
57 struct xen_netif_rx_back_ring *rx_ring = &queue->rx; in xenvif_read_io_ring()
63 seq_printf(m, "Queue %d\nTX: nr_ents %u\n", queue->id, in xenvif_read_io_ring()
79 queue->pending_prod, in xenvif_read_io_ring()
80 queue->pending_cons, in xenvif_read_io_ring()
81 nr_pending_reqs(queue)); in xenvif_read_io_ring()
83 queue->dealloc_prod, in xenvif_read_io_ring()
84 queue->dealloc_cons, in xenvif_read_io_ring()
[all …]
/linux-4.1.27/drivers/net/wireless/b43legacy/
pio.c
35 static void tx_start(struct b43legacy_pioqueue *queue) in tx_start() argument
37 b43legacy_pio_write(queue, B43legacy_PIO_TXCTL, in tx_start()
41 static void tx_octet(struct b43legacy_pioqueue *queue, in tx_octet() argument
44 if (queue->need_workarounds) { in tx_octet()
45 b43legacy_pio_write(queue, B43legacy_PIO_TXDATA, octet); in tx_octet()
46 b43legacy_pio_write(queue, B43legacy_PIO_TXCTL, in tx_octet()
49 b43legacy_pio_write(queue, B43legacy_PIO_TXCTL, in tx_octet()
51 b43legacy_pio_write(queue, B43legacy_PIO_TXDATA, octet); in tx_octet()
76 static void tx_data(struct b43legacy_pioqueue *queue, in tx_data() argument
84 if (queue->need_workarounds) { in tx_data()
[all …]
pio.h
42 struct b43legacy_pioqueue *queue; member
48 (packet)->queue->tx_packets_cache))
83 u16 b43legacy_pio_read(struct b43legacy_pioqueue *queue, in b43legacy_pio_read() argument
86 return b43legacy_read16(queue->dev, queue->mmio_base + offset); in b43legacy_pio_read()
90 void b43legacy_pio_write(struct b43legacy_pioqueue *queue, in b43legacy_pio_write() argument
93 b43legacy_write16(queue->dev, queue->mmio_base + offset, value); in b43legacy_pio_write()
105 void b43legacy_pio_rx(struct b43legacy_pioqueue *queue);
108 void b43legacy_pio_tx_suspend(struct b43legacy_pioqueue *queue);
109 void b43legacy_pio_tx_resume(struct b43legacy_pioqueue *queue);
137 void b43legacy_pio_rx(struct b43legacy_pioqueue *queue) in b43legacy_pio_rx() argument
[all …]
/linux-4.1.27/drivers/infiniband/hw/ehca/
ipz_pt_fn.c
51 void *ipz_qpageit_get_inc(struct ipz_queue *queue) in ipz_qpageit_get_inc() argument
53 void *ret = ipz_qeit_get(queue); in ipz_qpageit_get_inc()
54 queue->current_q_offset += queue->pagesize; in ipz_qpageit_get_inc()
55 if (queue->current_q_offset > queue->queue_length) { in ipz_qpageit_get_inc()
56 queue->current_q_offset -= queue->pagesize; in ipz_qpageit_get_inc()
59 if (((u64)ret) % queue->pagesize) { in ipz_qpageit_get_inc()
66 void *ipz_qeit_eq_get_inc(struct ipz_queue *queue) in ipz_qeit_eq_get_inc() argument
68 void *ret = ipz_qeit_get(queue); in ipz_qeit_eq_get_inc()
69 u64 last_entry_in_q = queue->queue_length - queue->qe_size; in ipz_qeit_eq_get_inc()
71 queue->current_q_offset += queue->qe_size; in ipz_qeit_eq_get_inc()
[all …]
ipz_pt_fn.h
93 static inline void *ipz_qeit_calc(struct ipz_queue *queue, u64 q_offset) in ipz_qeit_calc() argument
96 if (q_offset >= queue->queue_length) in ipz_qeit_calc()
98 current_page = (queue->queue_pages)[q_offset >> EHCA_PAGESHIFT]; in ipz_qeit_calc()
106 static inline void *ipz_qeit_get(struct ipz_queue *queue) in ipz_qeit_get() argument
108 return ipz_qeit_calc(queue, queue->current_q_offset); in ipz_qeit_get()
118 void *ipz_qpageit_get_inc(struct ipz_queue *queue);
126 static inline void *ipz_qeit_get_inc(struct ipz_queue *queue) in ipz_qeit_get_inc() argument
128 void *ret = ipz_qeit_get(queue); in ipz_qeit_get_inc()
129 queue->current_q_offset += queue->qe_size; in ipz_qeit_get_inc()
130 if (queue->current_q_offset >= queue->queue_length) { in ipz_qeit_get_inc()
[all …]
/linux-4.1.27/drivers/misc/genwqe/
card_ddcb.c
91 static int queue_empty(struct ddcb_queue *queue) in queue_empty() argument
93 return queue->ddcb_next == queue->ddcb_act; in queue_empty()
96 static int queue_enqueued_ddcbs(struct ddcb_queue *queue) in queue_enqueued_ddcbs() argument
98 if (queue->ddcb_next >= queue->ddcb_act) in queue_enqueued_ddcbs()
99 return queue->ddcb_next - queue->ddcb_act; in queue_enqueued_ddcbs()
101 return queue->ddcb_max - (queue->ddcb_act - queue->ddcb_next); in queue_enqueued_ddcbs()
104 static int queue_free_ddcbs(struct ddcb_queue *queue) in queue_free_ddcbs() argument
106 int free_ddcbs = queue->ddcb_max - queue_enqueued_ddcbs(queue) - 1; in queue_free_ddcbs()
172 static void print_ddcb_info(struct genwqe_dev *cd, struct ddcb_queue *queue) in print_ddcb_info() argument
183 cd->card_idx, queue->ddcb_act, queue->ddcb_next); in print_ddcb_info()
[all …]
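
The card_ddcb.c helpers shown above are plain ring-index arithmetic over ddcb_act (consumer index), ddcb_next (producer index) and ddcb_max slots, with one slot always kept free so a full ring is never mistaken for an empty one. A small stand-alone sketch of the same arithmetic, using invented field names that mirror the excerpt rather than the real structure:

    #include <assert.h>
    #include <stdio.h>

    /* Invented ring bookkeeping mirroring ddcb_act / ddcb_next / ddcb_max. */
    struct ring {
        int act;   /* oldest in-flight entry (consumer index) */
        int next;  /* next entry to hand out (producer index) */
        int max;   /* total number of slots */
    };

    static int ring_empty(const struct ring *r)
    {
        return r->next == r->act;
    }

    static int ring_enqueued(const struct ring *r)
    {
        if (r->next >= r->act)
            return r->next - r->act;
        return r->max - (r->act - r->next);  /* producer has wrapped around */
    }

    static int ring_free(const struct ring *r)
    {
        return r->max - ring_enqueued(r) - 1;  /* one slot stays unused */
    }

    int main(void)
    {
        struct ring r = { .act = 30, .next = 2, .max = 32 };

        assert(!ring_empty(&r));
        printf("enqueued=%d free=%d\n", ring_enqueued(&r), ring_free(&r));
        /* prints: enqueued=4 free=27 */
        return 0;
    }

Keeping one slot unused is the usual trade-off that lets a producer/consumer pair distinguish a full ring from an empty one without maintaining a separate element count.
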
card_debugfs.c
236 struct ddcb_queue *queue; in genwqe_ddcb_info_show() local
239 queue = &cd->queue; in genwqe_ddcb_info_show()
250 queue->ddcb_max, (long long)queue->ddcb_daddr, in genwqe_ddcb_info_show()
251 (long long)queue->ddcb_daddr + in genwqe_ddcb_info_show()
252 (queue->ddcb_max * DDCB_LENGTH), in genwqe_ddcb_info_show()
253 (long long)queue->ddcb_vaddr, queue->ddcbs_in_flight, in genwqe_ddcb_info_show()
254 queue->ddcbs_max_in_flight, queue->ddcbs_completed, in genwqe_ddcb_info_show()
255 queue->return_on_busy, queue->wait_on_busy, in genwqe_ddcb_info_show()
268 queue->IO_QUEUE_CONFIG, in genwqe_ddcb_info_show()
269 __genwqe_readq(cd, queue->IO_QUEUE_CONFIG), in genwqe_ddcb_info_show()
[all …]
/linux-4.1.27/arch/arm/mach-ixp4xx/include/mach/
qmgr.h
60 void qmgr_set_irq(unsigned int queue, int src,
62 void qmgr_enable_irq(unsigned int queue);
63 void qmgr_disable_irq(unsigned int queue);
70 int qmgr_request_queue(unsigned int queue, unsigned int len /* dwords */,
75 int __qmgr_request_queue(unsigned int queue, unsigned int len /* dwords */,
78 #define qmgr_request_queue(queue, len, nearly_empty_watermark, \ argument
80 __qmgr_request_queue(queue, len, nearly_empty_watermark, \
84 void qmgr_release_queue(unsigned int queue);
87 static inline void qmgr_put_entry(unsigned int queue, u32 val) in qmgr_put_entry() argument
91 BUG_ON(!qmgr_queue_descs[queue]); /* not yet requested */ in qmgr_put_entry()
[all …]
/linux-4.1.27/drivers/gpu/drm/vmwgfx/
vmwgfx_marker.c
37 void vmw_marker_queue_init(struct vmw_marker_queue *queue) in vmw_marker_queue_init() argument
39 INIT_LIST_HEAD(&queue->head); in vmw_marker_queue_init()
40 queue->lag = 0; in vmw_marker_queue_init()
41 queue->lag_time = ktime_get_raw_ns(); in vmw_marker_queue_init()
42 spin_lock_init(&queue->lock); in vmw_marker_queue_init()
45 void vmw_marker_queue_takedown(struct vmw_marker_queue *queue) in vmw_marker_queue_takedown() argument
49 spin_lock(&queue->lock); in vmw_marker_queue_takedown()
50 list_for_each_entry_safe(marker, next, &queue->head, head) { in vmw_marker_queue_takedown()
53 spin_unlock(&queue->lock); in vmw_marker_queue_takedown()
56 int vmw_marker_push(struct vmw_marker_queue *queue, in vmw_marker_push() argument
[all …]
/linux-4.1.27/drivers/net/
xen-netfront.c
203 static struct sk_buff *xennet_get_rx_skb(struct netfront_queue *queue, in xennet_get_rx_skb() argument
207 struct sk_buff *skb = queue->rx_skbs[i]; in xennet_get_rx_skb()
208 queue->rx_skbs[i] = NULL; in xennet_get_rx_skb()
212 static grant_ref_t xennet_get_rx_ref(struct netfront_queue *queue, in xennet_get_rx_ref() argument
216 grant_ref_t ref = queue->grant_rx_ref[i]; in xennet_get_rx_ref()
217 queue->grant_rx_ref[i] = GRANT_INVALID_REF; in xennet_get_rx_ref()
233 struct netfront_queue *queue = (struct netfront_queue *)data; in rx_refill_timeout() local
234 napi_schedule(&queue->napi); in rx_refill_timeout()
237 static int netfront_tx_slot_available(struct netfront_queue *queue) in netfront_tx_slot_available() argument
239 return (queue->tx.req_prod_pvt - queue->tx.rsp_cons) < in netfront_tx_slot_available()
[all …]
eql.c
140 static void eql_kill_one_slave(slave_queue_t *queue, slave_t *slave);
147 spin_lock(&eql->queue.lock); in eql_timer()
148 head = &eql->queue.all_slaves; in eql_timer()
157 eql_kill_one_slave(&eql->queue, slave); in eql_timer()
161 spin_unlock(&eql->queue.lock); in eql_timer()
186 spin_lock_init(&eql->queue.lock); in eql_setup()
187 INIT_LIST_HEAD(&eql->queue.all_slaves); in eql_setup()
188 eql->queue.master_dev = dev; in eql_setup()
213 BUG_ON(!list_empty(&eql->queue.all_slaves)); in eql_open()
223 static void eql_kill_one_slave(slave_queue_t *queue, slave_t *slave) in eql_kill_one_slave() argument
[all …]
/linux-4.1.27/drivers/net/wireless/rt2x00/
rt2x00queue.c
36 struct data_queue *queue = entry->queue; in rt2x00queue_alloc_rxskb() local
37 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev; in rt2x00queue_alloc_rxskb()
48 frame_size = queue->data_size + queue->desc_size + queue->winfo_size; in rt2x00queue_alloc_rxskb()
107 struct device *dev = entry->queue->rt2x00dev->dev; in rt2x00queue_map_txskb()
123 struct device *dev = entry->queue->rt2x00dev->dev; in rt2x00queue_unmap_skb()
498 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; in rt2x00queue_write_tx_data()
510 entry->queue->qid, DRV_PROJECT); in rt2x00queue_write_tx_data()
539 struct data_queue *queue = entry->queue; in rt2x00queue_write_tx_descriptor() local
541 queue->rt2x00dev->ops->lib->write_tx_desc(entry, txdesc); in rt2x00queue_write_tx_descriptor()
547 rt2x00debug_dump_frame(queue->rt2x00dev, DUMP_FRAME_TX, entry->skb); in rt2x00queue_write_tx_descriptor()
[all …]
rt2x00usb.c
238 struct data_queue *queue; in rt2x00usb_work_txdone() local
241 tx_queue_for_each(rt2x00dev, queue) { in rt2x00usb_work_txdone()
242 while (!rt2x00queue_empty(queue)) { in rt2x00usb_work_txdone()
243 entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE); in rt2x00usb_work_txdone()
257 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; in rt2x00usb_interrupt_txdone()
284 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; in rt2x00usb_kick_tx_entry()
312 usb_sndbulkpipe(usb_dev, entry->queue->usb_endpoint), in rt2x00usb_kick_tx_entry()
350 skbdesc->desc_len = entry->queue->desc_size; in rt2x00usb_work_rxdone()
362 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; in rt2x00usb_interrupt_rxdone()
377 if (urb->actual_length < entry->queue->desc_size || urb->status) in rt2x00usb_interrupt_rxdone()
[all …]
rt2x00mmio.c
62 struct data_queue *queue = rt2x00dev->rx; in rt2x00mmio_rxdone() local
69 entry = rt2x00queue_get_entry(queue, Q_INDEX); in rt2x00mmio_rxdone()
80 skbdesc->desc_len = entry->queue->desc_size; in rt2x00mmio_rxdone()
99 void rt2x00mmio_flush_queue(struct data_queue *queue, bool drop) in rt2x00mmio_flush_queue() argument
103 for (i = 0; !rt2x00queue_empty(queue) && i < 10; i++) in rt2x00mmio_flush_queue()
112 struct data_queue *queue) in rt2x00mmio_alloc_queue_dma() argument
123 queue->limit * queue->desc_size, &dma, in rt2x00mmio_alloc_queue_dma()
131 for (i = 0; i < queue->limit; i++) { in rt2x00mmio_alloc_queue_dma()
132 entry_priv = queue->entries[i].priv_data; in rt2x00mmio_alloc_queue_dma()
133 entry_priv->desc = addr + i * queue->desc_size; in rt2x00mmio_alloc_queue_dma()
[all …]
rt2x00mac.c
31 struct data_queue *queue, in rt2x00mac_tx_rts_cts() argument
91 retval = rt2x00queue_write_tx_frame(queue, skb, NULL, true); in rt2x00mac_tx_rts_cts()
107 struct data_queue *queue = NULL; in rt2x00mac_tx() local
125 queue = rt2x00queue_get_tx_queue(rt2x00dev, qid); in rt2x00mac_tx()
126 if (unlikely(!queue)) { in rt2x00mac_tx()
145 if (rt2x00queue_available(queue) <= 1) in rt2x00mac_tx()
148 if (rt2x00mac_tx_rts_cts(rt2x00dev, queue, skb)) in rt2x00mac_tx()
152 if (unlikely(rt2x00queue_write_tx_frame(queue, skb, control->sta, false))) in rt2x00mac_tx()
160 spin_lock(&queue->tx_lock); in rt2x00mac_tx()
161 if (rt2x00queue_threshold(queue)) in rt2x00mac_tx()
[all …]
rt2800mmio.c
55 const unsigned int txwi_size = entry->queue->winfo_size; in rt2800mmio_write_tx_desc()
252 struct data_queue *queue; in rt2800mmio_txdone() local
269 queue = rt2x00queue_get_tx_queue(rt2x00dev, qid); in rt2800mmio_txdone()
270 if (unlikely(queue == NULL)) { in rt2800mmio_txdone()
280 if (unlikely(rt2x00queue_empty(queue))) { in rt2800mmio_txdone()
294 if (!rt2x00queue_for_each_entry(queue, Q_INDEX_DONE, in rt2800mmio_txdone()
301 if (!rt2x00queue_for_each_entry(queue, Q_INDEX_DONE, in rt2800mmio_txdone()
313 rt2x00queue_for_each_entry(queue, Q_INDEX_DONE, in rt2800mmio_txdone()
556 void rt2800mmio_start_queue(struct data_queue *queue) in rt2800mmio_start_queue() argument
558 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev; in rt2800mmio_start_queue()
[all …]
rt2800usb.c
57 static void rt2800usb_start_queue(struct data_queue *queue) in rt2800usb_start_queue() argument
59 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev; in rt2800usb_start_queue()
62 switch (queue->qid) { in rt2800usb_start_queue()
80 static void rt2800usb_stop_queue(struct data_queue *queue) in rt2800usb_stop_queue() argument
82 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev; in rt2800usb_stop_queue()
85 switch (queue->qid) { in rt2800usb_stop_queue()
109 struct data_queue *queue; in rt2800usb_txstatus_pending() local
111 tx_queue_for_each(rt2x00dev, queue) { in rt2800usb_txstatus_pending()
112 if (rt2x00queue_get_entry(queue, Q_INDEX_DMA_DONE) != in rt2800usb_txstatus_pending()
113 rt2x00queue_get_entry(queue, Q_INDEX_DONE)) in rt2800usb_txstatus_pending()
[all …]
rt2x00queue.h
381 struct data_queue *queue; member
585 bool rt2x00queue_for_each_entry(struct data_queue *queue,
596 static inline int rt2x00queue_empty(struct data_queue *queue) in rt2x00queue_empty() argument
598 return queue->length == 0; in rt2x00queue_empty()
605 static inline int rt2x00queue_full(struct data_queue *queue) in rt2x00queue_full() argument
607 return queue->length == queue->limit; in rt2x00queue_full()
614 static inline int rt2x00queue_available(struct data_queue *queue) in rt2x00queue_available() argument
616 return queue->limit - queue->length; in rt2x00queue_available()
623 static inline int rt2x00queue_threshold(struct data_queue *queue) in rt2x00queue_threshold() argument
625 return rt2x00queue_available(queue) < queue->threshold; in rt2x00queue_threshold()
rt2400pci.c
635 static void rt2400pci_start_queue(struct data_queue *queue) in rt2400pci_start_queue() argument
637 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev; in rt2400pci_start_queue()
640 switch (queue->qid) { in rt2400pci_start_queue()
658 static void rt2400pci_kick_queue(struct data_queue *queue) in rt2400pci_kick_queue() argument
660 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev; in rt2400pci_kick_queue()
663 switch (queue->qid) { in rt2400pci_kick_queue()
684 static void rt2400pci_stop_queue(struct data_queue *queue) in rt2400pci_stop_queue() argument
686 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev; in rt2400pci_stop_queue()
689 switch (queue->qid) { in rt2400pci_stop_queue()
727 if (entry->queue->qid == QID_RX) { in rt2400pci_get_entry_state()
[all …]
rt2x00.h
572 void (*start_queue) (struct data_queue *queue);
573 void (*kick_queue) (struct data_queue *queue);
574 void (*stop_queue) (struct data_queue *queue);
575 void (*flush_queue) (struct data_queue *queue, bool drop);
640 void (*queue_init)(struct data_queue *queue);
1284 const enum data_queue_qid queue) in rt2x00queue_get_tx_queue() argument
1286 if (queue < rt2x00dev->ops->tx_queues && rt2x00dev->tx) in rt2x00queue_get_tx_queue()
1287 return &rt2x00dev->tx[queue]; in rt2x00queue_get_tx_queue()
1289 if (queue == QID_ATIM) in rt2x00queue_get_tx_queue()
1300 struct queue_entry *rt2x00queue_get_entry(struct data_queue *queue,
[all …]
rt2800mmio.h
149 void rt2800mmio_start_queue(struct data_queue *queue);
150 void rt2800mmio_kick_queue(struct data_queue *queue);
151 void rt2800mmio_stop_queue(struct data_queue *queue);
152 void rt2800mmio_queue_init(struct data_queue *queue);
rt2500pci.c
294 struct data_queue *queue = rt2x00dev->bcn; in rt2500pci_config_intf() local
305 rt2x00_set_field32(&reg, BCNCSR1_BEACON_CWMIN, queue->cw_min); in rt2500pci_config_intf()
724 static void rt2500pci_start_queue(struct data_queue *queue) in rt2500pci_start_queue() argument
726 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev; in rt2500pci_start_queue()
729 switch (queue->qid) { in rt2500pci_start_queue()
747 static void rt2500pci_kick_queue(struct data_queue *queue) in rt2500pci_kick_queue() argument
749 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev; in rt2500pci_kick_queue()
752 switch (queue->qid) { in rt2500pci_kick_queue()
773 static void rt2500pci_stop_queue(struct data_queue *queue) in rt2500pci_stop_queue() argument
775 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev; in rt2500pci_stop_queue()
[all …]
rt2500usb.c
737 static void rt2500usb_start_queue(struct data_queue *queue) in rt2500usb_start_queue() argument
739 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev; in rt2500usb_start_queue()
742 switch (queue->qid) { in rt2500usb_start_queue()
760 static void rt2500usb_stop_queue(struct data_queue *queue) in rt2500usb_stop_queue() argument
762 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev; in rt2500usb_stop_queue()
765 switch (queue->qid) { in rt2500usb_stop_queue()
1106 rt2x00_set_field32(&word, TXD_W1_AIFS, entry->queue->aifs); in rt2500usb_write_tx_desc()
1107 rt2x00_set_field32(&word, TXD_W1_CWMIN, entry->queue->cw_min); in rt2500usb_write_tx_desc()
1108 rt2x00_set_field32(&word, TXD_W1_CWMAX, entry->queue->cw_max); in rt2500usb_write_tx_desc()
1141 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; in rt2500usb_write_beacon()
[all …]
rt61pci.c
1134 static void rt61pci_start_queue(struct data_queue *queue) in rt61pci_start_queue() argument
1136 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev; in rt61pci_start_queue()
1139 switch (queue->qid) { in rt61pci_start_queue()
1157 static void rt61pci_kick_queue(struct data_queue *queue) in rt61pci_kick_queue() argument
1159 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev; in rt61pci_kick_queue()
1162 switch (queue->qid) { in rt61pci_kick_queue()
1188 static void rt61pci_stop_queue(struct data_queue *queue) in rt61pci_stop_queue() argument
1190 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev; in rt61pci_stop_queue()
1193 switch (queue->qid) { in rt61pci_stop_queue()
1385 if (entry->queue->qid == QID_RX) { in rt61pci_get_entry_state()
[all …]
rt73usb.c
1021 static void rt73usb_start_queue(struct data_queue *queue) in rt73usb_start_queue() argument
1023 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev; in rt73usb_start_queue()
1026 switch (queue->qid) { in rt73usb_start_queue()
1044 static void rt73usb_stop_queue(struct data_queue *queue) in rt73usb_stop_queue() argument
1046 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev; in rt73usb_stop_queue()
1049 switch (queue->qid) { in rt73usb_stop_queue()
1489 rt2x00_set_field32(&word, TXD_W1_HOST_Q_ID, entry->queue->qid); in rt73usb_write_tx_desc()
1490 rt2x00_set_field32(&word, TXD_W1_AIFSN, entry->queue->aifs); in rt73usb_write_tx_desc()
1491 rt2x00_set_field32(&word, TXD_W1_CWMIN, entry->queue->cw_min); in rt73usb_write_tx_desc()
1492 rt2x00_set_field32(&word, TXD_W1_CWMAX, entry->queue->cw_max); in rt73usb_write_tx_desc()
[all …]
rt2x00debug.c
199 dump_hdr->queue_index = skbdesc->entry->queue->qid; in rt2x00debug_dump_frame()
331 struct data_queue *queue; in rt2x00debug_read_queue_stats() local
348 queue_for_each(intf->rt2x00dev, queue) { in rt2x00debug_read_queue_stats()
349 spin_lock_irqsave(&queue->index_lock, irqflags); in rt2x00debug_read_queue_stats()
352 queue->qid, (unsigned int)queue->flags, in rt2x00debug_read_queue_stats()
353 queue->count, queue->limit, queue->length, in rt2x00debug_read_queue_stats()
354 queue->index[Q_INDEX], in rt2x00debug_read_queue_stats()
355 queue->index[Q_INDEX_DMA_DONE], in rt2x00debug_read_queue_stats()
356 queue->index[Q_INDEX_DONE]); in rt2x00debug_read_queue_stats()
358 spin_unlock_irqrestore(&queue->index_lock, irqflags); in rt2x00debug_read_queue_stats()
/linux-4.1.27/drivers/scsi/arm/
queue.c
58 int queue_initialise (Queue_t *queue) in queue_initialise() argument
63 spin_lock_init(&queue->queue_lock); in queue_initialise()
64 INIT_LIST_HEAD(&queue->head); in queue_initialise()
65 INIT_LIST_HEAD(&queue->free); in queue_initialise()
73 queue->alloc = q = kmalloc(sizeof(QE_t) * nqueues, GFP_KERNEL); in queue_initialise()
78 list_add(&q->list, &queue->free); in queue_initialise()
82 return queue->alloc != NULL; in queue_initialise()
90 void queue_free (Queue_t *queue) in queue_free() argument
92 if (!list_empty(&queue->head)) in queue_free()
93 printk(KERN_WARNING "freeing non-empty queue %p\n", queue); in queue_free()
[all …]
queue.h
25 extern int queue_initialise (Queue_t *queue);
32 extern void queue_free (Queue_t *queue);
40 extern struct scsi_cmnd *queue_remove (Queue_t *queue);
49 extern struct scsi_cmnd *queue_remove_exclude(Queue_t *queue,
52 #define queue_add_cmd_ordered(queue,SCpnt) \ argument
53 __queue_add(queue,SCpnt,(SCpnt)->cmnd[0] == REQUEST_SENSE)
54 #define queue_add_cmd_tail(queue,SCpnt) \ argument
55 __queue_add(queue,SCpnt,0)
64 extern int __queue_add(Queue_t *queue, struct scsi_cmnd *SCpnt, int head);
75 extern struct scsi_cmnd *queue_remove_tgtluntag(Queue_t *queue, int target,
[all …]
Makefile
7 obj-$(CONFIG_SCSI_ACORNSCSI_3) += acornscsi_mod.o queue.o msgqueue.o
8 obj-$(CONFIG_SCSI_ARXESCSI) += arxescsi.o fas216.o queue.o msgqueue.o
10 obj-$(CONFIG_SCSI_CUMANA_2) += cumana_2.o fas216.o queue.o msgqueue.o
12 obj-$(CONFIG_SCSI_POWERTECSCSI) += powertec.o fas216.o queue.o msgqueue.o
13 obj-$(CONFIG_SCSI_EESOXSCSI) += eesox.o fas216.o queue.o msgqueue.o
/linux-4.1.27/net/sunrpc/
sched.c
65 __rpc_disable_timer(struct rpc_wait_queue *queue, struct rpc_task *task) in __rpc_disable_timer() argument
72 if (list_empty(&queue->timer_list.list)) in __rpc_disable_timer()
73 del_timer(&queue->timer_list.timer); in __rpc_disable_timer()
77 rpc_set_queue_timer(struct rpc_wait_queue *queue, unsigned long expires) in rpc_set_queue_timer() argument
79 queue->timer_list.expires = expires; in rpc_set_queue_timer()
80 mod_timer(&queue->timer_list.timer, expires); in rpc_set_queue_timer()
87 __rpc_add_timer(struct rpc_wait_queue *queue, struct rpc_task *task) in __rpc_add_timer() argument
96 …if (list_empty(&queue->timer_list.list) || time_before(task->u.tk_wait.expires, queue->timer_list.… in __rpc_add_timer()
97 rpc_set_queue_timer(queue, task->u.tk_wait.expires); in __rpc_add_timer()
98 list_add(&task->u.tk_wait.timer_list, &queue->timer_list.list); in __rpc_add_timer()
[all …]
/linux-4.1.27/drivers/net/ethernet/ibm/ehea/
ehea_qmr.h
210 static inline void *hw_qeit_calc(struct hw_queue *queue, u64 q_offset) in hw_qeit_calc() argument
214 if (q_offset >= queue->queue_length) in hw_qeit_calc()
215 q_offset -= queue->queue_length; in hw_qeit_calc()
216 current_page = (queue->queue_pages)[q_offset >> EHEA_PAGESHIFT]; in hw_qeit_calc()
220 static inline void *hw_qeit_get(struct hw_queue *queue) in hw_qeit_get() argument
222 return hw_qeit_calc(queue, queue->current_q_offset); in hw_qeit_get()
225 static inline void hw_qeit_inc(struct hw_queue *queue) in hw_qeit_inc() argument
227 queue->current_q_offset += queue->qe_size; in hw_qeit_inc()
228 if (queue->current_q_offset >= queue->queue_length) { in hw_qeit_inc()
229 queue->current_q_offset = 0; in hw_qeit_inc()
[all …]
ehea_qmr.c
39 static void *hw_qpageit_get_inc(struct hw_queue *queue) in hw_qpageit_get_inc() argument
41 void *retvalue = hw_qeit_get(queue); in hw_qpageit_get_inc()
43 queue->current_q_offset += queue->pagesize; in hw_qpageit_get_inc()
44 if (queue->current_q_offset > queue->queue_length) { in hw_qpageit_get_inc()
45 queue->current_q_offset -= queue->pagesize; in hw_qpageit_get_inc()
54 static int hw_queue_ctor(struct hw_queue *queue, const u32 nr_of_pages, in hw_queue_ctor() argument
66 queue->queue_length = nr_of_pages * pagesize; in hw_queue_ctor()
67 queue->queue_pages = kmalloc_array(nr_of_pages, sizeof(void *), in hw_queue_ctor()
69 if (!queue->queue_pages) in hw_queue_ctor()
83 (queue->queue_pages)[i] = (struct ehea_page *)kpage; in hw_queue_ctor()
[all …]
/linux-4.1.27/drivers/net/wireless/ath/ath5k/
qcu.c
63 ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue) in ath5k_hw_num_tx_pending() argument
66 AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num); in ath5k_hw_num_tx_pending()
69 if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE) in ath5k_hw_num_tx_pending()
76 pending = ath5k_hw_reg_read(ah, AR5K_QUEUE_STATUS(queue)); in ath5k_hw_num_tx_pending()
82 if (!pending && AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, queue)) in ath5k_hw_num_tx_pending()
94 ath5k_hw_release_tx_queue(struct ath5k_hw *ah, unsigned int queue) in ath5k_hw_release_tx_queue() argument
96 if (WARN_ON(queue >= ah->ah_capabilities.cap_queues.q_tx_num)) in ath5k_hw_release_tx_queue()
100 ah->ah_txq[queue].tqi_type = AR5K_TX_QUEUE_INACTIVE; in ath5k_hw_release_tx_queue()
102 AR5K_Q_DISABLE_BITS(ah->ah_txq_status, queue); in ath5k_hw_release_tx_queue()
138 ath5k_hw_get_tx_queueprops(struct ath5k_hw *ah, int queue, in ath5k_hw_get_tx_queueprops() argument
[all …]
dma.c
130 ath5k_hw_start_tx_dma(struct ath5k_hw *ah, unsigned int queue) in ath5k_hw_start_tx_dma() argument
134 AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num); in ath5k_hw_start_tx_dma()
137 if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE) in ath5k_hw_start_tx_dma()
146 switch (ah->ah_txq[queue].tqi_type) { in ath5k_hw_start_tx_dma()
168 if (AR5K_REG_READ_Q(ah, AR5K_QCU_TXD, queue)) in ath5k_hw_start_tx_dma()
172 AR5K_REG_WRITE_Q(ah, AR5K_QCU_TXE, queue); in ath5k_hw_start_tx_dma()
188 ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue) in ath5k_hw_stop_tx_dma() argument
193 AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num); in ath5k_hw_stop_tx_dma()
196 if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE) in ath5k_hw_stop_tx_dma()
205 switch (ah->ah_txq[queue].tqi_type) { in ath5k_hw_stop_tx_dma()
[all …]
/linux-4.1.27/include/net/
request_sock.h
193 int reqsk_queue_alloc(struct request_sock_queue *queue,
196 void __reqsk_queue_destroy(struct request_sock_queue *queue);
197 void reqsk_queue_destroy(struct request_sock_queue *queue);
202 reqsk_queue_yank_acceptq(struct request_sock_queue *queue) in reqsk_queue_yank_acceptq() argument
204 struct request_sock *req = queue->rskq_accept_head; in reqsk_queue_yank_acceptq()
206 queue->rskq_accept_head = NULL; in reqsk_queue_yank_acceptq()
210 static inline int reqsk_queue_empty(struct request_sock_queue *queue) in reqsk_queue_empty() argument
212 return queue->rskq_accept_head == NULL; in reqsk_queue_empty()
215 static inline void reqsk_queue_add(struct request_sock_queue *queue, in reqsk_queue_add() argument
223 if (queue->rskq_accept_head == NULL) in reqsk_queue_add()
[all …]
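
The request_sock.h matches show the listener accept-queue idiom: a singly linked FIFO addressed by head and tail pointers, where the empty test is just a NULL head and reqsk_queue_yank_acceptq() detaches the whole chain in O(1) by stealing the head pointer. The sketch below reproduces only that head/tail shape in user space; struct req, its dl_next-style linkage and the function names are invented for illustration, and the kernel's locking is omitted.

    #include <stdio.h>
    #include <stdlib.h>

    /* Invented stand-in for struct request_sock. */
    struct req {
        int id;
        struct req *dl_next;  /* next request in the accept queue */
    };

    struct req_queue {
        struct req *accept_head;
        struct req *accept_tail;
    };

    static int queue_empty(const struct req_queue *q)
    {
        return q->accept_head == NULL;
    }

    /* Append at the tail, the way reqsk_queue_add() does. */
    static void queue_add(struct req_queue *q, struct req *r)
    {
        r->dl_next = NULL;
        if (q->accept_head == NULL)
            q->accept_head = r;
        else
            q->accept_tail->dl_next = r;
        q->accept_tail = r;
    }

    /* Steal the whole chain in O(1), like reqsk_queue_yank_acceptq(). */
    static struct req *queue_yank(struct req_queue *q)
    {
        struct req *head = q->accept_head;

        q->accept_head = q->accept_tail = NULL;
        return head;
    }

    int main(void)
    {
        struct req_queue q = { NULL, NULL };

        for (int i = 0; i < 3; i++) {
            struct req *r = malloc(sizeof(*r));
            if (!r)
                return 1;
            r->id = i;
            queue_add(&q, r);
        }
        for (struct req *r = queue_yank(&q); r != NULL; ) {
            struct req *next = r->dl_next;
            printf("accepted request %d\n", r->id);
            free(r);
            r = next;
        }
        return queue_empty(&q) ? 0 : 1;
    }
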
/linux-4.1.27/arch/mips/cavium-octeon/executive/
cvmx-pko.c
162 int queue; in cvmx_pko_shutdown() local
166 for (queue = 0; queue < CVMX_PKO_MAX_OUTPUT_QUEUES; queue++) { in cvmx_pko_shutdown()
171 config.s.queue = queue & 0x7f; in cvmx_pko_shutdown()
177 config1.s.qid7 = queue >> 7; in cvmx_pko_shutdown()
181 cvmx_cmd_queue_shutdown(CVMX_CMD_QUEUE_PKO(queue)); in cvmx_pko_shutdown()
209 uint64_t queue; in cvmx_pko_config_port() local
235 for (queue = 0; queue < num_queues; queue++) { in cvmx_pko_config_port()
238 && priority[queue] == in cvmx_pko_config_port()
240 static_priority_base = queue; in cvmx_pko_config_port()
244 && priority[queue] != CVMX_PKO_QUEUE_STATIC_PRIORITY in cvmx_pko_config_port()
[all …]
cvmx-helper-util.c
182 int cvmx_helper_setup_red_queue(int queue, int pass_thresh, int drop_thresh) in cvmx_helper_setup_red_queue() argument
193 cvmx_write_csr(CVMX_IPD_QOSX_RED_MARKS(queue), red_marks.u64); in cvmx_helper_setup_red_queue()
202 cvmx_write_csr(CVMX_IPD_RED_QUEX_PARAM(queue), red_param.u64); in cvmx_helper_setup_red_queue()
222 int queue; in cvmx_helper_setup_red() local
237 for (queue = 0; queue < 8; queue++) in cvmx_helper_setup_red()
238 cvmx_helper_setup_red_queue(queue, pass_thresh, drop_thresh); in cvmx_helper_setup_red()
/linux-4.1.27/net/core/
request_sock.c
40 int reqsk_queue_alloc(struct request_sock_queue *queue, in reqsk_queue_alloc() argument
61 spin_lock_init(&queue->syn_wait_lock); in reqsk_queue_alloc()
62 queue->rskq_accept_head = NULL; in reqsk_queue_alloc()
66 spin_lock_bh(&queue->syn_wait_lock); in reqsk_queue_alloc()
67 queue->listen_opt = lopt; in reqsk_queue_alloc()
68 spin_unlock_bh(&queue->syn_wait_lock); in reqsk_queue_alloc()
73 void __reqsk_queue_destroy(struct request_sock_queue *queue) in __reqsk_queue_destroy() argument
76 kvfree(queue->listen_opt); in __reqsk_queue_destroy()
80 struct request_sock_queue *queue) in reqsk_queue_yank_listen_sk() argument
84 spin_lock_bh(&queue->syn_wait_lock); in reqsk_queue_yank_listen_sk()
[all …]
net-sysfs.c
618 struct netdev_rx_queue *queue = to_rx_queue(kobj); in rx_queue_attr_show() local
623 return attribute->show(queue, attribute, buf); in rx_queue_attr_show()
630 struct netdev_rx_queue *queue = to_rx_queue(kobj); in rx_queue_attr_store() local
635 return attribute->store(queue, attribute, buf, count); in rx_queue_attr_store()
644 static ssize_t show_rps_map(struct netdev_rx_queue *queue, in show_rps_map() argument
655 map = rcu_dereference(queue->rps_map); in show_rps_map()
667 static ssize_t store_rps_map(struct netdev_rx_queue *queue, in store_rps_map() argument
708 old_map = rcu_dereference_protected(queue->rps_map, in store_rps_map()
710 rcu_assign_pointer(queue->rps_map, map); in store_rps_map()
723 static ssize_t show_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue, in show_rps_dev_flow_table_cnt() argument
[all …]
/linux-4.1.27/net/netfilter/
nfnetlink_queue_core.c
158 static void nfqnl_flush(struct nfqnl_instance *queue, nfqnl_cmpfn cmpfn,
188 __enqueue_entry(struct nfqnl_instance *queue, struct nf_queue_entry *entry) in __enqueue_entry() argument
190 list_add_tail(&entry->list, &queue->queue_list); in __enqueue_entry()
191 queue->queue_total++; in __enqueue_entry()
195 __dequeue_entry(struct nfqnl_instance *queue, struct nf_queue_entry *entry) in __dequeue_entry() argument
198 queue->queue_total--; in __dequeue_entry()
202 find_dequeue_entry(struct nfqnl_instance *queue, unsigned int id) in find_dequeue_entry() argument
206 spin_lock_bh(&queue->lock); in find_dequeue_entry()
208 list_for_each_entry(i, &queue->queue_list, list) { in find_dequeue_entry()
216 __dequeue_entry(queue, entry); in find_dequeue_entry()
[all …]
xt_NFQUEUE.c
42 u32 queue = info->queuenum; in nfqueue_tg_v1() local
45 queue = nfqueue_hash(skb, queue, info->queues_total, in nfqueue_tg_v1()
48 return NF_QUEUE_NR(queue); in nfqueue_tg_v1()
91 u32 queue = info->queuenum; in nfqueue_tg_v3() local
98 queue = info->queuenum + cpu % info->queues_total; in nfqueue_tg_v3()
100 queue = nfqueue_hash(skb, queue, info->queues_total, in nfqueue_tg_v3()
105 ret = NF_QUEUE_NR(queue); in nfqueue_tg_v3()
nft_queue.c
35 u32 queue = priv->queuenum; in nft_queue_eval() local
42 queue = priv->queuenum + cpu % priv->queues_total; in nft_queue_eval()
44 queue = nfqueue_hash(pkt->skb, queue, in nft_queue_eval()
50 ret = NF_QUEUE_NR(queue); in nft_queue_eval()
/linux-4.1.27/arch/arm/mach-ixp4xx/
ixp4xx_qmgr.c
28 void qmgr_set_irq(unsigned int queue, int src, in qmgr_set_irq() argument
34 if (queue < HALF_QUEUES) { in qmgr_set_irq()
38 reg = &qmgr_regs->irqsrc[queue >> 3]; /* 8 queues per u32 */ in qmgr_set_irq()
39 bit = (queue % 8) * 4; /* 3 bits + 1 reserved bit per queue */ in qmgr_set_irq()
46 irq_handlers[queue] = handler; in qmgr_set_irq()
47 irq_pdevs[queue] = pdev; in qmgr_set_irq()
115 void qmgr_enable_irq(unsigned int queue) in qmgr_enable_irq() argument
118 int half = queue / 32; in qmgr_enable_irq()
119 u32 mask = 1 << (queue & (HALF_QUEUES - 1)); in qmgr_enable_irq()
127 void qmgr_disable_irq(unsigned int queue) in qmgr_disable_irq() argument
[all …]
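
qmgr_set_irq() above packs a per-queue interrupt-source code into a 4-bit field (3 source bits plus one reserved bit), so eight queues share each 32-bit irqsrc word: the word index is queue >> 3 and the field starts at bit (queue % 8) * 4. Below is a minimal sketch of that read-modify-write on an ordinary array instead of the hardware register bank; the names and anything beyond the bit layout visible in the excerpt are assumptions.

    #include <stdint.h>
    #include <stdio.h>

    #define NUM_QUEUES 64

    /* Stand-in for the qmgr irqsrc register bank: 8 queues per 32-bit word. */
    static uint32_t irqsrc[NUM_QUEUES / 8];

    static void set_irq_src(unsigned int queue, unsigned int src)
    {
        uint32_t *reg = &irqsrc[queue >> 3];   /* 8 queues per u32 */
        unsigned int bit = (queue % 8) * 4;    /* 3-bit source + 1 reserved bit */

        *reg = (*reg & ~(7U << bit)) | ((src & 7U) << bit);
    }

    int main(void)
    {
        set_irq_src(0, 5);
        set_irq_src(7, 3);
        set_irq_src(9, 2);
        printf("irqsrc[0]=0x%08x irqsrc[1]=0x%08x\n",
               (unsigned int)irqsrc[0], (unsigned int)irqsrc[1]);
        /* expected: irqsrc[0]=0x30000005 irqsrc[1]=0x00000020 */
        return 0;
    }
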
/linux-4.1.27/net/sctp/
inqueue.c
47 void sctp_inq_init(struct sctp_inq *queue) in sctp_inq_init() argument
49 INIT_LIST_HEAD(&queue->in_chunk_list); in sctp_inq_init()
50 queue->in_progress = NULL; in sctp_inq_init()
53 INIT_WORK(&queue->immediate, NULL); in sctp_inq_init()
57 void sctp_inq_free(struct sctp_inq *queue) in sctp_inq_free() argument
62 list_for_each_entry_safe(chunk, tmp, &queue->in_chunk_list, list) { in sctp_inq_free()
70 if (queue->in_progress) { in sctp_inq_free()
71 sctp_chunk_free(queue->in_progress); in sctp_inq_free()
72 queue->in_progress = NULL; in sctp_inq_free()
99 struct sctp_chunkhdr *sctp_inq_peek(struct sctp_inq *queue) in sctp_inq_peek() argument
[all …]
/linux-4.1.27/sound/core/seq/
seq_queue.c
72 q->queue = i; in queue_list_add()
121 q->queue = -1; in queue_new()
199 snd_seq_queue_use(q->queue, client, 1); /* use this queue */ in snd_seq_queue_alloc()
200 return q->queue; in snd_seq_queue_alloc()
320 dest = cell->event.queue; /* destination queue */ in snd_seq_enqueue_event()
449 struct snd_seq_queue *queue; in snd_seq_queue_timer_open() local
452 queue = queueptr(queueid); in snd_seq_queue_timer_open()
453 if (queue == NULL) in snd_seq_queue_timer_open()
455 tmr = queue->timer; in snd_seq_queue_timer_open()
456 if ((result = snd_seq_timer_open(queue)) < 0) { in snd_seq_queue_timer_open()
[all …]
seq_clientmgr.c
545 bounce_ev.queue = SNDRV_SEQ_QUEUE_DIRECT; in bounce_error_event()
569 int queue, int real_time) in update_timestamp_of_queue() argument
573 q = queueptr(queue); in update_timestamp_of_queue()
576 event->queue = queue; in update_timestamp_of_queue()
687 update_timestamp_of_queue(event, subs->info.queue, in deliver_to_subscribers()
818 if (event->queue == SNDRV_SEQ_ADDRESS_SUBSCRIBERS || in snd_seq_deliver_event()
822 else if (event->queue == SNDRV_SEQ_ADDRESS_BROADCAST || in snd_seq_deliver_event()
927 if (event->queue == SNDRV_SEQ_ADDRESS_SUBSCRIBERS) { in snd_seq_client_enqueue_event()
929 event->queue = SNDRV_SEQ_QUEUE_DIRECT; in snd_seq_client_enqueue_event()
932 if (event->queue == SNDRV_SEQ_ADDRESS_BROADCAST) { in snd_seq_client_enqueue_event()
[all …]
/linux-4.1.27/net/irda/
irqueue.c
233 static void enqueue_first(irda_queue_t **queue, irda_queue_t* element) in enqueue_first() argument
239 if ( *queue == NULL ) { in enqueue_first()
243 element->q_next = element->q_prev = *queue = element; in enqueue_first()
249 element->q_next = (*queue); in enqueue_first()
250 (*queue)->q_prev->q_next = element; in enqueue_first()
251 element->q_prev = (*queue)->q_prev; in enqueue_first()
252 (*queue)->q_prev = element; in enqueue_first()
253 (*queue) = element; in enqueue_first()
264 static irda_queue_t *dequeue_first(irda_queue_t **queue) in dequeue_first() argument
273 ret = *queue; in dequeue_first()
[all …]
/linux-4.1.27/drivers/gpu/drm/amd/amdkfd/
kfd_kernel_queue.c
124 if (init_queue(&kq->queue, prop) != 0) in initialize()
127 kq->queue->device = dev; in initialize()
128 kq->queue->process = kfd_get_process(current); in initialize()
130 retval = kq->mqd->init_mqd(kq->mqd, &kq->queue->mqd, in initialize()
131 &kq->queue->mqd_mem_obj, in initialize()
132 &kq->queue->gart_mqd_addr, in initialize()
133 &kq->queue->properties); in initialize()
140 kq->queue->pipe = KFD_CIK_HIQ_PIPE; in initialize()
141 kq->queue->queue = KFD_CIK_HIQ_QUEUE; in initialize()
142 kq->mqd->load_mqd(kq->mqd, kq->queue->mqd, kq->queue->pipe, in initialize()
[all …]
kfd_queue.c
45 void print_queue(struct queue *q) in print_queue()
66 int init_queue(struct queue **q, struct queue_properties properties) in init_queue()
68 struct queue *tmp; in init_queue()
72 tmp = kzalloc(sizeof(struct queue), GFP_KERNEL); in init_queue()
82 void uninit_queue(struct queue *q) in uninit_queue()
kfd_device_queue_manager.c
44 struct queue *q,
51 struct queue *q,
95 struct queue *q) in allocate_vmid()
119 struct queue *q) in deallocate_vmid()
132 struct queue *q, in create_queue_nocpsch()
195 static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q) in allocate_hqd()
212 q->queue = bit; in allocate_hqd()
222 __func__, q->pipe, q->queue); in allocate_hqd()
230 struct queue *q) in deallocate_hqd()
232 set_bit(q->queue, (unsigned long *)&dqm->allocated_queues[q->pipe]); in deallocate_hqd()
[all …]
kfd_device_queue_manager.h
88 struct queue *q,
93 struct queue *q);
95 struct queue *q);
/linux-4.1.27/drivers/soc/ti/
knav_qmss_acc.c
45 int range_base, queue; in __knav_acc_notify() local
50 for (queue = 0; queue < range->num_queues; queue++) { in __knav_acc_notify()
52 queue); in __knav_acc_notify()
56 range_base + queue); in __knav_acc_notify()
61 queue = acc->channel - range->acc_info.start_channel; in __knav_acc_notify()
62 inst = knav_range_offset_to_inst(kdev, range, queue); in __knav_acc_notify()
64 range_base + queue); in __knav_acc_notify()
104 int range_base, channel, queue = 0; in knav_acc_int_handler() local
115 for (queue = 0; queue < range->num_irqs; queue++) in knav_acc_int_handler()
116 if (range->irqs[queue].irq == irq) in knav_acc_int_handler()
[all …]
/linux-4.1.27/Documentation/ABI/testing/
sysfs-class-net-queues
1 What: /sys/class/<iface>/queues/rx-<queue>/rps_cpus
8 network device queue. Possible values depend on the number
11 What: /sys/class/<iface>/queues/rx-<queue>/rps_flow_cnt
17 processed by this particular network device receive queue.
19 What: /sys/class/<iface>/queues/tx-<queue>/tx_timeout
25 network interface transmit queue.
27 What: /sys/class/<iface>/queues/tx-<queue>/tx_maxrate
32 A Mbps max-rate set for the queue, a value of zero means disabled,
35 What: /sys/class/<iface>/queues/tx-<queue>/xps_cpus
42 network device transmit queue. Possible values depend on the
[all …]
sysfs-class-pktcdvd
43 queue.
45 congestion_off (0644) If bio write queue size is below
49 congestion_on (0644) If bio write queue size is higher
54 so that bio write queue size is
/linux-4.1.27/drivers/staging/rtl8723au/os_dep/
xmit_linux.c
63 u16 queue; in rtw_os_pkt_complete23a() local
65 queue = skb_get_queue_mapping(pkt); in rtw_os_pkt_complete23a()
67 if (__netif_subqueue_stopped(padapter->pnetdev, queue) && in rtw_os_pkt_complete23a()
68 (pxmitpriv->hwxmits[queue].accnt < WMM_XMIT_THRESHOLD)) in rtw_os_pkt_complete23a()
69 netif_wake_subqueue(padapter->pnetdev, queue); in rtw_os_pkt_complete23a()
71 if (__netif_subqueue_stopped(padapter->pnetdev, queue)) in rtw_os_pkt_complete23a()
72 netif_wake_subqueue(padapter->pnetdev, queue); in rtw_os_pkt_complete23a()
105 u16 queue; in rtw_check_xmit_resource() local
107 queue = skb_get_queue_mapping(pkt); in rtw_check_xmit_resource()
110 if (pxmitpriv->hwxmits[queue].accnt > WMM_XMIT_THRESHOLD) in rtw_check_xmit_resource()
[all …]
/linux-4.1.27/drivers/isdn/i4l/
isdn_net.h
85 lp = nd->queue; /* get lp on top of queue */ in isdn_net_get_locked_lp()
86 while (isdn_net_lp_busy(nd->queue)) { in isdn_net_get_locked_lp()
87 nd->queue = nd->queue->next; in isdn_net_get_locked_lp()
88 if (nd->queue == lp) { /* not found -- should never happen */ in isdn_net_get_locked_lp()
93 lp = nd->queue; in isdn_net_get_locked_lp()
94 nd->queue = nd->queue->next; in isdn_net_get_locked_lp()
114 lp = nd->queue; in isdn_net_add_to_bundle()
121 nd->queue = nlp; in isdn_net_add_to_bundle()
141 if (master_lp->netdev->queue == lp) { in isdn_net_rm_from_bundle()
142 master_lp->netdev->queue = lp->next; in isdn_net_rm_from_bundle()
[all …]
/linux-4.1.27/drivers/block/rsxx/
dev.c
262 card->queue = blk_alloc_queue(GFP_KERNEL); in rsxx_setup_dev()
263 if (!card->queue) { in rsxx_setup_dev()
272 blk_cleanup_queue(card->queue); in rsxx_setup_dev()
279 blk_queue_dma_alignment(card->queue, blk_size - 1); in rsxx_setup_dev()
280 blk_queue_logical_block_size(card->queue, blk_size); in rsxx_setup_dev()
283 blk_queue_make_request(card->queue, rsxx_make_request); in rsxx_setup_dev()
284 blk_queue_bounce_limit(card->queue, BLK_BOUNCE_ANY); in rsxx_setup_dev()
285 blk_queue_max_hw_sectors(card->queue, blkdev_max_hw_sectors); in rsxx_setup_dev()
286 blk_queue_physical_block_size(card->queue, RSXX_HW_BLK_SIZE); in rsxx_setup_dev()
288 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, card->queue); in rsxx_setup_dev()
[all …]
/linux-4.1.27/drivers/net/fddi/skfp/
hwmtm.c
85 static u_long repair_txd_ring(struct s_smc *smc, struct s_smt_tx_queue *queue);
86 static u_long repair_rxd_ring(struct s_smc *smc, struct s_smt_rx_queue *queue);
358 struct s_smt_tx_queue *queue ; in init_txd_ring() local
366 queue = smc->hw.fp.tx[QUEUE_A0] ; in init_txd_ring()
372 queue->tx_curr_put = queue->tx_curr_get = ds ; in init_txd_ring()
374 queue->tx_free = HWM_ASYNC_TXD_COUNT ; in init_txd_ring()
375 queue->tx_used = 0 ; in init_txd_ring()
380 queue = smc->hw.fp.tx[QUEUE_S] ; in init_txd_ring()
386 queue->tx_curr_put = queue->tx_curr_get = ds ; in init_txd_ring()
387 queue->tx_free = HWM_SYNC_TXD_COUNT ; in init_txd_ring()
[all …]
/linux-4.1.27/drivers/gpu/drm/ttm/
ttm_lock.c
48 init_waitqueue_head(&lock->queue); in ttm_lock_init()
60 wake_up_all(&lock->queue); in ttm_read_unlock()
88 ret = wait_event_interruptible(lock->queue, in ttm_read_lock()
91 wait_event(lock->queue, __ttm_read_lock(lock)); in ttm_read_lock()
127 (lock->queue, __ttm_read_trylock(lock, &locked)); in ttm_read_trylock()
129 wait_event(lock->queue, __ttm_read_trylock(lock, &locked)); in ttm_read_trylock()
143 wake_up_all(&lock->queue); in ttm_write_unlock()
174 ret = wait_event_interruptible(lock->queue, in ttm_write_lock()
179 wake_up_all(&lock->queue); in ttm_write_lock()
183 wait_event(lock->queue, __ttm_read_lock(lock)); in ttm_write_lock()
[all …]
/linux-4.1.27/drivers/net/ethernet/cadence/
macb.c
69 static struct macb_dma_desc *macb_tx_desc(struct macb_queue *queue, in macb_tx_desc() argument
72 return &queue->tx_ring[macb_tx_ring_wrap(index)]; in macb_tx_desc()
75 static struct macb_tx_skb *macb_tx_skb(struct macb_queue *queue, in macb_tx_skb() argument
78 return &queue->tx_skb[macb_tx_ring_wrap(index)]; in macb_tx_skb()
81 static dma_addr_t macb_tx_dma(struct macb_queue *queue, unsigned int index) in macb_tx_dma() argument
87 return queue->tx_ring_dma + offset; in macb_tx_dma()
500 struct macb_queue *queue = container_of(work, struct macb_queue, in macb_tx_error_task() local
502 struct macb *bp = queue->bp; in macb_tx_error_task()
510 (unsigned int)(queue - bp->queues), in macb_tx_error_task()
511 queue->tx_tail, queue->tx_head); in macb_tx_error_task()
[all …]
/linux-4.1.27/drivers/staging/unisys/visorchannel/
visorchannel_funcs.c
309 #define SIG_WRITE_FIELD(channel, queue, sig_hdr, FIELD) \ argument
311 SIG_QUEUE_OFFSET(&channel->chan_hdr, queue)+ \
317 sig_read_header(struct visorchannel *channel, u32 queue, in sig_read_header() argument
328 SIG_QUEUE_OFFSET(&channel->chan_hdr, queue), in sig_read_header()
339 sig_do_data(struct visorchannel *channel, u32 queue, in sig_do_data() argument
344 int signal_data_offset = SIG_DATA_OFFSET(&channel->chan_hdr, queue, in sig_do_data()
364 sig_read_data(struct visorchannel *channel, u32 queue, in sig_read_data() argument
367 return sig_do_data(channel, queue, sig_hdr, slot, data, FALSE); in sig_read_data()
371 sig_write_data(struct visorchannel *channel, u32 queue, in sig_write_data() argument
374 return sig_do_data(channel, queue, sig_hdr, slot, data, TRUE); in sig_write_data()
[all …]
visorchannel.h
55 BOOL visorchannel_signalremove(struct visorchannel *channel, u32 queue,
57 BOOL visorchannel_signalinsert(struct visorchannel *channel, u32 queue,
60 u32 queue);
61 int visorchannel_signalqueue_max_slots(struct visorchannel *channel, u32 queue);
/linux-4.1.27/Documentation/block/
null_blk.txt
10 Single-queue block-layer
12 - Single submission queue per device.
14 Multi-queue block-layer
21 All of them have a completion queue for each core in the system.
25 queue_mode=[0-2]: Default: 2-Multi-queue
29 1: Single-queue.
30 2: Multi-queue.
60 defaults to 1 on single-queue and bio-based instances. For multi-queue,
64 The hardware queue depth of the device.
66 III: Multi-queue specific parameters
[all …]
switching-sched.txt
5 Each io queue has a set of io scheduler tunables associated with it. These
9 /sys/block/<device>/queue/iosched
24 echo SCHEDNAME > /sys/block/DEV/queue/scheduler
30 a "cat /sys/block/DEV/queue/scheduler" - the list of valid names
33 # cat /sys/block/hda/queue/scheduler
35 # echo deadline > /sys/block/hda/queue/scheduler
36 # cat /sys/block/hda/queue/scheduler
cfq-iosched.txt
7 CFQ maintains the per process queue for the processes which request I/O
19 queue is expired and CFQ selects next queue to dispatch from.
72 queue level. This was introduced after a bottleneck was observed
73 in higher end storage due to idle on sequential queue and allow dispatch
74 from a single queue. The idea with this parameter is that it can be run with
101 time for each process to issue I/O request before the cfq queue is switched.
112 This parameter is same as of slice_sync but for asynchronous queue. The
118 device request queue in queue's slice time. The maximum number of request that
124 When a queue is selected for execution, the queues IO requests are only
126 queue. This parameter is used to calculate the time slice of synchronous
[all …]
biodoc.txt
47 - Per-queue parameters
112 Tuning at a per-queue level:
114 i. Per-queue limits/values exported to the generic layer by the driver
117 a per-queue level (e.g maximum request size, maximum number of segments in
121 major/minor are now directly associated with the queue. Some of these may
123 have been incorporated into a queue flags field rather than separate fields
127 Some new queue property settings:
136 - The request queue's max_sectors, which is a soft size in
140 - The request queue's max_hw_sectors, which is a hard limit
163 New queue flags:
[all …]
/linux-4.1.27/arch/m68k/emu/
nfblock.c
58 struct request_queue *queue; member
62 static void nfhd_make_request(struct request_queue *queue, struct bio *bio) in nfhd_make_request() argument
64 struct nfhd_device *dev = queue->queuedata; in nfhd_make_request()
120 dev->queue = blk_alloc_queue(GFP_KERNEL); in nfhd_init_one()
121 if (dev->queue == NULL) in nfhd_init_one()
124 dev->queue->queuedata = dev; in nfhd_init_one()
125 blk_queue_make_request(dev->queue, nfhd_make_request); in nfhd_init_one()
126 blk_queue_logical_block_size(dev->queue, bsize); in nfhd_init_one()
138 dev->disk->queue = dev->queue; in nfhd_init_one()
147 blk_cleanup_queue(dev->queue); in nfhd_init_one()
[all …]
/linux-4.1.27/drivers/mmc/card/
queue.c
52 struct request_queue *q = mq->queue; in mmc_queue_thread()
203 mq->queue = blk_init_queue(mmc_request_fn, lock); in mmc_init_queue()
204 if (!mq->queue) in mmc_init_queue()
209 mq->queue->queuedata = mq; in mmc_init_queue()
211 blk_queue_prep_rq(mq->queue, mmc_prep_request); in mmc_init_queue()
212 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue); in mmc_init_queue()
213 queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, mq->queue); in mmc_init_queue()
215 mmc_queue_setup_discard(mq->queue, card); in mmc_init_queue()
248 blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY); in mmc_init_queue()
249 blk_queue_max_hw_sectors(mq->queue, bouncesz / 512); in mmc_init_queue()
[all …]
block.c
98 struct mmc_queue queue; member
186 blk_cleanup_queue(md->queue.queue); in mmc_blk_put()
201 struct mmc_card *card = md->queue.card; in power_ro_lock_show()
231 card = md->queue.card; in power_ro_lock_store()
482 card = md->queue.card; in mmc_blk_ioctl_cmd()
1049 struct mmc_card *card = md->queue.card; in mmc_blk_issue_discard_rq()
1093 struct mmc_card *card = md->queue.card; in mmc_blk_issue_secdiscard_rq()
1159 struct mmc_card *card = md->queue.card; in mmc_blk_issue_flush()
1539 struct request_queue *q = mq->queue; in mmc_blk_prep_packed_list()
1810 struct request_queue *q = mq->queue; in mmc_blk_revert_packed_req()
[all …]
/linux-4.1.27/scripts/
headerdep.pl
88 my @queue = @_;
89 while(@queue) {
90 my $header = pop @queue;
105 push @queue, $dep;
142 my @queue = map { [[0, $_]] } @_;
143 while(@queue) {
144 my $top = pop @queue;
158 push @queue, $chain;
/linux-4.1.27/drivers/staging/rtl8188eu/os_dep/
xmit_linux.c
108 u16 queue; in rtw_os_pkt_complete() local
111 queue = skb_get_queue_mapping(pkt); in rtw_os_pkt_complete()
113 if (__netif_subqueue_stopped(padapter->pnetdev, queue) && in rtw_os_pkt_complete()
114 (pxmitpriv->hwxmits[queue].accnt < WMM_XMIT_THRESHOLD)) in rtw_os_pkt_complete()
115 netif_wake_subqueue(padapter->pnetdev, queue); in rtw_os_pkt_complete()
117 if (__netif_subqueue_stopped(padapter->pnetdev, queue)) in rtw_os_pkt_complete()
118 netif_wake_subqueue(padapter->pnetdev, queue); in rtw_os_pkt_complete()
151 u16 queue; in rtw_check_xmit_resource() local
153 queue = skb_get_queue_mapping(pkt); in rtw_check_xmit_resource()
156 if (pxmitpriv->hwxmits[queue].accnt > WMM_XMIT_THRESHOLD) in rtw_check_xmit_resource()
[all …]
/linux-4.1.27/crypto/
Dcryptd.c37 struct crypto_queue queue; member
47 struct cryptd_queue *queue; member
52 struct cryptd_queue *queue; member
57 struct cryptd_queue *queue; member
87 static int cryptd_init_queue(struct cryptd_queue *queue, in cryptd_init_queue() argument
93 queue->cpu_queue = alloc_percpu(struct cryptd_cpu_queue); in cryptd_init_queue()
94 if (!queue->cpu_queue) in cryptd_init_queue()
97 cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu); in cryptd_init_queue()
98 crypto_init_queue(&cpu_queue->queue, max_cpu_qlen); in cryptd_init_queue()
104 static void cryptd_fini_queue(struct cryptd_queue *queue) in cryptd_fini_queue() argument
[all …]
Dmcryptd.c45 struct mcryptd_queue *queue; member
68 static int mcryptd_init_queue(struct mcryptd_queue *queue, in mcryptd_init_queue() argument
74 queue->cpu_queue = alloc_percpu(struct mcryptd_cpu_queue); in mcryptd_init_queue()
75 pr_debug("mqueue:%p mcryptd_cpu_queue %p\n", queue, queue->cpu_queue); in mcryptd_init_queue()
76 if (!queue->cpu_queue) in mcryptd_init_queue()
79 cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu); in mcryptd_init_queue()
80 pr_debug("cpu_queue #%d %p\n", cpu, queue->cpu_queue); in mcryptd_init_queue()
81 crypto_init_queue(&cpu_queue->queue, max_cpu_qlen); in mcryptd_init_queue()
87 static void mcryptd_fini_queue(struct mcryptd_queue *queue) in mcryptd_fini_queue() argument
93 cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu); in mcryptd_fini_queue()
[all …]
Dalgapi.c851 void crypto_init_queue(struct crypto_queue *queue, unsigned int max_qlen) in crypto_init_queue() argument
853 INIT_LIST_HEAD(&queue->list); in crypto_init_queue()
854 queue->backlog = &queue->list; in crypto_init_queue()
855 queue->qlen = 0; in crypto_init_queue()
856 queue->max_qlen = max_qlen; in crypto_init_queue()
860 int crypto_enqueue_request(struct crypto_queue *queue, in crypto_enqueue_request() argument
865 if (unlikely(queue->qlen >= queue->max_qlen)) { in crypto_enqueue_request()
869 if (queue->backlog == &queue->list) in crypto_enqueue_request()
870 queue->backlog = &request->list; in crypto_enqueue_request()
873 queue->qlen++; in crypto_enqueue_request()
[all …]
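crypto_init_queue(), crypto_enqueue_request() and crypto_dequeue_request(), shown above and again in include/crypto/algapi.h further down, give a driver a bounded, backlog-aware request queue. A sketch of the usual consumer side, assuming a hypothetical per-device context (the struct, lock usage and queue depth are placeholders, not a real driver):

#include <linux/spinlock.h>
#include <linux/crypto.h>
#include <crypto/algapi.h>

struct example_dev {
        spinlock_t lock;
        struct crypto_queue queue;      /* crypto_init_queue(&dev->queue, 50) at probe */
};

/* Pull one request off the queue and tell any backlogged submitter that its
 * request has moved from "backlog" to "in progress". */
static void example_dequeue_one(struct example_dev *dev)
{
        struct crypto_async_request *req, *backlog;

        spin_lock_bh(&dev->lock);
        backlog = crypto_get_backlog(&dev->queue);
        req = crypto_dequeue_request(&dev->queue);
        spin_unlock_bh(&dev->lock);

        if (!req)
                return;
        if (backlog)
                backlog->complete(backlog, -EINPROGRESS);

        /* ... hand req to the hardware, then call req->complete(req, err) ... */
}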
Dchainiv.c42 struct crypto_queue queue; member
128 if (!ctx->queue.qlen) { in async_chainiv_schedule_work()
132 if (!ctx->queue.qlen || in async_chainiv_schedule_work()
151 err = skcipher_enqueue_givcrypt(&ctx->queue, req); in async_chainiv_postpone_request()
197 if (ctx->queue.qlen) { in async_chainiv_givencrypt()
246 req = skcipher_dequeue_givcrypt(&ctx->queue); in async_chainiv_do_postponed()
270 crypto_init_queue(&ctx->queue, 100); in async_chainiv_init()
280 BUG_ON(test_bit(CHAINIV_STATE_INUSE, &ctx->state) || ctx->queue.qlen); in async_chainiv_exit()
/linux-4.1.27/block/
Dnoop-iosched.c12 struct list_head queue; member
25 if (!list_empty(&nd->queue)) { in noop_dispatch()
27 rq = list_entry(nd->queue.next, struct request, queuelist); in noop_dispatch()
39 list_add_tail(&rq->queuelist, &nd->queue); in noop_add_request()
47 if (rq->queuelist.prev == &nd->queue) in noop_former_request()
57 if (rq->queuelist.next == &nd->queue) in noop_latter_request()
78 INIT_LIST_HEAD(&nd->queue); in noop_init_queue()
90 BUG_ON(!list_empty(&nd->queue)); in noop_exit_queue()
Dblk-mq-cpumap.c36 unsigned int i, nr_cpus, nr_uniq_cpus, queue, first_sibling; in blk_mq_update_queue_map() local
52 queue = 0; in blk_mq_update_queue_map()
65 map[i] = cpu_to_queue_index(nr_cpus, nr_queues, queue); in blk_mq_update_queue_map()
66 queue++; in blk_mq_update_queue_map()
78 queue); in blk_mq_update_queue_map()
79 queue++; in blk_mq_update_queue_map()
/linux-4.1.27/drivers/net/ethernet/marvell/
Dmvneta.c745 int queue; in mvneta_port_up() local
751 for (queue = 0; queue < txq_number; queue++) { in mvneta_port_up()
752 struct mvneta_tx_queue *txq = &pp->txqs[queue]; in mvneta_port_up()
754 q_map |= (1 << queue); in mvneta_port_up()
760 for (queue = 0; queue < rxq_number; queue++) { in mvneta_port_up()
761 struct mvneta_rx_queue *rxq = &pp->rxqs[queue]; in mvneta_port_up()
763 q_map |= (1 << queue); in mvneta_port_up()
867 static void mvneta_set_ucast_table(struct mvneta_port *pp, int queue) in mvneta_set_ucast_table() argument
872 if (queue == -1) { in mvneta_set_ucast_table()
875 val = 0x1 | (queue << 1); in mvneta_set_ucast_table()
[all …]
Dmvpp2.c3920 int tx_port_num, val, queue, ptxq, lrxq; in mvpp2_defaults_set() local
3940 for (queue = 0; queue < MVPP2_MAX_TXQ; queue++) { in mvpp2_defaults_set()
3941 ptxq = mvpp2_txq_phys(port->id, queue); in mvpp2_defaults_set()
3966 queue = port->rxqs[lrxq]->id; in mvpp2_defaults_set()
3967 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue)); in mvpp2_defaults_set()
3970 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val); in mvpp2_defaults_set()
3981 int lrxq, queue; in mvpp2_ingress_enable() local
3984 queue = port->rxqs[lrxq]->id; in mvpp2_ingress_enable()
3985 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue)); in mvpp2_ingress_enable()
3987 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val); in mvpp2_ingress_enable()
[all …]
/linux-4.1.27/Documentation/devicetree/bindings/soc/ti/
Dkeystone-navigator-qmss.txt5 multi-core Navigator. QMSS consist of queue managers, packed-data structure
15 queue pool management (allocation, push, pop and notify) and descriptor
22 - queue-range : <start number> total range of queue numbers for the device.
28 - qmgrs : child node describing the individual queue managers on the
31 -- managed-queues : the actual queues managed by each queue manager
32 instance, specified as <"base queue #" "# of queues">.
40 - Queue Management/Queue Proxy region for queue Push.
41 - Queue Management/Queue Proxy region for queue Pop.
42 - queue-pools : child node classifying the queue ranges into pools.
50 -- qrange : number of queues to use per queue range, specified as
[all …]
/linux-4.1.27/arch/mips/include/asm/octeon/
Dcvmx-pko.h152 uint64_t queue:9; member
157 uint64_t queue:9;
326 static inline void cvmx_pko_doorbell(uint64_t port, uint64_t queue, in cvmx_pko_doorbell() argument
336 ptr.s.queue = queue; in cvmx_pko_doorbell()
378 static inline void cvmx_pko_send_packet_prepare(uint64_t port, uint64_t queue, in cvmx_pko_send_packet_prepare() argument
396 (CVMX_TAG_SUBGROUP_MASK & queue); in cvmx_pko_send_packet_prepare()
421 uint64_t queue, in cvmx_pko_send_packet_finish() argument
429 result = cvmx_cmd_queue_write2(CVMX_CMD_QUEUE_PKO(queue), in cvmx_pko_send_packet_finish()
433 cvmx_pko_doorbell(port, queue, 2); in cvmx_pko_send_packet_finish()
464 uint64_t queue, in cvmx_pko_send_packet_finish3() argument
[all …]
/linux-4.1.27/drivers/scsi/aacraid/
Dcomminit.c298 comm->queue[HostNormCmdQueue].base = queues; in aac_comm_init()
299 aac_queue_init(dev, &comm->queue[HostNormCmdQueue], headers, HOST_NORM_CMD_ENTRIES); in aac_comm_init()
304 comm->queue[HostHighCmdQueue].base = queues; in aac_comm_init()
305 aac_queue_init(dev, &comm->queue[HostHighCmdQueue], headers, HOST_HIGH_CMD_ENTRIES); in aac_comm_init()
311 comm->queue[AdapNormCmdQueue].base = queues; in aac_comm_init()
312 aac_queue_init(dev, &comm->queue[AdapNormCmdQueue], headers, ADAP_NORM_CMD_ENTRIES); in aac_comm_init()
318 comm->queue[AdapHighCmdQueue].base = queues; in aac_comm_init()
319 aac_queue_init(dev, &comm->queue[AdapHighCmdQueue], headers, ADAP_HIGH_CMD_ENTRIES); in aac_comm_init()
325 comm->queue[HostNormRespQueue].base = queues; in aac_comm_init()
326 aac_queue_init(dev, &comm->queue[HostNormRespQueue], headers, HOST_NORM_RESP_ENTRIES); in aac_comm_init()
[all …]
/linux-4.1.27/Documentation/networking/
Dscaling.txt24 (multi-queue). On reception, a NIC can send different packets to different
28 queue, which in turn can be processed by separate CPUs. This mechanism is
31 Multi-queue distribution can also be used for traffic prioritization, but
38 stores a queue number. The receive queue for a packet is determined
45 can be directed to their own receive queue. Such “n-tuple” filters can
50 The driver for a multi-queue capable NIC typically provides a kernel
53 num_queues. A typical RSS configuration would be to have one receive queue
58 The indirection table of an RSS device, which resolves a queue by masked
68 Each receive queue has a separate IRQ associated with it. The NIC triggers
69 this to notify a CPU when new packets arrive on the given queue. The
[all …]
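The RSS indirection lookup described in scaling.txt is conceptually just a masked flow hash indexing a small table of queue numbers. An illustrative sketch, not tied to any real driver, and assuming a power-of-two table size:

/* Illustration only: resolve a receive queue from a flow hash the way an
 * RSS indirection table does. */
static unsigned int rss_pick_queue(const unsigned int *indir_table,
                                   unsigned int table_size,
                                   unsigned int flow_hash)
{
        return indir_table[flow_hash & (table_size - 1)];
}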
Dmultiqueue.txt23 netif_{start|stop|wake}_subqueue() functions to manage each queue while the
33 default pfifo_fast qdisc. This qdisc supports one qdisc per hardware queue.
37 the base driver to determine which queue to send the skb to.
40 blocking. It will cycle through the bands and verify that the hardware queue
45 will be queued to the band associated with the hardware queue.
61 band 0 => queue 0
62 band 1 => queue 1
63 band 2 => queue 2
64 band 3 => queue 3
66 Traffic will begin flowing through each queue based on either the simple_tx_hash
[all …]
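Drivers cooperate with the per-queue qdisc model described above by stopping and waking individual subqueues; the rtl8188eu excerpt earlier in these results does exactly this on tx completion. A condensed sketch of that flow, where the threshold name and the way pending work is counted are assumptions for illustration:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

#define EXAMPLE_WAKE_THRESHOLD 8        /* hypothetical per-queue low-water mark */

/* Called when a transmitted skb is released: wake the matching subqueue once
 * the hardware ring backing that queue has drained far enough. */
static void example_tx_complete(struct net_device *ndev, struct sk_buff *skb,
                                unsigned int pending_on_ring)
{
        u16 queue = skb_get_queue_mapping(skb);

        if (__netif_subqueue_stopped(ndev, queue) &&
            pending_on_ring < EXAMPLE_WAKE_THRESHOLD)
                netif_wake_subqueue(ndev, queue);
}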
/linux-4.1.27/drivers/net/wireless/ti/wlcore/
Dtx.h198 static inline int wl1271_tx_get_queue(int queue) in wl1271_tx_get_queue() argument
200 switch (queue) { in wl1271_tx_get_queue()
215 int wlcore_tx_get_mac80211_queue(struct wl12xx_vif *wlvif, int queue) in wlcore_tx_get_mac80211_queue() argument
219 switch (queue) { in wlcore_tx_get_mac80211_queue()
263 u8 queue, enum wlcore_queue_stop_reason reason);
264 void wlcore_stop_queue(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 queue,
266 void wlcore_wake_queue(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 queue,
273 struct wl12xx_vif *wlvif, u8 queue,
278 u8 queue,
281 u8 queue);
/linux-4.1.27/drivers/watchdog/
Dmtx-1_wdt.c64 int queue; member
81 if (mtx1_wdt_device.queue && ticks) in mtx1_wdt_trigger()
99 if (!mtx1_wdt_device.queue) { in mtx1_wdt_start()
100 mtx1_wdt_device.queue = 1; in mtx1_wdt_start()
114 if (mtx1_wdt_device.queue) { in mtx1_wdt_stop()
115 mtx1_wdt_device.queue = 0; in mtx1_wdt_stop()
220 mtx1_wdt_device.queue = 0; in mtx1_wdt_probe()
238 if (mtx1_wdt_device.queue) { in mtx1_wdt_remove()
239 mtx1_wdt_device.queue = 0; in mtx1_wdt_remove()
Dcpu5wdt.c65 int queue; member
85 if (cpu5wdt_device.queue && ticks) in cpu5wdt_trigger()
109 if (!cpu5wdt_device.queue) { in cpu5wdt_start()
110 cpu5wdt_device.queue = 1; in cpu5wdt_start()
226 cpu5wdt_device.queue = 0; in cpu5wdt_init()
265 if (cpu5wdt_device.queue) { in cpu5wdt_exit()
266 cpu5wdt_device.queue = 0; in cpu5wdt_exit()
Drdc321x_wdt.c60 int queue; member
88 if (rdc321x_wdt_device.queue && ticks) in rdc321x_wdt_trigger()
107 if (!rdc321x_wdt_device.queue) { in rdc321x_wdt_start()
108 rdc321x_wdt_device.queue = 1; in rdc321x_wdt_start()
261 rdc321x_wdt_device.queue = 0; in rdc321x_wdt_probe()
276 if (rdc321x_wdt_device.queue) { in rdc321x_wdt_remove()
277 rdc321x_wdt_device.queue = 0; in rdc321x_wdt_remove()
/linux-4.1.27/Documentation/device-mapper/
Ddm-queue-length.txt1 dm-queue-length
4 dm-queue-length is a path selector module for device-mapper targets,
6 The path selector name is 'queue-length'.
23 dm-queue-length increments/decrements 'in-flight' when an I/O is
25 dm-queue-length selects a path with the minimum 'in-flight'.
32 # echo "0 10 multipath 0 0 1 1 queue-length 0 2 1 8:0 128 8:16 128" \
36 test: 0 10 multipath 0 0 1 1 queue-length 0 2 1 8:0 128 8:16 128
/linux-4.1.27/include/crypto/
Dalgapi.h178 void crypto_init_queue(struct crypto_queue *queue, unsigned int max_qlen);
179 int crypto_enqueue_request(struct crypto_queue *queue,
181 void *__crypto_dequeue_request(struct crypto_queue *queue, unsigned int offset);
182 struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue);
183 int crypto_tfm_in_queue(struct crypto_queue *queue, struct crypto_tfm *tfm);
337 struct crypto_queue *queue) in crypto_get_backlog() argument
339 return queue->backlog == &queue->list ? NULL : in crypto_get_backlog()
340 container_of(queue->backlog, struct crypto_async_request, list); in crypto_get_backlog()
343 static inline int ablkcipher_enqueue_request(struct crypto_queue *queue, in ablkcipher_enqueue_request() argument
346 return crypto_enqueue_request(queue, &request->base); in ablkcipher_enqueue_request()
[all …]
/linux-4.1.27/sound/core/seq/oss/
Dseq_oss_init.c58 static int delete_seq_queue(int queue);
196 dp->queue = -1; in snd_seq_oss_open()
286 delete_seq_queue(dp->queue); in snd_seq_oss_open()
372 dp->queue = qinfo.queue; in alloc_seq_queue()
380 delete_seq_queue(int queue) in delete_seq_queue() argument
385 if (queue < 0) in delete_seq_queue()
388 qinfo.queue = queue; in delete_seq_queue()
391 pr_err("ALSA: seq_oss: unable to delete queue %d (%d)\n", queue, rc); in delete_seq_queue()
420 int queue; in snd_seq_oss_release() local
431 queue = dp->queue; in snd_seq_oss_release()
[all …]
Dseq_oss_timer.c150 ev.queue = dp->queue; in send_timer_event()
151 ev.data.queue.queue = dp->queue; in send_timer_event()
152 ev.data.queue.param.value = value; in send_timer_event()
169 tmprec.queue = dp->queue; in snd_seq_oss_timer_start()
Dseq_oss_device.h87 int queue; /* sequencer queue number */ member
165 ev->queue = dp->queue; in snd_seq_oss_fill_addr()
/linux-4.1.27/drivers/mfd/
Dpcf50633-adc.c47 struct pcf50633_adc_request *queue[PCF50633_MAX_ADC_FIFO_DEPTH]; member
78 if (!adc->queue[head]) in trigger_next_adc_job_if_any()
81 adc_setup(pcf, adc->queue[head]->mux, adc->queue[head]->avg); in trigger_next_adc_job_if_any()
95 if (adc->queue[tail]) { in adc_enqueue_request()
101 adc->queue[tail] = req; in adc_enqueue_request()
182 req = adc->queue[head]; in pcf50633_adc_irq()
188 adc->queue[head] = NULL; in pcf50633_adc_irq()
230 if (WARN_ON(adc->queue[head])) in pcf50633_adc_remove()
235 kfree(adc->queue[i]); in pcf50633_adc_remove()
/linux-4.1.27/drivers/block/
Dps3disk.c44 struct request_queue *queue; member
281 ps3disk_do_request(dev, priv->queue); in ps3disk_interrupt()
409 struct request_queue *queue; in ps3disk_probe() local
453 queue = blk_init_queue(ps3disk_request, &priv->lock); in ps3disk_probe()
454 if (!queue) { in ps3disk_probe()
461 priv->queue = queue; in ps3disk_probe()
462 queue->queuedata = dev; in ps3disk_probe()
464 blk_queue_bounce_limit(queue, BLK_BOUNCE_HIGH); in ps3disk_probe()
466 blk_queue_max_hw_sectors(queue, dev->bounce_size >> 9); in ps3disk_probe()
467 blk_queue_segment_boundary(queue, -1UL); in ps3disk_probe()
[all …]
Dnbd.c682 nbd->disk->queue); in __nbd_ioctl()
684 blk_queue_flush(nbd->disk->queue, REQ_FLUSH); in __nbd_ioctl()
686 blk_queue_flush(nbd->disk->queue, 0); in __nbd_ioctl()
707 queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue); in __nbd_ioctl()
816 disk->queue = blk_init_queue(do_nbd_request, &nbd_lock); in nbd_init()
817 if (!disk->queue) { in nbd_init()
824 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, disk->queue); in nbd_init()
825 queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, disk->queue); in nbd_init()
826 disk->queue->limits.discard_granularity = 512; in nbd_init()
827 disk->queue->limits.max_discard_sectors = UINT_MAX; in nbd_init()
[all …]
Dps3vram.c69 struct request_queue *queue; member
625 struct request_queue *queue; in ps3vram_probe() local
744 queue = blk_alloc_queue(GFP_KERNEL); in ps3vram_probe()
745 if (!queue) { in ps3vram_probe()
751 priv->queue = queue; in ps3vram_probe()
752 queue->queuedata = dev; in ps3vram_probe()
753 blk_queue_make_request(queue, ps3vram_make_request); in ps3vram_probe()
754 blk_queue_max_segments(queue, BLK_MAX_SEGMENTS); in ps3vram_probe()
755 blk_queue_max_segment_size(queue, BLK_MAX_SEGMENT_SIZE); in ps3vram_probe()
756 blk_queue_max_hw_sectors(queue, BLK_SAFE_MAX_SECTORS); in ps3vram_probe()
[all …]
/linux-4.1.27/virt/kvm/
Dasync_pf.c68 INIT_LIST_HEAD(&vcpu->async_pf.queue); in kvm_async_pf_vcpu_init()
107 while (!list_empty(&vcpu->async_pf.queue)) { in kvm_clear_async_pf_completion_queue()
109 list_entry(vcpu->async_pf.queue.next, in kvm_clear_async_pf_completion_queue()
110 typeof(*work), queue); in kvm_clear_async_pf_completion_queue()
111 list_del(&work->queue); in kvm_clear_async_pf_completion_queue()
152 list_del(&work->queue); in kvm_check_async_pf_completion()
194 list_add_tail(&work->queue, &vcpu->async_pf.queue); in kvm_setup_async_pf()
217 INIT_LIST_HEAD(&work->queue); /* for list_del to work */ in kvm_async_pf_wakeup_all()
/linux-4.1.27/drivers/net/wireless/ath/carl9170/
Dtx.c49 unsigned int queue) in __carl9170_get_queue() argument
52 return queue; in __carl9170_get_queue()
78 int queue, i; in carl9170_tx_accounting() local
83 queue = skb_get_queue_mapping(skb); in carl9170_tx_accounting()
92 ar->tx_stats[queue].len++; in carl9170_tx_accounting()
93 ar->tx_stats[queue].count++; in carl9170_tx_accounting()
158 int queue; in carl9170_tx_accounting_free() local
160 queue = skb_get_queue_mapping(skb); in carl9170_tx_accounting_free()
164 ar->tx_stats[queue].len--; in carl9170_tx_accounting_free()
455 int queue = skb_get_queue_mapping(skb); in carl9170_tx_bar_status() local
[all …]
/linux-4.1.27/include/uapi/sound/
Dasequencer.h251 unsigned char queue; /* affected queue */ member
277 unsigned char queue; /* schedule queue */ member
290 struct snd_seq_ev_queue_control queue; member
394 unsigned char queue; /* Queue for REMOVE_DEST */ member
473 int queue; /* queue id */ member
489 int queue; /* queue id */ member
501 int queue; /* sequencer queue */ member
517 int queue; /* sequencer queue */ member
530 int queue; /* sequencer queue */ member
548 unsigned char queue; /* input time-stamp queue (optional) */ member
[all …]
/linux-4.1.27/drivers/misc/vmw_vmci/
Dvmci_queue_pair.c142 typedef int vmci_memcpy_to_queue_func(struct vmci_queue *queue,
146 const struct vmci_queue *queue,
273 struct vmci_queue *queue = q; in qp_free_queue() local
275 if (queue) { in qp_free_queue()
281 queue->kernel_if->u.g.vas[i], in qp_free_queue()
282 queue->kernel_if->u.g.pas[i]); in qp_free_queue()
285 vfree(queue); in qp_free_queue()
297 struct vmci_queue *queue; in qp_alloc_queue() local
300 size_t queue_size = sizeof(*queue) + sizeof(*queue->kernel_if); in qp_alloc_queue()
305 (sizeof(*queue->kernel_if->u.g.pas) + in qp_alloc_queue()
[all …]
/linux-4.1.27/drivers/staging/lustre/lustre/obdclass/
Dcl_io.c331 int cl_queue_match(const struct list_head *queue, in cl_queue_match() argument
336 list_for_each_entry(scan, queue, cill_linkage) { in cl_queue_match()
344 static int cl_queue_merge(const struct list_head *queue, in cl_queue_merge() argument
349 list_for_each_entry(scan, queue, cill_linkage) { in cl_queue_merge()
742 struct cl_2queue *queue; in cl_io_read_page() local
751 queue = &io->ci_queue; in cl_io_read_page()
753 cl_2queue_init(queue); in cl_io_read_page()
776 result = cl_io_submit_rw(env, io, CRT_READ, queue); in cl_io_read_page()
780 cl_page_list_disown(env, io, &queue->c2_qin); in cl_io_read_page()
781 cl_2queue_fini(env, queue); in cl_io_read_page()
[all …]
/linux-4.1.27/net/mac80211/
Dtkip.c245 u8 *ra, int only_iv, int queue, in ieee80211_tkip_decrypt_data() argument
268 if (key->u.tkip.rx[queue].state != TKIP_STATE_NOT_INIT && in ieee80211_tkip_decrypt_data()
269 (iv32 < key->u.tkip.rx[queue].iv32 || in ieee80211_tkip_decrypt_data()
270 (iv32 == key->u.tkip.rx[queue].iv32 && in ieee80211_tkip_decrypt_data()
271 iv16 <= key->u.tkip.rx[queue].iv16))) in ieee80211_tkip_decrypt_data()
276 key->u.tkip.rx[queue].state = TKIP_STATE_PHASE1_HW_UPLOADED; in ieee80211_tkip_decrypt_data()
280 if (key->u.tkip.rx[queue].state == TKIP_STATE_NOT_INIT || in ieee80211_tkip_decrypt_data()
281 key->u.tkip.rx[queue].iv32 != iv32) { in ieee80211_tkip_decrypt_data()
283 tkip_mixing_phase1(tk, &key->u.tkip.rx[queue], ta, iv32); in ieee80211_tkip_decrypt_data()
287 key->u.tkip.rx[queue].state != TKIP_STATE_PHASE1_HW_UPLOADED) { in ieee80211_tkip_decrypt_data()
[all …]
/linux-4.1.27/drivers/media/platform/xilinx/
Dxilinx-dma.c289 struct list_head queue; member
301 list_del(&buf->queue); in xvip_dma_complete()
348 if (dma->queue.type == V4L2_BUF_TYPE_VIDEO_CAPTURE) { in xvip_dma_buffer_queue()
377 list_add_tail(&buf->queue, &dma->queued_bufs); in xvip_dma_buffer_queue()
382 if (vb2_is_streaming(&dma->queue)) in xvip_dma_buffer_queue()
436 list_for_each_entry_safe(buf, nbuf, &dma->queued_bufs, queue) { in xvip_dma_start_streaming()
438 list_del(&buf->queue); in xvip_dma_start_streaming()
463 list_for_each_entry_safe(buf, nbuf, &dma->queued_bufs, queue) { in xvip_dma_stop_streaming()
465 list_del(&buf->queue); in xvip_dma_stop_streaming()
493 if (dma->queue.type == V4L2_BUF_TYPE_VIDEO_CAPTURE) in xvip_dma_querycap()
[all …]
/linux-4.1.27/drivers/staging/rtl8723au/include/
Dosdep_service.h51 struct list_head queue; member
55 static inline struct list_head *get_list_head(struct rtw_queue *queue) in get_list_head() argument
57 return &queue->queue; in get_list_head()
/linux-4.1.27/drivers/net/ethernet/tile/
Dtilepro.c172 struct tile_netio_queue queue; member
342 struct tile_netio_queue *queue = &info->queue; in tile_net_return_credit() local
343 netio_queue_user_impl_t *qup = &queue->__user_part; in tile_net_return_credit()
361 struct tile_netio_queue *queue = &info->queue; in tile_net_provide_linux_buffer() local
366 __netio_fastio_free_buffer(queue->__user_part.__fastio_index, buffer); in tile_net_provide_linux_buffer()
627 struct tile_netio_queue *queue = &info->queue; in tile_net_discard_aux() local
628 netio_queue_impl_t *qsp = queue->__system_part; in tile_net_discard_aux()
629 netio_queue_user_impl_t *qup = &queue->__user_part; in tile_net_discard_aux()
664 struct tile_netio_queue *queue = &info->queue; in tile_net_discard_packets() local
665 netio_queue_impl_t *qsp = queue->__system_part; in tile_net_discard_packets()
[all …]
/linux-4.1.27/drivers/s390/net/
Dqeth_core_main.c64 static void qeth_notify_skbs(struct qeth_qdio_out_q *queue,
68 static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
512 static inline int qeth_is_cq(struct qeth_card *card, unsigned int queue) in qeth_is_cq() argument
516 queue != 0 && in qeth_is_cq()
517 queue == card->qdio.no_in_queues - 1; in qeth_is_cq()
1247 static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue, in qeth_clear_output_buffer() argument
1255 atomic_dec(&queue->set_pci_flags_count); in qeth_clear_output_buffer()
1260 for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(queue->card); ++i) { in qeth_clear_output_buffer()
3311 struct qeth_qdio_q *queue = card->qdio.in_q; in qeth_queue_input_buffer() local
3318 count = (index < queue->next_buf_to_init)? in qeth_queue_input_buffer()
[all …]
/linux-4.1.27/tools/testing/selftests/mqueue/
Dmq_open_tests.c55 mqd_t queue = -1; variable
86 if (queue != -1) in shutdown()
87 if (mq_close(queue)) in shutdown()
201 if ((queue = mq_open(queue_path, flags, perms, attr)) == -1) in test_queue()
203 if (mq_getattr(queue, result)) in test_queue()
205 if (mq_close(queue)) in test_queue()
207 queue = -1; in test_queue()
223 if ((queue = mq_open(queue_path, flags, perms, attr)) == -1) in test_queue_fail()
225 if (mq_getattr(queue, result)) in test_queue_fail()
227 if (mq_close(queue)) in test_queue_fail()
[all …]
Dmq_perf_tests.c96 mqd_t queue = -1; variable
186 if (queue != -1) in shutdown()
187 if (mq_close(queue)) in shutdown()
291 queue = mq_open(queue_path, flags, perms, attr); in open_queue()
292 if (queue == -1) in open_queue()
294 if (mq_getattr(queue, &result)) in open_queue()
328 while (mq_send(queue, buff, sizeof(buff), 0) == 0) in cont_thread()
330 mq_receive(queue, buff, sizeof(buff), &priority); in cont_thread()
335 while (mq_receive(queue, buff, MSG_SIZE, &prio_in) == MSG_SIZE)
339 if (mq_send(queue, buff, MSG_SIZE, prio_out)) \
[all …]
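The selftests above exercise the POSIX message queue calls (mq_open, mq_getattr, mq_send, mq_receive, mq_close). Outside the kernel tree the same API looks like this minimal user-space round trip; the queue name, sizes and priorities are arbitrary, and the program links with -lrt:

#include <fcntl.h>
#include <mqueue.h>
#include <stdio.h>
#include <string.h>
#include <sys/stat.h>

int main(void)
{
        struct mq_attr attr = { .mq_maxmsg = 10, .mq_msgsize = 64 };
        char buf[64];                   /* must be >= mq_msgsize for mq_receive */
        unsigned int prio;
        mqd_t q = mq_open("/example_queue", O_CREAT | O_RDWR, 0600, &attr);

        if (q == (mqd_t)-1)
                return 1;
        mq_send(q, "hello", 6, 1);                /* enqueue one message */
        mq_receive(q, buf, sizeof(buf), &prio);   /* dequeue it again */
        printf("got \"%s\" at priority %u\n", buf, prio);
        mq_close(q);
        mq_unlink("/example_queue");
        return 0;
}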
/linux-4.1.27/drivers/staging/unisys/uislib/
Duisqueue.c53 unsigned char spar_signal_insert(struct channel_header __iomem *ch, u32 queue, in spar_signal_insert() argument
62 + queue; in spar_signal_insert()
112 spar_signal_remove(struct channel_header __iomem *ch, u32 queue, void *sig) in spar_signal_remove() argument
118 readq(&ch->ch_space_offset)) + queue; in spar_signal_remove()
166 unsigned int spar_signal_remove_all(struct channel_header *ch, u32 queue, in spar_signal_remove_all() argument
173 ch->ch_space_offset) + queue; in spar_signal_remove_all()
218 u32 queue) in spar_signalqueue_empty() argument
222 readq(&ch->ch_space_offset)) + queue; in spar_signalqueue_empty()
/linux-4.1.27/drivers/staging/i2o/
Di2o_block.c92 blk_cleanup_queue(dev->gd->queue); in i2o_block_device_free()
289 INIT_LIST_HEAD(&ireq->queue); in i2o_block_request_alloc()
406 struct request_queue *q = dreq->queue; in i2o_block_delayed_request_fn()
440 list_del(&ireq->queue); in i2o_block_end_request()
867 list_add_tail(&ireq->queue, &dev->open_queue); in i2o_block_transfer()
920 dreq->queue = q; in i2o_block_request_fn()
963 struct request_queue *queue; in i2o_block_device_alloc() local
987 queue = blk_init_queue(i2o_block_request_fn, &dev->lock); in i2o_block_device_alloc()
988 if (!queue) { in i2o_block_device_alloc()
994 blk_queue_prep_rq(queue, i2o_block_prep_req_fn); in i2o_block_device_alloc()
[all …]
Di2o_block.h89 struct list_head queue; member
100 struct request_queue *queue; member
/linux-4.1.27/drivers/usb/gadget/udc/
Ds3c-hsudc.c114 struct list_head queue; member
128 struct list_head queue; member
251 list_del_init(&hsreq->queue); in s3c_hsudc_complete_request()
275 while (!list_empty(&hsep->queue)) { in s3c_hsudc_nuke_ep()
276 hsreq = list_entry(hsep->queue.next, in s3c_hsudc_nuke_ep()
277 struct s3c_hsudc_req, queue); in s3c_hsudc_nuke_ep()
444 if (list_empty(&hsep->queue)) in s3c_hsudc_epin_intr()
447 hsreq = list_entry(hsep->queue.next, in s3c_hsudc_epin_intr()
448 struct s3c_hsudc_req, queue); in s3c_hsudc_epin_intr()
481 if (list_empty(&hsep->queue)) in s3c_hsudc_epout_intr()
[all …]
Datmel_usba_udc.c49 list_for_each_entry(req, &ep->queue, queue) { in queue_dbg_open()
53 list_add_tail(&req_copy->queue, queue_data); in queue_dbg_open()
62 list_for_each_entry_safe(req, req_copy, queue_data, queue) { in queue_dbg_open()
63 list_del(&req->queue); in queue_dbg_open()
87 struct list_head *queue = file->private_data; in queue_dbg_read() local
96 list_for_each_entry_safe(req, tmp_req, queue, queue) { in queue_dbg_read()
111 list_del(&req->queue); in queue_dbg_read()
132 list_for_each_entry_safe(req, tmp_req, queue_data, queue) { in queue_dbg_release()
133 list_del(&req->queue); in queue_dbg_release()
417 if (list_empty(&ep->queue)) { in submit_next_request()
[all …]
Dgoku_udc.c276 INIT_LIST_HEAD(&req->queue); in goku_alloc_request()
289 WARN_ON(!list_empty(&req->queue)); in goku_free_request()
301 list_del_init(&req->queue); in done()
487 if (dbuff && !list_empty(&ep->queue)) { in read_fifo()
488 req = list_entry(ep->queue.next, in read_fifo()
489 struct goku_request, queue); in read_fifo()
521 if (unlikely(list_empty (&ep->queue))) in pio_advance()
523 req = list_entry(ep->queue.next, struct goku_request, queue); in pio_advance()
594 if (unlikely(list_empty(&ep->queue))) { in dma_advance()
603 req = list_entry(ep->queue.next, struct goku_request, queue); in dma_advance()
[all …]
Domap_udc.c276 INIT_LIST_HEAD(&req->queue); in omap_alloc_request()
297 list_del_init(&req->queue); in done()
648 if (!list_empty(&ep->queue)) { in dma_irq()
649 req = container_of(ep->queue.next, in dma_irq()
650 struct omap_req, queue); in dma_irq()
655 if (!list_empty(&ep->queue)) { in dma_irq()
656 req = container_of(ep->queue.next, in dma_irq()
657 struct omap_req, queue); in dma_irq()
667 if (!list_empty(&ep->queue)) { in dma_irq()
668 req = container_of(ep->queue.next, in dma_irq()
[all …]
Dm66592-udc.c102 INIT_LIST_HEAD(&m66592->ep[0].queue); in m66592_usb_disconnect()
366 INIT_LIST_HEAD(&ep->queue); in m66592_ep_setting()
722 list_del_init(&req->queue); in transfer_complete()
728 if (!list_empty(&ep->queue)) in transfer_complete()
736 req = list_entry(ep->queue.next, struct m66592_request, queue); in transfer_complete()
910 req = list_entry(ep->queue.next, struct m66592_request, queue); in irq_pipe_ready()
918 req = list_entry(ep->queue.next, in irq_pipe_ready()
919 struct m66592_request, queue); in irq_pipe_ready()
941 req = list_entry(ep->queue.next, struct m66592_request, queue); in irq_pipe_empty()
954 req = list_entry(ep->queue.next, in irq_pipe_empty()
[all …]
Dfotg210-udc.c64 list_del_init(&req->queue); in fotg210_done()
77 if (list_empty(&ep->queue)) in fotg210_done()
222 while (!list_empty(&ep->queue)) { in fotg210_ep_disable()
223 req = list_entry(ep->queue.next, in fotg210_ep_disable()
224 struct fotg210_request, queue); in fotg210_ep_disable()
242 INIT_LIST_HEAD(&req->queue); in fotg210_ep_alloc_request()
425 if (list_empty(&ep->queue)) in fotg210_ep_queue()
428 list_add_tail(&req->queue, &ep->queue); in fotg210_ep_queue()
453 if (!list_empty(&ep->queue)) in fotg210_ep_dequeue()
518 if (!list_empty(&ep->queue)) in fotg210_set_halt_and_wedge()
[all …]
Dpch_udc.c298 struct list_head queue; member
414 struct list_head queue; member
1454 list_del_init(&req->queue); in complete_req()
1508 while (!list_empty(&ep->queue)) { in empty_req_queue()
1509 req = list_entry(ep->queue.next, struct pch_udc_request, queue); in empty_req_queue()
1752 INIT_LIST_HEAD(&ep->queue); in pch_udc_pcd_ep_disable()
1784 INIT_LIST_HEAD(&req->queue); in pch_udc_alloc_request()
1821 if (!list_empty(&req->queue)) in pch_udc_free_request()
1860 if (!list_empty(&req->queue)) in pch_udc_pcd_queue()
1907 if (list_empty(&ep->queue) && !ep->halted) { in pch_udc_pcd_queue()
[all …]
Dnet2272.c267 INIT_LIST_HEAD(&ep->queue); in net2272_ep_reset()
343 INIT_LIST_HEAD(&req->queue); in net2272_alloc_request()
359 WARN_ON(!list_empty(&req->queue)); in net2272_free_request()
377 list_del_init(&req->queue); in net2272_done()
482 if (!list_empty(&ep->queue)) { in net2272_write_fifo()
483 req = list_entry(ep->queue.next, in net2272_write_fifo()
485 queue); in net2272_write_fifo()
612 if (!list_empty(&ep->queue)) { in net2272_read_fifo()
613 req = list_entry(ep->queue.next, in net2272_read_fifo()
614 struct net2272_request, queue); in net2272_read_fifo()
[all …]
Dgr_udc.c161 if (list_empty(&ep->queue)) { in gr_seq_ep_show()
167 list_for_each_entry(req, &ep->queue, queue) { in gr_seq_ep_show()
310 list_del_init(&req->queue); in gr_finish_request()
375 INIT_LIST_HEAD(&req->queue); in gr_alloc_request()
390 if (list_empty(&ep->queue)) { in gr_start_dma()
395 req = list_first_entry(&ep->queue, struct gr_request, queue); in gr_start_dma()
431 req = list_first_entry(&ep->queue, struct gr_request, queue); in gr_dma_advance()
607 if (unlikely(!req->req.buf || !list_empty(&req->queue))) { in gr_queue()
610 ep->ep.name, req->req.buf, list_empty(&req->queue)); in gr_queue()
641 list_add_tail(&req->queue, &ep->queue); in gr_queue()
[all …]
Dpxa25x_udc.c309 INIT_LIST_HEAD (&req->queue); in pxa25x_ep_alloc_request()
323 WARN_ON(!list_empty (&req->queue)); in pxa25x_ep_free_request()
336 list_del_init(&req->queue); in done()
426 if (list_empty(&ep->queue)) in write_fifo()
573 if (list_empty(&ep->queue)) in read_fifo()
638 || !list_empty(&req->queue))) { in pxa25x_ep_queue()
672 if (list_empty(&ep->queue) && !ep->stopped) { in pxa25x_ep_queue()
728 list_add_tail(&req->queue, &ep->queue); in pxa25x_ep_queue()
743 while (!list_empty(&ep->queue)) { in nuke()
744 req = list_entry(ep->queue.next, in nuke()
[all …]
Dfsl_udc_core.c172 list_del_init(&req->queue); in done()
219 while (!list_empty(&ep->queue)) { in nuke()
222 req = list_entry(ep->queue.next, struct fsl_req, queue); in nuke()
690 INIT_LIST_HEAD(&req->queue); in fsl_alloc_request()
739 if (!(list_empty(&ep->queue)) && !(ep_index(ep) == 0)) { in fsl_queue_td()
742 lastreq = list_entry(ep->queue.prev, struct fsl_req, queue); in fsl_queue_td()
882 || !list_empty(&req->queue)) { in fsl_ep_queue()
919 list_add_tail(&req->queue, &ep->queue); in fsl_ep_queue()
951 list_for_each_entry(req, &ep->queue, queue) { in fsl_ep_dequeue()
961 if (ep->queue.next == &req->queue) { in fsl_ep_dequeue()
[all …]
Dbcm63xx_udc.c227 struct list_head queue; member
240 struct list_head queue; /* ep's requests */ member
762 INIT_LIST_HEAD(&bep->queue); in iudma_init_channel()
954 INIT_LIST_HEAD(&bep->queue); in bcm63xx_init_udc_hw()
1041 BUG_ON(!list_empty(&bep->queue)); in bcm63xx_ep_enable()
1080 if (!list_empty(&bep->queue)) { in bcm63xx_ep_disable()
1081 list_for_each_safe(pos, n, &bep->queue) { in bcm63xx_ep_disable()
1083 list_entry(pos, struct bcm63xx_req, queue); in bcm63xx_ep_disable()
1087 list_del(&breq->queue); in bcm63xx_ep_disable()
1177 list_add_tail(&breq->queue, &bep->queue); in bcm63xx_udc_queue()
[all …]
Dfusb300_udc.c261 while (!list_empty(&ep->queue)) { in fusb300_disable()
262 req = list_entry(ep->queue.next, struct fusb300_request, queue); in fusb300_disable()
279 INIT_LIST_HEAD(&req->queue); in fusb300_alloc_request()
438 if (list_empty(&ep->queue)) in fusb300_queue()
441 list_add_tail(&req->queue, &ep->queue); in fusb300_queue()
466 if (!list_empty(&ep->queue)) in fusb300_dequeue()
486 if (!list_empty(&ep->queue)) { in fusb300_set_halt_and_wedge()
528 .queue = fusb300_queue,
782 if (!list_empty(&ep->queue)) in clear_feature()
870 list_del_init(&req->queue); in done()
[all …]
Ds3c2410_udc.c259 list_del_init(&req->queue); in s3c2410_udc_done()
275 if (&ep->queue == NULL) in s3c2410_udc_nuke()
278 while (!list_empty(&ep->queue)) { in s3c2410_udc_nuke()
280 req = list_entry(ep->queue.next, struct s3c2410_request, in s3c2410_udc_nuke()
281 queue); in s3c2410_udc_nuke()
748 if (list_empty(&ep->queue)) in s3c2410_udc_handle_ep0()
751 req = list_entry(ep->queue.next, struct s3c2410_request, queue); in s3c2410_udc_handle_ep0()
819 if (likely(!list_empty(&ep->queue))) in s3c2410_udc_handle_ep()
820 req = list_entry(ep->queue.next, in s3c2410_udc_handle_ep()
821 struct s3c2410_request, queue); in s3c2410_udc_handle_ep()
[all …]
Dat91_udc.c135 if (list_empty (&ep->queue)) in proc_ep_show()
138 else list_for_each_entry (req, &ep->queue, queue) { in proc_ep_show()
261 list_del_init(&req->queue); in done()
276 if (list_empty(&ep->queue) && ep->int_mask != (1 << 0)) in done()
456 if (list_empty(&ep->queue)) in nuke()
460 while (!list_empty(&ep->queue)) { in nuke()
461 req = list_entry(ep->queue.next, struct at91_request, queue); in nuke()
588 INIT_LIST_HEAD(&req->queue); in at91_ep_alloc_request()
597 BUG_ON(!list_empty(&req->queue)); in at91_ep_free_request()
614 || !_req->buf || !list_empty(&req->queue)) { in at91_ep_queue()
[all …]
/linux-4.1.27/drivers/net/wireless/iwlwifi/
Diwl-op-mode.h158 void (*queue_full)(struct iwl_op_mode *op_mode, int queue);
159 void (*queue_not_full)(struct iwl_op_mode *op_mode, int queue);
199 int queue) in iwl_op_mode_queue_full() argument
201 op_mode->ops->queue_full(op_mode, queue); in iwl_op_mode_queue_full()
205 int queue) in iwl_op_mode_queue_not_full() argument
207 op_mode->ops->queue_not_full(op_mode, queue); in iwl_op_mode_queue_not_full()
Diwl-trans.h512 struct iwl_device_cmd *dev_cmd, int queue);
513 void (*reclaim)(struct iwl_trans *trans, int queue, int ssn,
516 void (*txq_enable)(struct iwl_trans *trans, int queue, u16 ssn,
519 void (*txq_disable)(struct iwl_trans *trans, int queue,
823 struct iwl_device_cmd *dev_cmd, int queue) in iwl_trans_tx() argument
831 return trans->ops->tx(trans, skb, dev_cmd, queue); in iwl_trans_tx()
834 static inline void iwl_trans_reclaim(struct iwl_trans *trans, int queue, in iwl_trans_reclaim() argument
840 trans->ops->reclaim(trans, queue, ssn, skbs); in iwl_trans_reclaim()
843 static inline void iwl_trans_txq_disable(struct iwl_trans *trans, int queue, in iwl_trans_txq_disable() argument
846 trans->ops->txq_disable(trans, queue, configure_scd); in iwl_trans_txq_disable()
[all …]
/linux-4.1.27/arch/xtensa/platforms/iss/
Dsimdisk.c31 struct request_queue *queue; member
281 dev->queue = blk_alloc_queue(GFP_KERNEL); in simdisk_setup()
282 if (dev->queue == NULL) { in simdisk_setup()
287 blk_queue_make_request(dev->queue, simdisk_make_request); in simdisk_setup()
288 dev->queue->queuedata = dev; in simdisk_setup()
298 dev->gd->queue = dev->queue; in simdisk_setup()
308 blk_cleanup_queue(dev->queue); in simdisk_setup()
309 dev->queue = NULL; in simdisk_setup()
365 if (dev->queue) in simdisk_teardown()
366 blk_cleanup_queue(dev->queue); in simdisk_teardown()
/linux-4.1.27/drivers/ptp/
Dptp_clock.c53 static void enqueue_external_timestamp(struct timestamp_event_queue *queue, in enqueue_external_timestamp() argument
63 spin_lock_irqsave(&queue->lock, flags); in enqueue_external_timestamp()
65 dst = &queue->buf[queue->tail]; in enqueue_external_timestamp()
70 if (!queue_free(queue)) in enqueue_external_timestamp()
71 queue->head = (queue->head + 1) % PTP_MAX_TIMESTAMPS; in enqueue_external_timestamp()
73 queue->tail = (queue->tail + 1) % PTP_MAX_TIMESTAMPS; in enqueue_external_timestamp()
75 spin_unlock_irqrestore(&queue->lock, flags); in enqueue_external_timestamp()
Dptp_chardev.c274 struct timestamp_event_queue *queue = &ptp->tsevq; in ptp_read() local
292 ptp->defunct || queue_cnt(queue))) { in ptp_read()
308 spin_lock_irqsave(&queue->lock, flags); in ptp_read()
310 qcnt = queue_cnt(queue); in ptp_read()
316 event[i] = queue->buf[queue->head]; in ptp_read()
317 queue->head = (queue->head + 1) % PTP_MAX_TIMESTAMPS; in ptp_read()
320 spin_unlock_irqrestore(&queue->lock, flags); in ptp_read()
Dptp_sysfs.c99 struct timestamp_event_queue *queue = &ptp->tsevq; in extts_fifo_show() local
110 spin_lock_irqsave(&queue->lock, flags); in extts_fifo_show()
111 qcnt = queue_cnt(queue); in extts_fifo_show()
113 event = queue->buf[queue->head]; in extts_fifo_show()
114 queue->head = (queue->head + 1) % PTP_MAX_TIMESTAMPS; in extts_fifo_show()
116 spin_unlock_irqrestore(&queue->lock, flags); in extts_fifo_show()
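The PTP code above and the pcf50633 ADC driver earlier both use the same fixed-size head/tail ring for queued events. Stripped of locking and driver specifics, the queue arithmetic reduces to the sketch below (depth and element type are placeholders standing in for PTP_MAX_TIMESTAMPS and the event struct):

#define EXAMPLE_FIFO_DEPTH 128   /* stands in for PTP_MAX_TIMESTAMPS */

struct example_fifo {
        unsigned int head;       /* next slot to read  */
        unsigned int tail;       /* next slot to write */
        int buf[EXAMPLE_FIFO_DEPTH];
};

/* Number of queued entries; one slot is sacrificed to tell full from empty. */
static unsigned int example_fifo_cnt(const struct example_fifo *f)
{
        return (f->tail + EXAMPLE_FIFO_DEPTH - f->head) % EXAMPLE_FIFO_DEPTH;
}

/* Overwrite-oldest push, mirroring enqueue_external_timestamp() above. */
static void example_fifo_push(struct example_fifo *f, int val)
{
        f->buf[f->tail] = val;
        if (example_fifo_cnt(f) == EXAMPLE_FIFO_DEPTH - 1)   /* full: drop oldest */
                f->head = (f->head + 1) % EXAMPLE_FIFO_DEPTH;
        f->tail = (f->tail + 1) % EXAMPLE_FIFO_DEPTH;
}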
/linux-4.1.27/drivers/media/platform/vsp1/
Dvsp1_video.c585 struct vsp1_video_buffer, queue); in vsp1_video_complete_buffer()
593 list_del(&done->queue); in vsp1_video_complete_buffer()
597 struct vsp1_video_buffer, queue); in vsp1_video_complete_buffer()
622 video->ops->queue(video, buf); in vsp1_video_frame_end()
773 list_add_tail(&buf->queue, &video->irqqueue); in vsp1_video_buffer_queue()
781 video->ops->queue(video, buf); in vsp1_video_buffer_queue()
784 if (vb2_is_streaming(&video->queue) && in vsp1_video_buffer_queue()
877 list_for_each_entry(buffer, &video->irqqueue, queue) in vsp1_video_stop_streaming()
928 if (format->type != video->queue.type) in vsp1_video_get_format()
944 if (format->type != video->queue.type) in vsp1_video_try_format()
[all …]
Dvsp1_video.h98 struct list_head queue; member
111 void (*queue)(struct vsp1_video *video, struct vsp1_video_buffer *buf); member
131 struct vb2_queue queue; member
/linux-4.1.27/Documentation/devicetree/bindings/net/
Dkeystone-netcp.txt93 - tx-queue: the navigator queue number associated with the tx dma channel.
119 - rx-queue: the navigator queue number associated with rx dma channel.
124 - rx-queue-depth: number of descriptors in each of the free descriptor
125 queue (FDQ) for the pktdma Rx flow. There can be at
128 - tx-completion-queue: the navigator queue number where the descriptors are
164 tx-queue = <648>;
196 rx-queue-depth = <128 128 0 0>;
198 rx-queue = <8704>;
199 tx-completion-queue = <8706>;
208 rx-queue-depth = <128 128 0 0>;
[all …]
Dfsl-fec.txt20 hw multi queues. Should specify the tx queue number, otherwise set tx queue
23 hw multi queues. Should specify the rx queue number, otherwise set rx queue
/linux-4.1.27/net/ipv4/
Dinet_connection_sock.c297 struct request_sock_queue *queue = &icsk->icsk_accept_queue; in inet_csk_accept() local
312 if (reqsk_queue_empty(queue)) { in inet_csk_accept()
324 req = reqsk_queue_remove(queue); in inet_csk_accept()
330 queue->fastopenq) { in inet_csk_accept()
331 spin_lock_bh(&queue->fastopenq->lock); in inet_csk_accept()
342 spin_unlock_bh(&queue->fastopenq->lock); in inet_csk_accept()
568 static bool reqsk_queue_unlink(struct request_sock_queue *queue, in reqsk_queue_unlink() argument
575 spin_lock(&queue->syn_wait_lock); in reqsk_queue_unlink()
576 lopt = queue->listen_opt; in reqsk_queue_unlink()
587 spin_unlock(&queue->syn_wait_lock); in reqsk_queue_unlink()
[all …]
Dtcp_yeah.c131 u32 rtt, queue; in tcp_yeah_cong_avoid() local
153 queue = bw; in tcp_yeah_cong_avoid()
155 if (queue > TCP_YEAH_ALPHA || in tcp_yeah_cong_avoid()
157 if (queue > TCP_YEAH_ALPHA && in tcp_yeah_cong_avoid()
159 u32 reduction = min(queue / TCP_YEAH_GAMMA , in tcp_yeah_cong_avoid()
188 yeah->lastQ = queue; in tcp_yeah_cong_avoid()
/linux-4.1.27/Documentation/devicetree/bindings/powerpc/fsl/
Draideng.txt30 There must be a sub-node for each job queue present in RAID Engine
33 - compatible: Should contain "fsl,raideng-v1.0-job-queue" as the value
34 This identifies the job queue interface
35 - reg: offset and length of the register set for job queue
42 compatible = "fsl,raideng-v1.0-job-queue";
49 This node must be a sub-node of job queue node
70 compatible = "fsl,raideng-v1.0-job-queue";
/linux-4.1.27/drivers/media/platform/omap3isp/
Dispvideo.c322 static int isp_video_queue_setup(struct vb2_queue *queue, in isp_video_queue_setup() argument
327 struct isp_video_fh *vfh = vb2_get_drv_priv(queue); in isp_video_queue_setup()
413 video->ops->queue(video, buffer); in isp_video_buffer_queue()
573 vb2_discard_done(video->queue); in omap3isp_video_resume()
580 video->ops->queue(video, buf); in omap3isp_video_resume()
828 ret = vb2_reqbufs(&vfh->queue, rb); in isp_video_reqbufs()
842 ret = vb2_querybuf(&vfh->queue, b); in isp_video_querybuf()
856 ret = vb2_qbuf(&vfh->queue, b); in isp_video_qbuf()
870 ret = vb2_dqbuf(&vfh->queue, b, file->f_flags & O_NONBLOCK); in isp_video_dqbuf()
1067 video->queue = &vfh->queue; in isp_video_streamon()
[all …]
Dispvideo.h148 int(*queue)(struct isp_video *video, struct isp_buffer *buffer); member
175 struct vb2_queue *queue; member
189 struct vb2_queue queue; member
196 container_of(q, struct isp_video_fh, queue)
/linux-4.1.27/drivers/scsi/ibmvscsi/
Dibmvscsi.c152 static void ibmvscsi_release_crq_queue(struct crq_queue *queue, in ibmvscsi_release_crq_queue() argument
166 queue->msg_token, in ibmvscsi_release_crq_queue()
167 queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL); in ibmvscsi_release_crq_queue()
168 free_page((unsigned long)queue->msgs); in ibmvscsi_release_crq_queue()
178 static struct viosrp_crq *crq_queue_next_crq(struct crq_queue *queue) in crq_queue_next_crq() argument
183 spin_lock_irqsave(&queue->lock, flags); in crq_queue_next_crq()
184 crq = &queue->msgs[queue->cur]; in crq_queue_next_crq()
186 if (++queue->cur == queue->size) in crq_queue_next_crq()
187 queue->cur = 0; in crq_queue_next_crq()
195 spin_unlock_irqrestore(&queue->lock, flags); in crq_queue_next_crq()
[all …]
/linux-4.1.27/include/crypto/internal/
Dskcipher.h76 struct crypto_queue *queue, struct skcipher_givcrypt_request *request) in skcipher_enqueue_givcrypt() argument
78 return ablkcipher_enqueue_request(queue, &request->creq); in skcipher_enqueue_givcrypt()
82 struct crypto_queue *queue) in skcipher_dequeue_givcrypt() argument
84 return skcipher_givcrypt_cast(crypto_dequeue_request(queue)); in skcipher_dequeue_givcrypt()
Dhash.h182 static inline int ahash_enqueue_request(struct crypto_queue *queue, in ahash_enqueue_request() argument
185 return crypto_enqueue_request(queue, &request->base); in ahash_enqueue_request()
189 struct crypto_queue *queue) in ahash_dequeue_request() argument
191 return ahash_request_cast(crypto_dequeue_request(queue)); in ahash_dequeue_request()
194 static inline int ahash_tfm_in_queue(struct crypto_queue *queue, in ahash_tfm_in_queue() argument
197 return crypto_tfm_in_queue(queue, crypto_ahash_tfm(tfm)); in ahash_tfm_in_queue()
/linux-4.1.27/drivers/infiniband/hw/cxgb4/
Dt4.h287 union t4_wr *queue; member
313 union t4_recv_wr *queue; member
380 return wq->rq.queue[wq->rq.size].status.host_wq_pidx; in t4_rq_host_wq_pidx()
430 return wq->sq.queue[wq->sq.size].status.host_wq_pidx; in t4_sq_host_wq_pidx()
504 return wq->rq.queue[wq->rq.size].status.qp_err; in t4_wq_in_error()
509 wq->rq.queue[wq->rq.size].status.qp_err = 1; in t4_set_wq_in_error()
514 wq->rq.queue[wq->rq.size].status.db_off = 1; in t4_disable_wq_db()
519 wq->rq.queue[wq->rq.size].status.db_off = 0; in t4_enable_wq_db()
524 return !wq->rq.queue[wq->rq.size].status.db_off; in t4_wq_db_enabled()
532 struct t4_cqe *queue; member
[all …]
/linux-4.1.27/drivers/usb/isp1760/
Disp1760-udc.c31 struct list_head queue; member
238 list_del(&req->queue); in isp1760_udc_receive()
303 if (list_empty(&ep->queue)) { in isp1760_ep_rx_ready()
311 req = list_first_entry(&ep->queue, struct isp1760_request, in isp1760_ep_rx_ready()
312 queue); in isp1760_ep_rx_ready()
337 if (list_empty(&ep->queue)) { in isp1760_ep_tx_complete()
355 req = list_first_entry(&ep->queue, struct isp1760_request, in isp1760_ep_tx_complete()
356 queue); in isp1760_ep_tx_complete()
374 list_del(&req->queue); in isp1760_ep_tx_complete()
379 if (!list_empty(&ep->queue)) in isp1760_ep_tx_complete()
[all …]
/linux-4.1.27/drivers/net/ethernet/chelsio/cxgb4/
Dcxgb4_uld.h164 unsigned int queue);
167 unsigned int queue);
169 unsigned int queue, bool ipv6);
172 unsigned int queue,
175 unsigned int queue, bool ipv6);
177 static inline void set_wr_txq(struct sk_buff *skb, int prio, int queue) in set_wr_txq() argument
179 skb_set_queue_mapping(skb, (queue << 1) | prio); in set_wr_txq()
/linux-4.1.27/drivers/staging/rtl8188eu/include/
Dosdep_service.h61 struct list_head queue; member
65 static inline struct list_head *get_list_head(struct __queue *queue) in get_list_head() argument
67 return &(queue->queue); in get_list_head()
/linux-4.1.27/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/
Dhost.fuc39 // HOST HOST->PWR queue description
40 .equ #fifo_qlen 4 // log2(size of queue entry in bytes)
41 .equ #fifo_qnum 3 // log2(max number of entries in queue)
42 .equ #fifo_qmaskb (1 << #fifo_qnum) // max number of entries in queue
48 // HOST PWR->HOST queue description
49 .equ #rfifo_qlen 4 // log2(size of queue entry in bytes)
50 .equ #rfifo_qnum 3 // log2(max number of entries in queue)
51 .equ #rfifo_qmaskb (1 << #rfifo_qnum) // max number of entries in queue
/linux-4.1.27/drivers/hid/
Dhid-wiimote-core.c46 struct wiimote_queue *queue = container_of(work, struct wiimote_queue, in wiimote_queue_worker() local
48 struct wiimote_data *wdata = container_of(queue, struct wiimote_data, in wiimote_queue_worker()
49 queue); in wiimote_queue_worker()
53 spin_lock_irqsave(&wdata->queue.lock, flags); in wiimote_queue_worker()
55 while (wdata->queue.head != wdata->queue.tail) { in wiimote_queue_worker()
56 spin_unlock_irqrestore(&wdata->queue.lock, flags); in wiimote_queue_worker()
58 wdata->queue.outq[wdata->queue.tail].data, in wiimote_queue_worker()
59 wdata->queue.outq[wdata->queue.tail].size); in wiimote_queue_worker()
65 spin_lock_irqsave(&wdata->queue.lock, flags); in wiimote_queue_worker()
67 wdata->queue.tail = (wdata->queue.tail + 1) % WIIMOTE_BUFSIZE; in wiimote_queue_worker()
[all …]
/linux-4.1.27/drivers/net/ethernet/intel/i40e/
Di40e_lan_hmc.h171 u16 queue);
173 u16 queue,
176 u16 queue);
178 u16 queue,
/linux-4.1.27/include/net/netfilter/
Dnf_queue.h83 nfqueue_hash(const struct sk_buff *skb, u16 queue, u16 queues_total, u8 family, in nfqueue_hash() argument
87 queue += ((u64) hash_v4(skb, jhash_initval) * queues_total) >> 32; in nfqueue_hash()
90 queue += ((u64) hash_v6(skb, jhash_initval) * queues_total) >> 32; in nfqueue_hash()
93 return queue; in nfqueue_hash()
/linux-4.1.27/drivers/staging/media/omap4iss/
Diss_video.h143 int (*queue)(struct iss_video *video, struct iss_buffer *buffer); member
169 struct vb2_queue *queue; member
183 struct vb2_queue queue; member
190 container_of(q, struct iss_video_fh, queue)
Diss_video.c381 video->ops->queue(video, buffer); in iss_video_buf_queue()
509 vb2_queue_error(video->queue); in omap4iss_video_cancel_stream()
753 return vb2_reqbufs(&vfh->queue, rb); in iss_video_reqbufs()
761 return vb2_querybuf(&vfh->queue, b); in iss_video_querybuf()
769 return vb2_qbuf(&vfh->queue, b); in iss_video_qbuf()
777 return vb2_expbuf(&vfh->queue, e); in iss_video_expbuf()
785 return vb2_dqbuf(&vfh->queue, b, file->f_flags & O_NONBLOCK); in iss_video_dqbuf()
901 video->queue = &vfh->queue; in iss_video_streamon()
906 ret = vb2_streamon(&vfh->queue, type); in iss_video_streamon()
931 vb2_streamoff(&vfh->queue, type); in iss_video_streamon()
[all …]
/linux-4.1.27/drivers/net/ethernet/intel/i40evf/
Di40e_lan_hmc.h171 u16 queue);
173 u16 queue,
176 u16 queue);
178 u16 queue,
/linux-4.1.27/include/linux/
Dtcp.h381 struct request_sock_queue *queue = in fastopen_init_queue() local
384 if (queue->fastopenq == NULL) { in fastopen_init_queue()
385 queue->fastopenq = kzalloc( in fastopen_init_queue()
388 if (queue->fastopenq == NULL) in fastopen_init_queue()
392 spin_lock_init(&queue->fastopenq->lock); in fastopen_init_queue()
394 queue->fastopenq->max_qlen = backlog; in fastopen_init_queue()
/linux-4.1.27/Documentation/scsi/
Dhptiop.txt84 All queued requests are handled via inbound/outbound queue port.
89 - Get a free request packet by reading the inbound queue port or
92 The value returned from the inbound queue port is an offset
99 - Post the packet to IOP by writing it to inbound queue. For requests
100 allocated in IOP memory, write the offset to inbound queue port. For
102 to the inbound queue port.
105 will be put into outbound queue. An outbound interrupt will be
109 outbound queue.
112 is posted to the outbound queue. If IOP_REQUEST_FLAG_OUTPUT_CONTEXT
116 - The host read the outbound queue and complete the request.
[all …]
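The hptiop flow described above maps onto a pair of doorbell-style queue ports: the host reads the inbound port to obtain a free request slot, writes the filled-in request's offset back to it, and reaps completions from the outbound port after an interrupt. A heavily simplified sketch of the host side follows; the register offsets, function names and the empty-queue convention are all assumptions made for illustration (the real implementation is drivers/scsi/hptiop.c):

#include <linux/io.h>
#include <linux/types.h>

#define EXAMPLE_INBOUND_QUEUE   0x40    /* hypothetical port offset */
#define EXAMPLE_OUTBOUND_QUEUE  0x44    /* hypothetical port offset */

/* Post a request that was built in IOP memory: the offset obtained earlier
 * from the inbound queue port is written back once the packet is filled in. */
static void example_post_request(void __iomem *bar, u32 req_offset)
{
        writel(req_offset, bar + EXAMPLE_INBOUND_QUEUE);
}

/* Read the offset of one completed request from the outbound queue port. */
static u32 example_reap_completion(void __iomem *bar)
{
        return readl(bar + EXAMPLE_OUTBOUND_QUEUE);
}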
/linux-4.1.27/drivers/gpu/drm/atmel-hlcdc/
Datmel_hlcdc_layer.c163 dma->queue = fb_flip; in atmel_hlcdc_layer_update_apply()
199 flip = dma->queue ? dma->queue : dma->cur; in atmel_hlcdc_layer_irq()
266 dma->cur = dma->queue; in atmel_hlcdc_layer_irq()
267 dma->queue = NULL; in atmel_hlcdc_layer_irq()
279 if (dma->queue) in atmel_hlcdc_layer_irq()
281 dma->queue); in atmel_hlcdc_layer_irq()
288 dma->queue = NULL; in atmel_hlcdc_layer_irq()
291 if (!dma->queue) { in atmel_hlcdc_layer_irq()
326 if (dma->queue) { in atmel_hlcdc_layer_disable()
327 atmel_hlcdc_layer_fb_flip_release_queue(layer, dma->queue); in atmel_hlcdc_layer_disable()
[all …]
/linux-4.1.27/drivers/media/platform/s5p-mfc/
Ds5p_mfc_intr.c28 ret = wait_event_interruptible_timeout(dev->queue, in s5p_mfc_wait_for_done_dev()
60 ret = wait_event_interruptible_timeout(ctx->queue, in s5p_mfc_wait_for_done_ctx()
65 ret = wait_event_timeout(ctx->queue, in s5p_mfc_wait_for_done_ctx()
/linux-4.1.27/drivers/usb/musb/
Dmusb_host.h141 struct list_head *queue; in next_urb() local
145 queue = &qh->hep->urb_list; in next_urb()
146 if (list_empty(queue)) in next_urb()
148 return list_entry(queue->next, struct urb, urb_list); in next_urb()
Dmusb_gadget.h134 struct list_head *queue = &ep->req_list; in next_request() local
136 if (list_empty(queue)) in next_request()
138 return container_of(queue->next, struct musb_request, list); in next_request()
/linux-4.1.27/drivers/net/ethernet/freescale/
Ducc_geth_ethtool.c219 int queue = 0; in uec_get_ringparam() local
226 ring->rx_pending = ug_info->bdRingLenRx[queue]; in uec_get_ringparam()
227 ring->rx_mini_pending = ug_info->bdRingLenRx[queue]; in uec_get_ringparam()
228 ring->rx_jumbo_pending = ug_info->bdRingLenRx[queue]; in uec_get_ringparam()
229 ring->tx_pending = ug_info->bdRingLenTx[queue]; in uec_get_ringparam()
238 int queue = 0, ret = 0; in uec_set_ringparam() local
256 ug_info->bdRingLenRx[queue] = ring->rx_pending; in uec_set_ringparam()
257 ug_info->bdRingLenTx[queue] = ring->tx_pending; in uec_set_ringparam()
/linux-4.1.27/drivers/ide/
Dide-pm.c21 rq = blk_get_request(drive->queue, READ, __GFP_WAIT); in generic_ide_suspend()
29 ret = blk_execute_rq(drive->queue, NULL, rq, 0); in generic_ide_suspend()
61 rq = blk_get_request(drive->queue, READ, __GFP_WAIT); in generic_ide_resume()
68 err = blk_execute_rq(drive->queue, NULL, rq, 1); in generic_ide_resume()
184 struct request_queue *q = drive->queue; in ide_complete_pm_rq()
229 struct request_queue *q = drive->queue; in ide_check_pm_state()
/linux-4.1.27/drivers/virtio/
Dvirtio_pci_legacy.c138 info->queue = alloc_pages_exact(size, GFP_KERNEL|__GFP_ZERO); in setup_vq()
139 if (info->queue == NULL) in setup_vq()
143 iowrite32(virt_to_phys(info->queue) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT, in setup_vq()
149 true, info->queue, vp_notify, callback, name); in setup_vq()
172 free_pages_exact(info->queue, size); in setup_vq()
197 free_pages_exact(info->queue, size); in del_vq()
/linux-4.1.27/drivers/media/pci/cx23885/
Dcx23885-vbi.c206 list_add_tail(&buf->queue, &q->active); in buffer_queue()
214 queue); in buffer_queue()
216 list_add_tail(&buf->queue, &q->active); in buffer_queue()
229 struct cx23885_buffer, queue); in cx23885_start_streaming()
245 struct cx23885_buffer, queue); in cx23885_stop_streaming()
247 list_del(&buf->queue); in cx23885_stop_streaming()
/linux-4.1.27/drivers/staging/rtl8712/
Drtl871x_cmd.c122 static sint _enqueue_cmd(struct __queue *queue, struct cmd_obj *obj) in _enqueue_cmd() argument
128 spin_lock_irqsave(&queue->lock, irqL); in _enqueue_cmd()
129 list_add_tail(&obj->list, &queue->queue); in _enqueue_cmd()
130 spin_unlock_irqrestore(&queue->lock, irqL); in _enqueue_cmd()
134 static struct cmd_obj *_dequeue_cmd(struct __queue *queue) in _dequeue_cmd() argument
139 spin_lock_irqsave(&(queue->lock), irqL); in _dequeue_cmd()
140 if (list_empty(&(queue->queue))) in _dequeue_cmd()
143 obj = LIST_CONTAINOR(queue->queue.next, in _dequeue_cmd()
147 spin_unlock_irqrestore(&(queue->lock), irqL); in _dequeue_cmd()
185 struct __queue *queue; in r8712_enqueue_cmd_ex() local
[all …]
/linux-4.1.27/drivers/block/zram/
Dzram_drv.c979 static void zram_make_request(struct request_queue *queue, struct bio *bio) in zram_make_request() argument
981 struct zram *zram = queue->queuedata; in zram_make_request()
1163 struct request_queue *queue; in create_device() local
1168 queue = blk_alloc_queue(GFP_KERNEL); in create_device()
1169 if (!queue) { in create_device()
1175 blk_queue_make_request(queue, zram_make_request); in create_device()
1189 zram->disk->queue = queue; in create_device()
1190 zram->disk->queue->queuedata = zram; in create_device()
1197 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue); in create_device()
1198 queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, zram->disk->queue); in create_device()
[all …]
/linux-4.1.27/drivers/net/wimax/i2400m/
Drx.c491 struct sk_buff_head queue; member
500 skb_queue_head_init(&roq->queue); in __i2400m_roq_init()
676 if (skb_queue_empty(&roq->queue)) { in __i2400m_roq_queue()
678 __skb_queue_head(&roq->queue, skb); in __i2400m_roq_queue()
682 skb_itr = skb_peek_tail(&roq->queue); in __i2400m_roq_queue()
689 __skb_queue_tail(&roq->queue, skb); in __i2400m_roq_queue()
698 skb_queue_walk(&roq->queue, skb_itr) { in __i2400m_roq_queue()
706 __skb_queue_before(&roq->queue, skb_itr, skb); in __i2400m_roq_queue()
715 skb_queue_walk(&roq->queue, skb_itr) { in __i2400m_roq_queue()
756 skb_queue_walk_safe(&roq->queue, skb_itr, tmp_itr) { in __i2400m_roq_update_ws()
[all …]
/linux-4.1.27/drivers/md/
Draid0.c102 rdev1->bdev->bd_disk->queue)); in create_strip_zones()
206 if (rdev1->bdev->bd_disk->queue->merge_bvec_fn) in create_strip_zones()
430 if (mddev->queue) { in raid0_run()
434 blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors); in raid0_run()
435 blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors); in raid0_run()
436 blk_queue_max_discard_sectors(mddev->queue, mddev->chunk_sectors); in raid0_run()
438 blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9); in raid0_run()
439 blk_queue_io_opt(mddev->queue, in raid0_run()
449 queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, mddev->queue); in raid0_run()
451 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue); in raid0_run()
[all …]
Ddm-cache-policy-mq.c132 struct queue { struct
140 static void queue_init(struct queue *q) in queue_init() argument
155 static unsigned queue_size(struct queue *q) in queue_size()
160 static bool queue_empty(struct queue *q) in queue_empty()
168 static void queue_push(struct queue *q, unsigned level, struct list_head *elt) in queue_push()
174 static void queue_remove(struct queue *q, struct list_head *elt) in queue_remove()
180 static bool is_sentinel(struct queue *q, struct list_head *h) in is_sentinel()
189 static struct list_head *queue_peek(struct queue *q) in queue_peek()
202 static struct list_head *queue_pop(struct queue *q) in queue_pop()
217 static struct list_head *queue_pop_old(struct queue *q) in queue_pop_old()
[all …]
/linux-4.1.27/drivers/char/
Dapm-emulation.c119 struct apm_queue queue; member
203 queue_add_event(&as->queue, event); in queue_event()
218 if (queue_empty(&as->queue) && fp->f_flags & O_NONBLOCK) in apm_read()
221 wait_event_interruptible(apm_waitqueue, !queue_empty(&as->queue)); in apm_read()
223 while ((i >= sizeof(event)) && !queue_empty(&as->queue)) { in apm_read()
224 event = queue_get_event(&as->queue); in apm_read()
251 return queue_empty(&as->queue) ? 0 : POLLIN | POLLRDNORM; in apm_poll()
557 queue_add_event(&as->queue, apm_event); in apm_suspend_notifier()
/linux-4.1.27/drivers/infiniband/hw/ipath/
Dipath_cq.c61 wc = cq->queue; in ipath_cq_enter()
145 wc = cq->queue; in ipath_poll_cq()
290 cq->queue = wc; in ipath_create_cq()
326 vfree(cq->queue); in ipath_destroy_cq()
357 cq->queue->head != cq->queue->tail) in ipath_req_notify_cq()
413 old_wc = cq->queue; in ipath_resize_cq()
441 cq->queue = wc; in ipath_resize_cq()
/linux-4.1.27/drivers/net/wireless/p54/
Dp54.h104 #define P54_SET_QUEUE(queue, ai_fs, cw_min, cw_max, _txop) \ argument
106 queue.aifs = cpu_to_le16(ai_fs); \
107 queue.cwmin = cpu_to_le16(cw_min); \
108 queue.cwmax = cpu_to_le16(cw_max); \
109 queue.txop = cpu_to_le16(_txop); \
Dtxrx.c189 struct p54_tx_queue_stats *queue; in p54_tx_qos_accounting_alloc() local
195 queue = &priv->tx_stats[p54_queue]; in p54_tx_qos_accounting_alloc()
198 if (unlikely(queue->len >= queue->limit && IS_QOS_QUEUE(p54_queue))) { in p54_tx_qos_accounting_alloc()
203 queue->len++; in p54_tx_qos_accounting_alloc()
204 queue->count++; in p54_tx_qos_accounting_alloc()
206 if (unlikely(queue->len == queue->limit && IS_QOS_QUEUE(p54_queue))) { in p54_tx_qos_accounting_alloc()
684 u8 *queue, u32 *extra_len, u16 *flags, u16 *aid, in p54_tx_80211_header() argument
703 *queue = skb_get_queue_mapping(skb) + P54_QUEUE_DATA; in p54_tx_80211_header()
723 *queue = P54_QUEUE_CAB; in p54_tx_80211_header()
747 *queue = P54_QUEUE_BEACON; in p54_tx_80211_header()
[all …]
/linux-4.1.27/include/drm/
Ddrm_os_linux.h43 #define DRM_WAIT_ON( ret, queue, timeout, condition ) \ argument
47 add_wait_queue(&(queue), &entry); \
64 remove_wait_queue(&(queue), &entry); \
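
DRM_WAIT_ON() above open-codes the classic wait-queue loop: register on the queue, sleep in short slices, and give up on a timeout or a signal. The function below sketches roughly the same loop with the condition passed as a callback; it is an illustration of the pattern, not a drop-in replacement for the macro.

#include <linux/kernel.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

static int demo_wait_on(wait_queue_head_t *queue, unsigned long timeout,
			int (*condition)(void *), void *arg)
{
	DECLARE_WAITQUEUE(entry, current);
	unsigned long end = jiffies + timeout;
	int ret = 0;

	add_wait_queue(queue, &entry);
	for (;;) {
		__set_current_state(TASK_INTERRUPTIBLE);
		if (condition(arg))			/* woken up and done */
			break;
		if (time_after_eq(jiffies, end)) {
			ret = -EBUSY;			/* gave up waiting */
			break;
		}
		schedule_timeout(max(HZ / 100, 1));	/* sleep in short slices */
		if (signal_pending(current)) {
			ret = -EINTR;			/* interrupted by a signal */
			break;
		}
	}
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(queue, &entry);
	return ret;
}
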
/linux-4.1.27/drivers/atm/
Dfirestream.c588 static inline struct FS_QENTRY *get_qentry (struct fs_dev *dev, struct queue *q) in get_qentry()
594 static void submit_qentry (struct fs_dev *dev, struct queue *q, struct FS_QENTRY *qe) in submit_qentry()
638 static void submit_queue (struct fs_dev *dev, struct queue *q, in submit_queue()
665 static void submit_command (struct fs_dev *dev, struct queue *q, in submit_command()
677 static void process_return_queue (struct fs_dev *dev, struct queue *q) in process_return_queue()
703 static void process_txdone_queue (struct fs_dev *dev, struct queue *q) in process_txdone_queue()
773 static void process_incoming (struct fs_dev *dev, struct queue *q) in process_incoming()
1401 static int init_q(struct fs_dev *dev, struct queue *txq, int queue, in init_q() argument
1410 queue, nentries); in init_q()
1417 write_fs (dev, Q_SA(queue), virt_to_bus(p)); in init_q()
[all …]
Didt77252.c95 struct sk_buff *, int queue);
100 static void add_rx_skb(struct idt77252_dev *, int queue,
582 sb_pool_add(struct idt77252_dev *card, struct sk_buff *skb, int queue) in sb_pool_add() argument
584 struct sb_pool *pool = &card->sbpool[queue]; in sb_pool_add()
595 IDT77252_PRV_POOL(skb) = POOL_HANDLE(queue, index); in sb_pool_add()
604 unsigned int queue, index; in sb_pool_remove() local
609 queue = POOL_QUEUE(handle); in sb_pool_remove()
610 if (queue > 3) in sb_pool_remove()
617 card->sbpool[queue].skb[index] = NULL; in sb_pool_remove()
623 unsigned int queue, index; in sb_pool_skb() local
[all …]
/linux-4.1.27/drivers/net/wireless/prism54/
Disl_38xx.c223 isl38xx_in_queue(isl38xx_control_block *cb, int queue) in isl38xx_in_queue() argument
225 const s32 delta = (le32_to_cpu(cb->driver_curr_frag[queue]) - in isl38xx_in_queue()
226 le32_to_cpu(cb->device_curr_frag[queue])); in isl38xx_in_queue()
233 switch (queue) { in isl38xx_in_queue()
/linux-4.1.27/drivers/media/common/saa7146/
Dsaa7146_fops.c83 list_add_tail(&buf->vb.queue,&q->queue); in saa7146_buffer_queue()
124 if (!list_empty(&q->queue)) { in saa7146_buffer_next()
126 buf = list_entry(q->queue.next,struct saa7146_buf,vb.queue); in saa7146_buffer_next()
127 list_del(&buf->vb.queue); in saa7146_buffer_next()
128 if (!list_empty(&q->queue)) in saa7146_buffer_next()
129 next = list_entry(q->queue.next,struct saa7146_buf, vb.queue); in saa7146_buffer_next()
132 buf, q->queue.prev, q->queue.next); in saa7146_buffer_next()
/linux-4.1.27/arch/powerpc/boot/dts/
Dp1020rdb-pc_camp_core1.dts111 35 36 40 /* enet1-queue-group0 */
112 51 52 67 /* enet1-queue-group1 */
113 31 32 33 /* enet2-queue-group0 */
114 25 26 27 /* enet2-queue-group1 */
/linux-4.1.27/drivers/spi/
Dspi-txx9.c78 struct list_head queue; member
287 while (!list_empty(&c->queue)) { in txx9spi_work()
290 m = container_of(c->queue.next, struct spi_message, queue); in txx9spi_work()
291 list_del_init(&m->queue); in txx9spi_work()
317 list_add_tail(&m->queue, &c->queue); in txx9spi_transfer()
341 INIT_LIST_HEAD(&c->queue); in txx9spi_probe()
Dspi-mpc52xx.c76 struct list_head queue; /* queue of pending messages */ member
155 if (list_empty(&ms->queue)) in mpc52xx_spi_fsmstate_idle()
159 ms->message = list_first_entry(&ms->queue, struct spi_message, queue); in mpc52xx_spi_fsmstate_idle()
160 list_del_init(&ms->message->queue); in mpc52xx_spi_fsmstate_idle()
370 list_add_tail(&m->queue, &ms->queue); in mpc52xx_spi_transfer()
471 INIT_LIST_HEAD(&ms->queue); in mpc52xx_spi_probe()
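
Both SPI drivers above keep a driver-private list of pending spi_message structures: the transfer hook appends to it and a worker drains it. A sketch of that producer/consumer pair follows, with locking kept minimal and the actual transfer work elided; the demo_* context is an assumption. The real drivers kick a workqueue or a state machine to do the draining; only the queue discipline is shown here.

#include <linux/spi/spi.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/errno.h>

struct demo_spi_ctx {
	spinlock_t lock;
	struct list_head queue;			/* pending struct spi_message */
};

/* transfer hook: park the message on the driver's queue */
static int demo_spi_queue_message(struct demo_spi_ctx *c, struct spi_message *m)
{
	unsigned long flags;

	m->status = -EINPROGRESS;
	spin_lock_irqsave(&c->lock, flags);
	list_add_tail(&m->queue, &c->queue);	/* spi_message embeds its own list_head */
	spin_unlock_irqrestore(&c->lock, flags);
	return 0;
}

/* worker: pop messages one at a time and complete them */
static void demo_spi_drain(struct demo_spi_ctx *c)
{
	struct spi_message *m;
	unsigned long flags;

	spin_lock_irqsave(&c->lock, flags);
	while (!list_empty(&c->queue)) {
		m = list_first_entry(&c->queue, struct spi_message, queue);
		list_del_init(&m->queue);
		spin_unlock_irqrestore(&c->lock, flags);

		/* ...run the transfers on m->transfers here... */
		m->status = 0;
		if (m->complete)
			m->complete(m->context);	/* notify the submitter */

		spin_lock_irqsave(&c->lock, flags);
	}
	spin_unlock_irqrestore(&c->lock, flags);
}
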
/linux-4.1.27/security/integrity/ima/
Dima_queue.c37 .queue[0 ... IMA_MEASURE_HTABLE_SIZE - 1] = HLIST_HEAD_INIT
55 hlist_for_each_entry_rcu(qe, &ima_htable.queue[key], hnext) { in ima_lookup_digest_entry()
88 hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]); in ima_add_digest_entry()
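
The IMA hits above store measurements in a hash table of hlist queues, searched under RCU and extended with hlist_add_head_rcu(). The sketch below shows that layout with assumed table and digest sizes and a placeholder key function (IMA's real one differs); writers are expected to serialize on their own mutex.

#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/string.h>
#include <linux/types.h>

#define DEMO_HTABLE_SIZE 512			/* assumed table size */
#define DEMO_DIGEST_SIZE 20			/* assumed digest length */

struct demo_entry {
	struct hlist_node hnext;
	u8 digest[DEMO_DIGEST_SIZE];
};

static struct hlist_head demo_htable[DEMO_HTABLE_SIZE];

static unsigned int demo_hash_key(const u8 *digest)
{
	return digest[0] % DEMO_HTABLE_SIZE;	/* placeholder key function */
}

/* reader side: walk one bucket under rcu_read_lock() */
static struct demo_entry *demo_lookup(const u8 *digest)
{
	struct demo_entry *qe, *ret = NULL;
	unsigned int key = demo_hash_key(digest);

	rcu_read_lock();
	hlist_for_each_entry_rcu(qe, &demo_htable[key], hnext) {
		if (memcmp(qe->digest, digest, DEMO_DIGEST_SIZE) == 0) {
			ret = qe;
			break;
		}
	}
	rcu_read_unlock();
	return ret;
}

/* writer side: callers must serialize additions themselves */
static void demo_add(struct demo_entry *qe)
{
	hlist_add_head_rcu(&qe->hnext, &demo_htable[demo_hash_key(qe->digest)]);
}
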
/linux-4.1.27/drivers/net/fddi/skfp/h/
Dhwmtm.h243 #define HWM_GET_TX_USED(smc,queue) (int) (smc)->hw.fp.tx_q[queue].tx_used argument
261 #define HWM_GET_CURR_TXD(smc,queue) (struct s_smt_fp_txd volatile *)\ argument
262 (smc)->hw.fp.tx_q[queue].tx_curr_put
/linux-4.1.27/lib/
DKconfig.kmemcheck53 int "kmemcheck: error queue size"
57 Select the maximum number of errors to store in the queue. Since
60 other faults. The queue will be emptied as soon as a tasklet may
61 be scheduled. If the queue is full, new error reports will be
71 the queue. These bytes indicate what parts of an allocation are
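
The kmemcheck help text above describes a fixed-size error queue that is filled from fault context, drained as soon as a tasklet can run, and silently drops new reports once it is full. The following is an illustrative reimplementation of that behaviour under those assumptions, not kmemcheck's actual error-handling code.

#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>

#define DEMO_QUEUE_SIZE 64

struct demo_error {
	unsigned long address;
};

static struct demo_error demo_queue[DEMO_QUEUE_SIZE];
static unsigned int demo_queue_head, demo_queue_tail;
static DEFINE_SPINLOCK(demo_queue_lock);

static void demo_drain(unsigned long data);
static DECLARE_TASKLET(demo_tasklet, demo_drain, 0);

/* called from fault context: store the report if there is room, else drop it */
static void demo_report(unsigned long address)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_queue_lock, flags);
	if ((demo_queue_head + 1) % DEMO_QUEUE_SIZE != demo_queue_tail) {
		demo_queue[demo_queue_head].address = address;
		demo_queue_head = (demo_queue_head + 1) % DEMO_QUEUE_SIZE;
		tasklet_schedule(&demo_tasklet);	/* drain once it is safe to do so */
	}
	spin_unlock_irqrestore(&demo_queue_lock, flags);
}

/* tasklet: empty the queue outside of the faulting context */
static void demo_drain(unsigned long data)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_queue_lock, flags);
	while (demo_queue_tail != demo_queue_head) {
		pr_info("demo error at %lx\n", demo_queue[demo_queue_tail].address);
		demo_queue_tail = (demo_queue_tail + 1) % DEMO_QUEUE_SIZE;
	}
	spin_unlock_irqrestore(&demo_queue_lock, flags);
}
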
/linux-4.1.27/drivers/usb/host/
Duhci-q.c261 INIT_LIST_HEAD(&qh->queue); in uhci_alloc_qh()
296 if (!list_empty(&qh->queue)) in uhci_free_qh()
335 if (qh->queue.next != &urbp->node) { in uhci_cleanup_queue()
384 urbp = list_entry(qh->queue.next, struct urb_priv, node); in uhci_fixup_toggles()
394 urbp = list_prepare_entry(urbp, &qh->queue, node); in uhci_fixup_toggles()
395 list_for_each_entry_continue(urbp, &qh->queue, node) { in uhci_fixup_toggles()
416 pipe = list_entry(qh->queue.next, struct urb_priv, node)->urb->pipe; in uhci_fixup_toggles()
483 WARN_ON(list_empty(&qh->queue)); in uhci_activate_qh()
488 struct urb_priv *urbp = list_entry(qh->queue.next, in uhci_activate_qh()
1293 if (list_empty(&qh->queue)) { in uhci_submit_isochronous()
[all …]
/linux-4.1.27/drivers/staging/lustre/lustre/llite/
Drw26.c233 struct cl_2queue *queue; in ll_direct_rw_pages() local
245 queue = &io->ci_queue; in ll_direct_rw_pages()
246 cl_2queue_init(queue); in ll_direct_rw_pages()
301 cl_2queue_add(queue, clp); in ll_direct_rw_pages()
321 queue, 0); in ll_direct_rw_pages()
326 cl_2queue_discard(env, io, queue); in ll_direct_rw_pages()
327 cl_2queue_disown(env, io, queue); in ll_direct_rw_pages()
328 cl_2queue_fini(env, queue); in ll_direct_rw_pages()
