/linux-4.4.14/drivers/media/usb/uvc/ |
D | uvc_queue.c | 41 uvc_queue_to_stream(struct uvc_video_queue *queue) in uvc_queue_to_stream() argument 43 return container_of(queue, struct uvc_streaming, queue); in uvc_queue_to_stream() 51 static void uvc_queue_return_buffers(struct uvc_video_queue *queue, in uvc_queue_return_buffers() argument 58 while (!list_empty(&queue->irqqueue)) { in uvc_queue_return_buffers() 59 struct uvc_buffer *buf = list_first_entry(&queue->irqqueue, in uvc_queue_return_buffers() 61 queue); in uvc_queue_return_buffers() 62 list_del(&buf->queue); in uvc_queue_return_buffers() 77 struct uvc_video_queue *queue = vb2_get_drv_priv(vq); in uvc_queue_setup() local 78 struct uvc_streaming *stream = uvc_queue_to_stream(queue); in uvc_queue_setup() 95 struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue); in uvc_buffer_prepare() local [all …]
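The helpers above recover the enclosing struct uvc_streaming from a pointer to its embedded queue member. A minimal userspace sketch of that container_of() pattern, with stand-in struct names rather than the driver's own types:

#include <stddef.h>
#include <stdio.h>

/* Userspace re-implementation of the kernel's container_of() macro. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct video_queue {                /* stands in for struct uvc_video_queue */
        int buf_used;
};

struct streaming {                  /* stands in for struct uvc_streaming */
        int id;
        struct video_queue queue;   /* embedded member, as in uvcvideo.h */
};

static struct streaming *queue_to_stream(struct video_queue *q)
{
        /* Same trick as uvc_queue_to_stream(): walk back from the
         * embedded member to the enclosing structure. */
        return container_of(q, struct streaming, queue);
}

int main(void)
{
        struct streaming s = { .id = 7 };
        struct video_queue *q = &s.queue;

        printf("recovered id = %d\n", queue_to_stream(q)->id);
        return 0;
}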
|
D | uvcvideo.h | 358 struct list_head queue; member 374 struct vb2_queue queue; member 472 struct uvc_video_queue queue; member 623 extern int uvc_queue_init(struct uvc_video_queue *queue, 625 extern void uvc_queue_release(struct uvc_video_queue *queue); 626 extern int uvc_request_buffers(struct uvc_video_queue *queue, 628 extern int uvc_query_buffer(struct uvc_video_queue *queue, 630 extern int uvc_create_buffers(struct uvc_video_queue *queue, 632 extern int uvc_queue_buffer(struct uvc_video_queue *queue, 634 extern int uvc_export_buffer(struct uvc_video_queue *queue, [all …]
|
D | uvc_isight.c | 39 static int isight_decode(struct uvc_video_queue *queue, struct uvc_buffer *buf, in isight_decode() argument 123 ret = isight_decode(&stream->queue, buf, in uvc_video_decode_isight() 133 buf = uvc_queue_next_buffer(&stream->queue, in uvc_video_decode_isight()
|
/linux-4.4.14/drivers/usb/gadget/function/ |
D | uvc_queue.c | 48 struct uvc_video_queue *queue = vb2_get_drv_priv(vq); in uvc_queue_setup() local 49 struct uvc_video *video = container_of(queue, struct uvc_video, queue); in uvc_queue_setup() 63 struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue); in uvc_buffer_prepare() local 73 if (unlikely(queue->flags & UVC_QUEUE_DISCONNECTED)) in uvc_buffer_prepare() 89 struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue); in uvc_buffer_queue() local 94 spin_lock_irqsave(&queue->irqlock, flags); in uvc_buffer_queue() 96 if (likely(!(queue->flags & UVC_QUEUE_DISCONNECTED))) { in uvc_buffer_queue() 97 list_add_tail(&buf->queue, &queue->irqqueue); in uvc_buffer_queue() 106 spin_unlock_irqrestore(&queue->irqlock, flags); in uvc_buffer_queue() 117 int uvcg_queue_init(struct uvc_video_queue *queue, enum v4l2_buf_type type, in uvcg_queue_init() argument [all …]
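uvc_buffer_queue() appends a buffer to the irqqueue under irqlock unless the queue has been flagged disconnected. A rough userspace sketch of that pattern, with a pthread mutex standing in for spin_lock_irqsave() and a hand-rolled list_head; types and the flag value are illustrative:

#include <pthread.h>
#include <stdio.h>

/* Minimal intrusive doubly linked list, in the spirit of <linux/list.h>. */
struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

static void list_add_tail(struct list_head *entry, struct list_head *head)
{
        entry->prev = head->prev;
        entry->next = head;
        head->prev->next = entry;
        head->prev = entry;
}

#define QUEUE_DISCONNECTED 0x01

struct buffer { int index; struct list_head queue; };

struct video_queue {
        unsigned int flags;
        pthread_mutex_t irqlock;     /* stands in for spin_lock_irqsave() */
        struct list_head irqqueue;
};

/* Mirrors uvc_buffer_queue(): append unless the queue was disconnected. */
static void buffer_queue(struct video_queue *q, struct buffer *buf)
{
        pthread_mutex_lock(&q->irqlock);
        if (!(q->flags & QUEUE_DISCONNECTED))
                list_add_tail(&buf->queue, &q->irqqueue);
        pthread_mutex_unlock(&q->irqlock);
}

int main(void)
{
        struct video_queue q = { .flags = 0,
                                 .irqlock = PTHREAD_MUTEX_INITIALIZER };
        struct buffer b = { .index = 0 };

        INIT_LIST_HEAD(&q.irqqueue);
        buffer_queue(&q, &b);
        printf("queued: %s\n", q.irqqueue.next == &b.queue ? "yes" : "no");
        return 0;
}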
|
D | uvc_queue.h | 30 struct list_head queue; member 43 struct vb2_queue queue; member 54 static inline int uvc_queue_streaming(struct uvc_video_queue *queue) in uvc_queue_streaming() argument 56 return vb2_is_streaming(&queue->queue); in uvc_queue_streaming() 59 int uvcg_queue_init(struct uvc_video_queue *queue, enum v4l2_buf_type type, 62 void uvcg_free_buffers(struct uvc_video_queue *queue); 64 int uvcg_alloc_buffers(struct uvc_video_queue *queue, 67 int uvcg_query_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf); 69 int uvcg_queue_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf); 71 int uvcg_dequeue_buffer(struct uvc_video_queue *queue, [all …]
|
D | uvc_video.c | 37 if (buf->bytesused - video->queue.buf_used <= len - 2) in uvc_video_encode_header() 47 struct uvc_video_queue *queue = &video->queue; in uvc_video_encode_data() local 52 mem = buf->mem + queue->buf_used; in uvc_video_encode_data() 53 nbytes = min((unsigned int)len, buf->bytesused - queue->buf_used); in uvc_video_encode_data() 56 queue->buf_used += nbytes; in uvc_video_encode_data() 87 if (buf->bytesused == video->queue.buf_used) { in uvc_video_encode_bulk() 88 video->queue.buf_used = 0; in uvc_video_encode_bulk() 90 uvcg_queue_next_buffer(&video->queue, buf); in uvc_video_encode_bulk() 97 buf->bytesused == video->queue.buf_used) in uvc_video_encode_bulk() 120 if (buf->bytesused == video->queue.buf_used) { in uvc_video_encode_isoc() [all …]
|
D | uvc_v4l2.c | 152 if (b->type != video->queue.queue.type) in uvc_v4l2_reqbufs() 155 return uvcg_alloc_buffers(&video->queue, b); in uvc_v4l2_reqbufs() 165 return uvcg_query_buffer(&video->queue, b); in uvc_v4l2_querybuf() 176 ret = uvcg_queue_buffer(&video->queue, b); in uvc_v4l2_qbuf() 190 return uvcg_dequeue_buffer(&video->queue, b, file->f_flags & O_NONBLOCK); in uvc_v4l2_dqbuf() 201 if (type != video->queue.queue.type) in uvc_v4l2_streamon() 226 if (type != video->queue.queue.type) in uvc_v4l2_streamoff() 317 uvcg_free_buffers(&video->queue); in uvc_v4l2_release() 334 return uvcg_queue_mmap(&uvc->video.queue, vma); in uvc_v4l2_mmap() 343 return uvcg_queue_poll(&uvc->video.queue, file, wait); in uvc_v4l2_poll() [all …]
|
/linux-4.4.14/drivers/net/wireless/cw1200/ |
D | queue.c | 29 static inline void __cw1200_queue_lock(struct cw1200_queue *queue) in __cw1200_queue_lock() argument 31 struct cw1200_queue_stats *stats = queue->stats; in __cw1200_queue_lock() 32 if (queue->tx_locked_cnt++ == 0) { in __cw1200_queue_lock() 34 queue->queue_id); in __cw1200_queue_lock() 35 ieee80211_stop_queue(stats->priv->hw, queue->queue_id); in __cw1200_queue_lock() 39 static inline void __cw1200_queue_unlock(struct cw1200_queue *queue) in __cw1200_queue_unlock() argument 41 struct cw1200_queue_stats *stats = queue->stats; in __cw1200_queue_unlock() 42 BUG_ON(!queue->tx_locked_cnt); in __cw1200_queue_unlock() 43 if (--queue->tx_locked_cnt == 0) { in __cw1200_queue_unlock() 45 queue->queue_id); in __cw1200_queue_unlock() [all …]
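__cw1200_queue_lock()/__cw1200_queue_unlock() count nested locks so the mac80211 queue is stopped on the first lock and woken only when the last lock is dropped. A small sketch of that accounting, with the stop/wake calls stubbed out and names kept illustrative:

#include <stdio.h>

struct tx_queue {
        int id;
        int tx_locked_cnt;
};

static void stop_hw_queue(int id) { printf("stop queue %d\n", id); }
static void wake_hw_queue(int id) { printf("wake queue %d\n", id); }

/* First lock stops the queue; further locks only bump the counter. */
static void queue_lock(struct tx_queue *q)
{
        if (q->tx_locked_cnt++ == 0)
                stop_hw_queue(q->id);
}

/* Last unlock wakes the queue again. */
static void queue_unlock(struct tx_queue *q)
{
        if (--q->tx_locked_cnt == 0)
                wake_hw_queue(q->id);
}

int main(void)
{
        struct tx_queue q = { .id = 2 };

        queue_lock(&q);
        queue_lock(&q);      /* nested: no second stop */
        queue_unlock(&q);
        queue_unlock(&q);    /* last unlock wakes the queue */
        return 0;
}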
|
D | queue.h | 36 struct list_head queue; member 71 int cw1200_queue_init(struct cw1200_queue *queue, 76 int cw1200_queue_clear(struct cw1200_queue *queue); 78 void cw1200_queue_deinit(struct cw1200_queue *queue); 80 size_t cw1200_queue_get_num_queued(struct cw1200_queue *queue, 82 int cw1200_queue_put(struct cw1200_queue *queue, 85 int cw1200_queue_get(struct cw1200_queue *queue, 90 int cw1200_queue_requeue(struct cw1200_queue *queue, u32 packet_id); 91 int cw1200_queue_requeue_all(struct cw1200_queue *queue); 92 int cw1200_queue_remove(struct cw1200_queue *queue, [all …]
|
/linux-4.4.14/drivers/net/wireless/b43legacy/ |
D | pio.c | 35 static void tx_start(struct b43legacy_pioqueue *queue) in tx_start() argument 37 b43legacy_pio_write(queue, B43legacy_PIO_TXCTL, in tx_start() 41 static void tx_octet(struct b43legacy_pioqueue *queue, in tx_octet() argument 44 if (queue->need_workarounds) { in tx_octet() 45 b43legacy_pio_write(queue, B43legacy_PIO_TXDATA, octet); in tx_octet() 46 b43legacy_pio_write(queue, B43legacy_PIO_TXCTL, in tx_octet() 49 b43legacy_pio_write(queue, B43legacy_PIO_TXCTL, in tx_octet() 51 b43legacy_pio_write(queue, B43legacy_PIO_TXDATA, octet); in tx_octet() 76 static void tx_data(struct b43legacy_pioqueue *queue, in tx_data() argument 84 if (queue->need_workarounds) { in tx_data() [all …]
|
D | pio.h | 42 struct b43legacy_pioqueue *queue; member 48 (packet)->queue->tx_packets_cache)) 83 u16 b43legacy_pio_read(struct b43legacy_pioqueue *queue, in b43legacy_pio_read() argument 86 return b43legacy_read16(queue->dev, queue->mmio_base + offset); in b43legacy_pio_read() 90 void b43legacy_pio_write(struct b43legacy_pioqueue *queue, in b43legacy_pio_write() argument 93 b43legacy_write16(queue->dev, queue->mmio_base + offset, value); in b43legacy_pio_write() 105 void b43legacy_pio_rx(struct b43legacy_pioqueue *queue); 108 void b43legacy_pio_tx_suspend(struct b43legacy_pioqueue *queue); 109 void b43legacy_pio_tx_resume(struct b43legacy_pioqueue *queue); 137 void b43legacy_pio_rx(struct b43legacy_pioqueue *queue) in b43legacy_pio_rx() argument [all …]
|
/linux-4.4.14/drivers/net/xen-netback/ |
D | netback.c | 93 static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx, 96 static void make_tx_response(struct xenvif_queue *queue, 99 static void push_tx_responses(struct xenvif_queue *queue); 101 static inline int tx_work_todo(struct xenvif_queue *queue); 103 static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue, 110 static inline unsigned long idx_to_pfn(struct xenvif_queue *queue, in idx_to_pfn() argument 113 return page_to_pfn(queue->mmap_pages[idx]); in idx_to_pfn() 116 static inline unsigned long idx_to_kaddr(struct xenvif_queue *queue, in idx_to_kaddr() argument 119 return (unsigned long)pfn_to_kaddr(idx_to_pfn(queue, idx)); in idx_to_kaddr() 160 static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue) in xenvif_rx_ring_slots_available() argument [all …]
|
D | interface.c | 54 void xenvif_skb_zerocopy_prepare(struct xenvif_queue *queue, in xenvif_skb_zerocopy_prepare() argument 58 atomic_inc(&queue->inflight_packets); in xenvif_skb_zerocopy_prepare() 61 void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue) in xenvif_skb_zerocopy_complete() argument 63 atomic_dec(&queue->inflight_packets); in xenvif_skb_zerocopy_complete() 69 wake_up(&queue->dealloc_wq); in xenvif_skb_zerocopy_complete() 81 struct xenvif_queue *queue = dev_id; in xenvif_tx_interrupt() local 83 if (RING_HAS_UNCONSUMED_REQUESTS(&queue->tx)) in xenvif_tx_interrupt() 84 napi_schedule(&queue->napi); in xenvif_tx_interrupt() 91 struct xenvif_queue *queue = in xenvif_poll() local 99 if (unlikely(queue->vif->disabled)) { in xenvif_poll() [all …]
|
D | xenbus.c | 41 static int connect_rings(struct backend_info *be, struct xenvif_queue *queue); 55 struct xenvif_queue *queue = m->private; in xenvif_read_io_ring() local 56 struct xen_netif_tx_back_ring *tx_ring = &queue->tx; in xenvif_read_io_ring() 57 struct xen_netif_rx_back_ring *rx_ring = &queue->rx; in xenvif_read_io_ring() 63 seq_printf(m, "Queue %d\nTX: nr_ents %u\n", queue->id, in xenvif_read_io_ring() 79 queue->pending_prod, in xenvif_read_io_ring() 80 queue->pending_cons, in xenvif_read_io_ring() 81 nr_pending_reqs(queue)); in xenvif_read_io_ring() 83 queue->dealloc_prod, in xenvif_read_io_ring() 84 queue->dealloc_cons, in xenvif_read_io_ring() [all …]
|
D | common.h | 283 int xenvif_init_queue(struct xenvif_queue *queue); 284 void xenvif_deinit_queue(struct xenvif_queue *queue); 286 int xenvif_connect(struct xenvif_queue *queue, unsigned long tx_ring_ref, 297 int xenvif_queue_stopped(struct xenvif_queue *queue); 298 void xenvif_wake_queue(struct xenvif_queue *queue); 301 void xenvif_unmap_frontend_rings(struct xenvif_queue *queue); 302 int xenvif_map_frontend_rings(struct xenvif_queue *queue, 307 void xenvif_napi_schedule_or_enable_events(struct xenvif_queue *queue); 312 int xenvif_tx_action(struct xenvif_queue *queue, int budget); 315 void xenvif_kick_thread(struct xenvif_queue *queue); [all …]
|
/linux-4.4.14/drivers/misc/genwqe/ |
D | card_ddcb.c | 91 static int queue_empty(struct ddcb_queue *queue) in queue_empty() argument 93 return queue->ddcb_next == queue->ddcb_act; in queue_empty() 96 static int queue_enqueued_ddcbs(struct ddcb_queue *queue) in queue_enqueued_ddcbs() argument 98 if (queue->ddcb_next >= queue->ddcb_act) in queue_enqueued_ddcbs() 99 return queue->ddcb_next - queue->ddcb_act; in queue_enqueued_ddcbs() 101 return queue->ddcb_max - (queue->ddcb_act - queue->ddcb_next); in queue_enqueued_ddcbs() 104 static int queue_free_ddcbs(struct ddcb_queue *queue) in queue_free_ddcbs() argument 106 int free_ddcbs = queue->ddcb_max - queue_enqueued_ddcbs(queue) - 1; in queue_free_ddcbs() 172 static void print_ddcb_info(struct genwqe_dev *cd, struct ddcb_queue *queue) in print_ddcb_info() argument 183 cd->card_idx, queue->ddcb_act, queue->ddcb_next); in print_ddcb_info() [all …]
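queue_enqueued_ddcbs() and queue_free_ddcbs() do circular-ring occupancy arithmetic on the producer (ddcb_next) and consumer (ddcb_act) indices. A self-contained sketch of the same math, assuming one slot is deliberately kept unused:

#include <stdio.h>

/* Occupancy math for a circular ring; names are illustrative, not the
 * driver's own. act = consumer index, next = producer index. */
struct ring { int max, act, next; };

static int enqueued(const struct ring *r)
{
        if (r->next >= r->act)
                return r->next - r->act;
        return r->max - (r->act - r->next);   /* producer wrapped around */
}

static int free_slots(const struct ring *r)
{
        /* One slot stays unused so "full" and "empty" remain distinct. */
        return r->max - enqueued(r) - 1;
}

int main(void)
{
        struct ring r = { .max = 32, .act = 30, .next = 3 };

        printf("enqueued=%d free=%d\n", enqueued(&r), free_slots(&r));
        /* wrapped case: 32 - (30 - 3) = 5 enqueued, 26 free */
        return 0;
}

The "minus one" is the usual ring-buffer trick for telling a full ring apart from an empty one without a separate counter.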
|
D | card_debugfs.c | 236 struct ddcb_queue *queue; in genwqe_ddcb_info_show() local 239 queue = &cd->queue; in genwqe_ddcb_info_show() 250 queue->ddcb_max, (long long)queue->ddcb_daddr, in genwqe_ddcb_info_show() 251 (long long)queue->ddcb_daddr + in genwqe_ddcb_info_show() 252 (queue->ddcb_max * DDCB_LENGTH), in genwqe_ddcb_info_show() 253 (long long)queue->ddcb_vaddr, queue->ddcbs_in_flight, in genwqe_ddcb_info_show() 254 queue->ddcbs_max_in_flight, queue->ddcbs_completed, in genwqe_ddcb_info_show() 255 queue->return_on_busy, queue->wait_on_busy, in genwqe_ddcb_info_show() 268 queue->IO_QUEUE_CONFIG, in genwqe_ddcb_info_show() 269 __genwqe_readq(cd, queue->IO_QUEUE_CONFIG), in genwqe_ddcb_info_show() [all …]
|
/linux-4.4.14/drivers/staging/rdma/ehca/ |
D | ipz_pt_fn.c | 51 void *ipz_qpageit_get_inc(struct ipz_queue *queue) in ipz_qpageit_get_inc() argument 53 void *ret = ipz_qeit_get(queue); in ipz_qpageit_get_inc() 54 queue->current_q_offset += queue->pagesize; in ipz_qpageit_get_inc() 55 if (queue->current_q_offset > queue->queue_length) { in ipz_qpageit_get_inc() 56 queue->current_q_offset -= queue->pagesize; in ipz_qpageit_get_inc() 59 if (((u64)ret) % queue->pagesize) { in ipz_qpageit_get_inc() 66 void *ipz_qeit_eq_get_inc(struct ipz_queue *queue) in ipz_qeit_eq_get_inc() argument 68 void *ret = ipz_qeit_get(queue); in ipz_qeit_eq_get_inc() 69 u64 last_entry_in_q = queue->queue_length - queue->qe_size; in ipz_qeit_eq_get_inc() 71 queue->current_q_offset += queue->qe_size; in ipz_qeit_eq_get_inc() [all …]
|
D | ipz_pt_fn.h | 93 static inline void *ipz_qeit_calc(struct ipz_queue *queue, u64 q_offset) in ipz_qeit_calc() argument 96 if (q_offset >= queue->queue_length) in ipz_qeit_calc() 98 current_page = (queue->queue_pages)[q_offset >> EHCA_PAGESHIFT]; in ipz_qeit_calc() 106 static inline void *ipz_qeit_get(struct ipz_queue *queue) in ipz_qeit_get() argument 108 return ipz_qeit_calc(queue, queue->current_q_offset); in ipz_qeit_get() 118 void *ipz_qpageit_get_inc(struct ipz_queue *queue); 126 static inline void *ipz_qeit_get_inc(struct ipz_queue *queue) in ipz_qeit_get_inc() argument 128 void *ret = ipz_qeit_get(queue); in ipz_qeit_get_inc() 129 queue->current_q_offset += queue->qe_size; in ipz_qeit_get_inc() 130 if (queue->current_q_offset >= queue->queue_length) { in ipz_qeit_get_inc() [all …]
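ipz_qeit_calc() turns a byte offset into a pointer by splitting it into a page index and an offset within the page, and ipz_qeit_get_inc() adds wrap-around. A userspace sketch of that paged-queue addressing; page size and entry size here are assumptions:

#include <stdio.h>
#include <stdlib.h>

#define QPAGE_SHIFT 12
#define QPAGE_SIZE  (1UL << QPAGE_SHIFT)
#define QPAGE_MASK  (QPAGE_SIZE - 1)

struct paged_queue {
        void **pages;                    /* array of QPAGE_SIZE-byte pages */
        unsigned long queue_length;      /* total length in bytes */
        unsigned long current_offset;
        unsigned long qe_size;           /* size of one queue entry */
};

/* Split a byte offset into (page index, offset in page), as ipz_qeit_calc(). */
static void *qeit_calc(struct paged_queue *q, unsigned long off)
{
        if (off >= q->queue_length)
                return NULL;
        return (char *)q->pages[off >> QPAGE_SHIFT] + (off & QPAGE_MASK);
}

/* Post-increment iterator with wrap-around, like ipz_qeit_get_inc(). */
static void *qeit_get_inc(struct paged_queue *q)
{
        void *ret = qeit_calc(q, q->current_offset);

        q->current_offset += q->qe_size;
        if (q->current_offset >= q->queue_length)
                q->current_offset = 0;
        return ret;
}

int main(void)
{
        void *pages[2] = { malloc(QPAGE_SIZE), malloc(QPAGE_SIZE) };
        struct paged_queue q = {
                .pages = pages, .queue_length = 2 * QPAGE_SIZE,
                .current_offset = 0, .qe_size = 64,
        };

        for (int i = 0; i < 3; i++)
                printf("entry %d at %p\n", i, qeit_get_inc(&q));
        free(pages[0]);
        free(pages[1]);
        return 0;
}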
|
/linux-4.4.14/arch/arm/mach-ixp4xx/include/mach/ |
D | qmgr.h | 60 void qmgr_set_irq(unsigned int queue, int src, 62 void qmgr_enable_irq(unsigned int queue); 63 void qmgr_disable_irq(unsigned int queue); 70 int qmgr_request_queue(unsigned int queue, unsigned int len /* dwords */, 75 int __qmgr_request_queue(unsigned int queue, unsigned int len /* dwords */, 78 #define qmgr_request_queue(queue, len, nearly_empty_watermark, \ argument 80 __qmgr_request_queue(queue, len, nearly_empty_watermark, \ 84 void qmgr_release_queue(unsigned int queue); 87 static inline void qmgr_put_entry(unsigned int queue, u32 val) in qmgr_put_entry() argument 91 BUG_ON(!qmgr_queue_descs[queue]); /* not yet requested */ in qmgr_put_entry() [all …]
|
/linux-4.4.14/drivers/gpu/drm/vmwgfx/ |
D | vmwgfx_marker.c | 37 void vmw_marker_queue_init(struct vmw_marker_queue *queue) in vmw_marker_queue_init() argument 39 INIT_LIST_HEAD(&queue->head); in vmw_marker_queue_init() 40 queue->lag = 0; in vmw_marker_queue_init() 41 queue->lag_time = ktime_get_raw_ns(); in vmw_marker_queue_init() 42 spin_lock_init(&queue->lock); in vmw_marker_queue_init() 45 void vmw_marker_queue_takedown(struct vmw_marker_queue *queue) in vmw_marker_queue_takedown() argument 49 spin_lock(&queue->lock); in vmw_marker_queue_takedown() 50 list_for_each_entry_safe(marker, next, &queue->head, head) { in vmw_marker_queue_takedown() 53 spin_unlock(&queue->lock); in vmw_marker_queue_takedown() 56 int vmw_marker_push(struct vmw_marker_queue *queue, in vmw_marker_push() argument [all …]
|
/linux-4.4.14/drivers/net/ |
D | xen-netfront.c | 202 static struct sk_buff *xennet_get_rx_skb(struct netfront_queue *queue, in xennet_get_rx_skb() argument 206 struct sk_buff *skb = queue->rx_skbs[i]; in xennet_get_rx_skb() 207 queue->rx_skbs[i] = NULL; in xennet_get_rx_skb() 211 static grant_ref_t xennet_get_rx_ref(struct netfront_queue *queue, in xennet_get_rx_ref() argument 215 grant_ref_t ref = queue->grant_rx_ref[i]; in xennet_get_rx_ref() 216 queue->grant_rx_ref[i] = GRANT_INVALID_REF; in xennet_get_rx_ref() 232 struct netfront_queue *queue = (struct netfront_queue *)data; in rx_refill_timeout() local 233 napi_schedule(&queue->napi); in rx_refill_timeout() 236 static int netfront_tx_slot_available(struct netfront_queue *queue) in netfront_tx_slot_available() argument 238 return (queue->tx.req_prod_pvt - queue->tx.rsp_cons) < in netfront_tx_slot_available() [all …]
|
D | eql.c | 140 static void eql_kill_one_slave(slave_queue_t *queue, slave_t *slave); 147 spin_lock(&eql->queue.lock); in eql_timer() 148 head = &eql->queue.all_slaves; in eql_timer() 157 eql_kill_one_slave(&eql->queue, slave); in eql_timer() 161 spin_unlock(&eql->queue.lock); in eql_timer() 186 spin_lock_init(&eql->queue.lock); in eql_setup() 187 INIT_LIST_HEAD(&eql->queue.all_slaves); in eql_setup() 188 eql->queue.master_dev = dev; in eql_setup() 213 BUG_ON(!list_empty(&eql->queue.all_slaves)); in eql_open() 223 static void eql_kill_one_slave(slave_queue_t *queue, slave_t *slave) in eql_kill_one_slave() argument [all …]
|
D | vrf.c | 59 struct slave_queue queue; member 627 static struct slave *__vrf_find_slave_dev(struct slave_queue *queue, in __vrf_find_slave_dev() argument 630 struct list_head *head = &queue->all_slaves; in __vrf_find_slave_dev() 642 static void __vrf_remove_slave(struct slave_queue *queue, struct slave *slave) in __vrf_remove_slave() argument 647 static void __vrf_insert_slave(struct slave_queue *queue, struct slave *slave) in __vrf_insert_slave() argument 649 list_add(&slave->list, &queue->all_slaves); in __vrf_insert_slave() 656 struct slave_queue *queue = &vrf->queue; in do_vrf_add_slave() local 678 __vrf_insert_slave(queue, slave); in do_vrf_add_slave() 702 struct slave_queue *queue = &vrf->queue; in do_vrf_del_slave() local 712 slave = __vrf_find_slave_dev(queue, port_dev); in do_vrf_del_slave() [all …]
|
/linux-4.4.14/drivers/net/wireless/rt2x00/ |
D | rt2x00queue.c | 36 struct data_queue *queue = entry->queue; in rt2x00queue_alloc_rxskb() local 37 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev; in rt2x00queue_alloc_rxskb() 48 frame_size = queue->data_size + queue->desc_size + queue->winfo_size; in rt2x00queue_alloc_rxskb() 107 struct device *dev = entry->queue->rt2x00dev->dev; in rt2x00queue_map_txskb() 123 struct device *dev = entry->queue->rt2x00dev->dev; in rt2x00queue_unmap_skb() 498 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; in rt2x00queue_write_tx_data() 510 entry->queue->qid, DRV_PROJECT); in rt2x00queue_write_tx_data() 539 struct data_queue *queue = entry->queue; in rt2x00queue_write_tx_descriptor() local 541 queue->rt2x00dev->ops->lib->write_tx_desc(entry, txdesc); in rt2x00queue_write_tx_descriptor() 547 rt2x00debug_dump_frame(queue->rt2x00dev, DUMP_FRAME_TX, entry->skb); in rt2x00queue_write_tx_descriptor() [all …]
|
D | rt2x00usb.c | 238 struct data_queue *queue; in rt2x00usb_work_txdone() local 241 tx_queue_for_each(rt2x00dev, queue) { in rt2x00usb_work_txdone() 242 while (!rt2x00queue_empty(queue)) { in rt2x00usb_work_txdone() 243 entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE); in rt2x00usb_work_txdone() 257 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; in rt2x00usb_interrupt_txdone() 284 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; in rt2x00usb_kick_tx_entry() 312 usb_sndbulkpipe(usb_dev, entry->queue->usb_endpoint), in rt2x00usb_kick_tx_entry() 350 skbdesc->desc_len = entry->queue->desc_size; in rt2x00usb_work_rxdone() 362 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; in rt2x00usb_interrupt_rxdone() 377 if (urb->actual_length < entry->queue->desc_size || urb->status) in rt2x00usb_interrupt_rxdone() [all …]
|
D | rt2x00mmio.c | 62 struct data_queue *queue = rt2x00dev->rx; in rt2x00mmio_rxdone() local 69 entry = rt2x00queue_get_entry(queue, Q_INDEX); in rt2x00mmio_rxdone() 80 skbdesc->desc_len = entry->queue->desc_size; in rt2x00mmio_rxdone() 99 void rt2x00mmio_flush_queue(struct data_queue *queue, bool drop) in rt2x00mmio_flush_queue() argument 103 for (i = 0; !rt2x00queue_empty(queue) && i < 10; i++) in rt2x00mmio_flush_queue() 112 struct data_queue *queue) in rt2x00mmio_alloc_queue_dma() argument 123 queue->limit * queue->desc_size, &dma, in rt2x00mmio_alloc_queue_dma() 131 for (i = 0; i < queue->limit; i++) { in rt2x00mmio_alloc_queue_dma() 132 entry_priv = queue->entries[i].priv_data; in rt2x00mmio_alloc_queue_dma() 133 entry_priv->desc = addr + i * queue->desc_size; in rt2x00mmio_alloc_queue_dma() [all …]
|
D | rt2x00mac.c | 31 struct data_queue *queue, in rt2x00mac_tx_rts_cts() argument 91 retval = rt2x00queue_write_tx_frame(queue, skb, NULL, true); in rt2x00mac_tx_rts_cts() 107 struct data_queue *queue = NULL; in rt2x00mac_tx() local 125 queue = rt2x00queue_get_tx_queue(rt2x00dev, qid); in rt2x00mac_tx() 126 if (unlikely(!queue)) { in rt2x00mac_tx() 145 if (rt2x00queue_available(queue) <= 1) in rt2x00mac_tx() 148 if (rt2x00mac_tx_rts_cts(rt2x00dev, queue, skb)) in rt2x00mac_tx() 152 if (unlikely(rt2x00queue_write_tx_frame(queue, skb, control->sta, false))) in rt2x00mac_tx() 160 spin_lock(&queue->tx_lock); in rt2x00mac_tx() 161 if (rt2x00queue_threshold(queue)) in rt2x00mac_tx() [all …]
|
D | rt2800mmio.c | 55 const unsigned int txwi_size = entry->queue->winfo_size; in rt2800mmio_write_tx_desc() 252 struct data_queue *queue; in rt2800mmio_txdone() local 269 queue = rt2x00queue_get_tx_queue(rt2x00dev, qid); in rt2800mmio_txdone() 270 if (unlikely(queue == NULL)) { in rt2800mmio_txdone() 280 if (unlikely(rt2x00queue_empty(queue))) { in rt2800mmio_txdone() 294 if (!rt2x00queue_for_each_entry(queue, Q_INDEX_DONE, in rt2800mmio_txdone() 301 if (!rt2x00queue_for_each_entry(queue, Q_INDEX_DONE, in rt2800mmio_txdone() 313 rt2x00queue_for_each_entry(queue, Q_INDEX_DONE, in rt2800mmio_txdone() 556 void rt2800mmio_start_queue(struct data_queue *queue) in rt2800mmio_start_queue() argument 558 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev; in rt2800mmio_start_queue() [all …]
|
D | rt2800usb.c | 57 static void rt2800usb_start_queue(struct data_queue *queue) in rt2800usb_start_queue() argument 59 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev; in rt2800usb_start_queue() 62 switch (queue->qid) { in rt2800usb_start_queue() 80 static void rt2800usb_stop_queue(struct data_queue *queue) in rt2800usb_stop_queue() argument 82 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev; in rt2800usb_stop_queue() 85 switch (queue->qid) { in rt2800usb_stop_queue() 109 struct data_queue *queue; in rt2800usb_txstatus_pending() local 111 tx_queue_for_each(rt2x00dev, queue) { in rt2800usb_txstatus_pending() 112 if (rt2x00queue_get_entry(queue, Q_INDEX_DMA_DONE) != in rt2800usb_txstatus_pending() 113 rt2x00queue_get_entry(queue, Q_INDEX_DONE)) in rt2800usb_txstatus_pending() [all …]
|
D | rt2x00queue.h | 381 struct data_queue *queue; member 585 bool rt2x00queue_for_each_entry(struct data_queue *queue, 596 static inline int rt2x00queue_empty(struct data_queue *queue) in rt2x00queue_empty() argument 598 return queue->length == 0; in rt2x00queue_empty() 605 static inline int rt2x00queue_full(struct data_queue *queue) in rt2x00queue_full() argument 607 return queue->length == queue->limit; in rt2x00queue_full() 614 static inline int rt2x00queue_available(struct data_queue *queue) in rt2x00queue_available() argument 616 return queue->limit - queue->length; in rt2x00queue_available() 623 static inline int rt2x00queue_threshold(struct data_queue *queue) in rt2x00queue_threshold() argument 625 return rt2x00queue_available(queue) < queue->threshold; in rt2x00queue_threshold()
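These inlines derive queue state purely from length, limit and threshold. A compact sketch of the same occupancy helpers over an illustrative struct:

#include <stdbool.h>
#include <stdio.h>

/* Modelled on rt2x00queue_empty()/_full()/_available()/_threshold(). */
struct data_queue {
        unsigned int limit;      /* ring size */
        unsigned int length;     /* entries currently in use */
        unsigned int threshold;  /* stop point for the mac80211 queue */
};

static bool queue_empty(const struct data_queue *q) { return q->length == 0; }
static bool queue_full(const struct data_queue *q)  { return q->length == q->limit; }

static unsigned int queue_available(const struct data_queue *q)
{
        return q->limit - q->length;
}

static bool queue_below_threshold(const struct data_queue *q)
{
        /* Stop/kick decision: fewer free entries than the threshold. */
        return queue_available(q) < q->threshold;
}

int main(void)
{
        struct data_queue q = { .limit = 64, .length = 60, .threshold = 8 };

        printf("empty=%d full=%d avail=%u stop=%d\n",
               queue_empty(&q), queue_full(&q),
               queue_available(&q), queue_below_threshold(&q));
        return 0;
}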
|
D | rt2400pci.c | 633 static void rt2400pci_start_queue(struct data_queue *queue) in rt2400pci_start_queue() argument 635 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev; in rt2400pci_start_queue() 638 switch (queue->qid) { in rt2400pci_start_queue() 656 static void rt2400pci_kick_queue(struct data_queue *queue) in rt2400pci_kick_queue() argument 658 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev; in rt2400pci_kick_queue() 661 switch (queue->qid) { in rt2400pci_kick_queue() 682 static void rt2400pci_stop_queue(struct data_queue *queue) in rt2400pci_stop_queue() argument 684 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev; in rt2400pci_stop_queue() 687 switch (queue->qid) { in rt2400pci_stop_queue() 725 if (entry->queue->qid == QID_RX) { in rt2400pci_get_entry_state() [all …]
|
D | rt2x00.h | 574 void (*start_queue) (struct data_queue *queue); 575 void (*kick_queue) (struct data_queue *queue); 576 void (*stop_queue) (struct data_queue *queue); 577 void (*flush_queue) (struct data_queue *queue, bool drop); 642 void (*queue_init)(struct data_queue *queue); 1286 const enum data_queue_qid queue) in rt2x00queue_get_tx_queue() argument 1288 if (queue < rt2x00dev->ops->tx_queues && rt2x00dev->tx) in rt2x00queue_get_tx_queue() 1289 return &rt2x00dev->tx[queue]; in rt2x00queue_get_tx_queue() 1291 if (queue == QID_ATIM) in rt2x00queue_get_tx_queue() 1302 struct queue_entry *rt2x00queue_get_entry(struct data_queue *queue, [all …]
|
D | rt2800mmio.h | 149 void rt2800mmio_start_queue(struct data_queue *queue); 150 void rt2800mmio_kick_queue(struct data_queue *queue); 151 void rt2800mmio_stop_queue(struct data_queue *queue); 152 void rt2800mmio_queue_init(struct data_queue *queue);
|
D | rt2500pci.c | 292 struct data_queue *queue = rt2x00dev->bcn; in rt2500pci_config_intf() local 303 rt2x00_set_field32(®, BCNCSR1_BEACON_CWMIN, queue->cw_min); in rt2500pci_config_intf() 722 static void rt2500pci_start_queue(struct data_queue *queue) in rt2500pci_start_queue() argument 724 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev; in rt2500pci_start_queue() 727 switch (queue->qid) { in rt2500pci_start_queue() 745 static void rt2500pci_kick_queue(struct data_queue *queue) in rt2500pci_kick_queue() argument 747 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev; in rt2500pci_kick_queue() 750 switch (queue->qid) { in rt2500pci_kick_queue() 771 static void rt2500pci_stop_queue(struct data_queue *queue) in rt2500pci_stop_queue() argument 773 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev; in rt2500pci_stop_queue() [all …]
|
D | rt2500usb.c | 735 static void rt2500usb_start_queue(struct data_queue *queue) in rt2500usb_start_queue() argument 737 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev; in rt2500usb_start_queue() 740 switch (queue->qid) { in rt2500usb_start_queue() 758 static void rt2500usb_stop_queue(struct data_queue *queue) in rt2500usb_stop_queue() argument 760 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev; in rt2500usb_stop_queue() 763 switch (queue->qid) { in rt2500usb_stop_queue() 1104 rt2x00_set_field32(&word, TXD_W1_AIFS, entry->queue->aifs); in rt2500usb_write_tx_desc() 1105 rt2x00_set_field32(&word, TXD_W1_CWMIN, entry->queue->cw_min); in rt2500usb_write_tx_desc() 1106 rt2x00_set_field32(&word, TXD_W1_CWMAX, entry->queue->cw_max); in rt2500usb_write_tx_desc() 1139 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; in rt2500usb_write_beacon() [all …]
|
D | rt61pci.c | 1132 static void rt61pci_start_queue(struct data_queue *queue) in rt61pci_start_queue() argument 1134 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev; in rt61pci_start_queue() 1137 switch (queue->qid) { in rt61pci_start_queue() 1155 static void rt61pci_kick_queue(struct data_queue *queue) in rt61pci_kick_queue() argument 1157 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev; in rt61pci_kick_queue() 1160 switch (queue->qid) { in rt61pci_kick_queue() 1186 static void rt61pci_stop_queue(struct data_queue *queue) in rt61pci_stop_queue() argument 1188 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev; in rt61pci_stop_queue() 1191 switch (queue->qid) { in rt61pci_stop_queue() 1383 if (entry->queue->qid == QID_RX) { in rt61pci_get_entry_state() [all …]
|
D | rt73usb.c | 1019 static void rt73usb_start_queue(struct data_queue *queue) in rt73usb_start_queue() argument 1021 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev; in rt73usb_start_queue() 1024 switch (queue->qid) { in rt73usb_start_queue() 1042 static void rt73usb_stop_queue(struct data_queue *queue) in rt73usb_stop_queue() argument 1044 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev; in rt73usb_stop_queue() 1047 switch (queue->qid) { in rt73usb_stop_queue() 1487 rt2x00_set_field32(&word, TXD_W1_HOST_Q_ID, entry->queue->qid); in rt73usb_write_tx_desc() 1488 rt2x00_set_field32(&word, TXD_W1_AIFSN, entry->queue->aifs); in rt73usb_write_tx_desc() 1489 rt2x00_set_field32(&word, TXD_W1_CWMIN, entry->queue->cw_min); in rt73usb_write_tx_desc() 1490 rt2x00_set_field32(&word, TXD_W1_CWMAX, entry->queue->cw_max); in rt73usb_write_tx_desc() [all …]
|
D | rt2x00debug.c | 199 dump_hdr->queue_index = skbdesc->entry->queue->qid; in rt2x00debug_dump_frame() 331 struct data_queue *queue; in rt2x00debug_read_queue_stats() local 348 queue_for_each(intf->rt2x00dev, queue) { in rt2x00debug_read_queue_stats() 349 spin_lock_irqsave(&queue->index_lock, irqflags); in rt2x00debug_read_queue_stats() 352 queue->qid, (unsigned int)queue->flags, in rt2x00debug_read_queue_stats() 353 queue->count, queue->limit, queue->length, in rt2x00debug_read_queue_stats() 354 queue->index[Q_INDEX], in rt2x00debug_read_queue_stats() 355 queue->index[Q_INDEX_DMA_DONE], in rt2x00debug_read_queue_stats() 356 queue->index[Q_INDEX_DONE]); in rt2x00debug_read_queue_stats() 358 spin_unlock_irqrestore(&queue->index_lock, irqflags); in rt2x00debug_read_queue_stats()
|
/linux-4.4.14/drivers/scsi/arm/ |
D | queue.c | 58 int queue_initialise (Queue_t *queue) in queue_initialise() argument 63 spin_lock_init(&queue->queue_lock); in queue_initialise() 64 INIT_LIST_HEAD(&queue->head); in queue_initialise() 65 INIT_LIST_HEAD(&queue->free); in queue_initialise() 73 queue->alloc = q = kmalloc(sizeof(QE_t) * nqueues, GFP_KERNEL); in queue_initialise() 78 list_add(&q->list, &queue->free); in queue_initialise() 82 return queue->alloc != NULL; in queue_initialise() 90 void queue_free (Queue_t *queue) in queue_free() argument 92 if (!list_empty(&queue->head)) in queue_free() 93 printk(KERN_WARNING "freeing non-empty queue %p\n", queue); in queue_free() [all …]
|
D | queue.h | 25 extern int queue_initialise (Queue_t *queue); 32 extern void queue_free (Queue_t *queue); 40 extern struct scsi_cmnd *queue_remove (Queue_t *queue); 49 extern struct scsi_cmnd *queue_remove_exclude(Queue_t *queue, 52 #define queue_add_cmd_ordered(queue,SCpnt) \ argument 53 __queue_add(queue,SCpnt,(SCpnt)->cmnd[0] == REQUEST_SENSE) 54 #define queue_add_cmd_tail(queue,SCpnt) \ argument 55 __queue_add(queue,SCpnt,0) 64 extern int __queue_add(Queue_t *queue, struct scsi_cmnd *SCpnt, int head); 75 extern struct scsi_cmnd *queue_remove_tgtluntag(Queue_t *queue, int target, [all …]
|
D | Makefile | 7 obj-$(CONFIG_SCSI_ACORNSCSI_3) += acornscsi_mod.o queue.o msgqueue.o 8 obj-$(CONFIG_SCSI_ARXESCSI) += arxescsi.o fas216.o queue.o msgqueue.o 10 obj-$(CONFIG_SCSI_CUMANA_2) += cumana_2.o fas216.o queue.o msgqueue.o 12 obj-$(CONFIG_SCSI_POWERTECSCSI) += powertec.o fas216.o queue.o msgqueue.o 13 obj-$(CONFIG_SCSI_EESOXSCSI) += eesox.o fas216.o queue.o msgqueue.o
|
/linux-4.4.14/net/sunrpc/ |
D | sched.c | 65 __rpc_disable_timer(struct rpc_wait_queue *queue, struct rpc_task *task) in __rpc_disable_timer() argument 72 if (list_empty(&queue->timer_list.list)) in __rpc_disable_timer() 73 del_timer(&queue->timer_list.timer); in __rpc_disable_timer() 77 rpc_set_queue_timer(struct rpc_wait_queue *queue, unsigned long expires) in rpc_set_queue_timer() argument 79 queue->timer_list.expires = expires; in rpc_set_queue_timer() 80 mod_timer(&queue->timer_list.timer, expires); in rpc_set_queue_timer() 87 __rpc_add_timer(struct rpc_wait_queue *queue, struct rpc_task *task) in __rpc_add_timer() argument 96 …if (list_empty(&queue->timer_list.list) || time_before(task->u.tk_wait.expires, queue->timer_list.… in __rpc_add_timer() 97 rpc_set_queue_timer(queue, task->u.tk_wait.expires); in __rpc_add_timer() 98 list_add(&task->u.tk_wait.timer_list, &queue->timer_list.list); in __rpc_add_timer() [all …]
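__rpc_add_timer() keeps a single timer per wait queue armed at the earliest expiry among the queued tasks, re-arming only when a new task expires sooner. A simplified sketch of that decision, with a boolean standing in for the timer and the kernel's time_before() idiom reproduced:

#include <stdbool.h>
#include <stdio.h>

struct wait_queue {
        bool timer_armed;
        unsigned long timer_expires;
};

/* Wrap-safe "a is earlier than b", as in the kernel's time_before(). */
static bool time_before(unsigned long a, unsigned long b)
{
        return (long)(a - b) < 0;
}

static void queue_add_timer(struct wait_queue *q, unsigned long task_expires)
{
        /* Re-arm only if the queue timer is idle or this task is sooner. */
        if (!q->timer_armed || time_before(task_expires, q->timer_expires)) {
                q->timer_expires = task_expires;
                q->timer_armed = true;
        }
}

int main(void)
{
        struct wait_queue q = { .timer_armed = false };

        queue_add_timer(&q, 1000);
        queue_add_timer(&q, 400);     /* earlier deadline wins */
        queue_add_timer(&q, 2000);    /* later deadline is ignored */
        printf("timer fires at %lu\n", q.timer_expires);
        return 0;
}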
|
/linux-4.4.14/drivers/net/ethernet/ibm/ehea/ |
D | ehea_qmr.h | 210 static inline void *hw_qeit_calc(struct hw_queue *queue, u64 q_offset) in hw_qeit_calc() argument 214 if (q_offset >= queue->queue_length) in hw_qeit_calc() 215 q_offset -= queue->queue_length; in hw_qeit_calc() 216 current_page = (queue->queue_pages)[q_offset >> EHEA_PAGESHIFT]; in hw_qeit_calc() 220 static inline void *hw_qeit_get(struct hw_queue *queue) in hw_qeit_get() argument 222 return hw_qeit_calc(queue, queue->current_q_offset); in hw_qeit_get() 225 static inline void hw_qeit_inc(struct hw_queue *queue) in hw_qeit_inc() argument 227 queue->current_q_offset += queue->qe_size; in hw_qeit_inc() 228 if (queue->current_q_offset >= queue->queue_length) { in hw_qeit_inc() 229 queue->current_q_offset = 0; in hw_qeit_inc() [all …]
|
D | ehea_qmr.c | 39 static void *hw_qpageit_get_inc(struct hw_queue *queue) in hw_qpageit_get_inc() argument 41 void *retvalue = hw_qeit_get(queue); in hw_qpageit_get_inc() 43 queue->current_q_offset += queue->pagesize; in hw_qpageit_get_inc() 44 if (queue->current_q_offset > queue->queue_length) { in hw_qpageit_get_inc() 45 queue->current_q_offset -= queue->pagesize; in hw_qpageit_get_inc() 54 static int hw_queue_ctor(struct hw_queue *queue, const u32 nr_of_pages, in hw_queue_ctor() argument 66 queue->queue_length = nr_of_pages * pagesize; in hw_queue_ctor() 67 queue->queue_pages = kmalloc_array(nr_of_pages, sizeof(void *), in hw_queue_ctor() 69 if (!queue->queue_pages) in hw_queue_ctor() 83 (queue->queue_pages)[i] = (struct ehea_page *)kpage; in hw_queue_ctor() [all …]
|
/linux-4.4.14/drivers/net/wireless/ath/ath5k/ |
D | qcu.c | 63 ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue) in ath5k_hw_num_tx_pending() argument 66 AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num); in ath5k_hw_num_tx_pending() 69 if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE) in ath5k_hw_num_tx_pending() 76 pending = ath5k_hw_reg_read(ah, AR5K_QUEUE_STATUS(queue)); in ath5k_hw_num_tx_pending() 82 if (!pending && AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, queue)) in ath5k_hw_num_tx_pending() 94 ath5k_hw_release_tx_queue(struct ath5k_hw *ah, unsigned int queue) in ath5k_hw_release_tx_queue() argument 96 if (WARN_ON(queue >= ah->ah_capabilities.cap_queues.q_tx_num)) in ath5k_hw_release_tx_queue() 100 ah->ah_txq[queue].tqi_type = AR5K_TX_QUEUE_INACTIVE; in ath5k_hw_release_tx_queue() 102 AR5K_Q_DISABLE_BITS(ah->ah_txq_status, queue); in ath5k_hw_release_tx_queue() 138 ath5k_hw_get_tx_queueprops(struct ath5k_hw *ah, int queue, in ath5k_hw_get_tx_queueprops() argument [all …]
|
D | dma.c | 130 ath5k_hw_start_tx_dma(struct ath5k_hw *ah, unsigned int queue) in ath5k_hw_start_tx_dma() argument 134 AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num); in ath5k_hw_start_tx_dma() 137 if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE) in ath5k_hw_start_tx_dma() 146 switch (ah->ah_txq[queue].tqi_type) { in ath5k_hw_start_tx_dma() 168 if (AR5K_REG_READ_Q(ah, AR5K_QCU_TXD, queue)) in ath5k_hw_start_tx_dma() 172 AR5K_REG_WRITE_Q(ah, AR5K_QCU_TXE, queue); in ath5k_hw_start_tx_dma() 188 ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue) in ath5k_hw_stop_tx_dma() argument 193 AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num); in ath5k_hw_stop_tx_dma() 196 if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE) in ath5k_hw_stop_tx_dma() 205 switch (ah->ah_txq[queue].tqi_type) { in ath5k_hw_stop_tx_dma() [all …]
|
/linux-4.4.14/arch/mips/cavium-octeon/executive/ |
D | cvmx-pko.c | 70 int queue; in __cvmx_pko_iport_config() local 76 for (queue = 0; queue < num_queues; queue++) { in __cvmx_pko_iport_config() 82 config.s.index = queue; in __cvmx_pko_iport_config() 83 config.s.qid = base_queue + queue; in __cvmx_pko_iport_config() 85 config.s.tail = (queue == (num_queues - 1)); in __cvmx_pko_iport_config() 86 config.s.s_tail = (queue == static_priority_end); in __cvmx_pko_iport_config() 88 config.s.static_q = (queue <= static_priority_end); in __cvmx_pko_iport_config() 92 CVMX_CMD_QUEUE_PKO(base_queue + queue), in __cvmx_pko_iport_config() 101 num_queues, queue); in __cvmx_pko_iport_config() 104 CVMX_CMD_QUEUE_PKO(base_queue + queue)); in __cvmx_pko_iport_config() [all …]
|
D | cvmx-helper-util.c | 182 int cvmx_helper_setup_red_queue(int queue, int pass_thresh, int drop_thresh) in cvmx_helper_setup_red_queue() argument 193 cvmx_write_csr(CVMX_IPD_QOSX_RED_MARKS(queue), red_marks.u64); in cvmx_helper_setup_red_queue() 202 cvmx_write_csr(CVMX_IPD_RED_QUEX_PARAM(queue), red_param.u64); in cvmx_helper_setup_red_queue() 222 int queue; in cvmx_helper_setup_red() local 237 for (queue = 0; queue < 8; queue++) in cvmx_helper_setup_red() 238 cvmx_helper_setup_red_queue(queue, pass_thresh, drop_thresh); in cvmx_helper_setup_red()
|
/linux-4.4.14/net/netfilter/ |
D | nfnetlink_queue.c | 158 static void nfqnl_flush(struct nfqnl_instance *queue, nfqnl_cmpfn cmpfn, 188 __enqueue_entry(struct nfqnl_instance *queue, struct nf_queue_entry *entry) in __enqueue_entry() argument 190 list_add_tail(&entry->list, &queue->queue_list); in __enqueue_entry() 191 queue->queue_total++; in __enqueue_entry() 195 __dequeue_entry(struct nfqnl_instance *queue, struct nf_queue_entry *entry) in __dequeue_entry() argument 198 queue->queue_total--; in __dequeue_entry() 202 find_dequeue_entry(struct nfqnl_instance *queue, unsigned int id) in find_dequeue_entry() argument 206 spin_lock_bh(&queue->lock); in find_dequeue_entry() 208 list_for_each_entry(i, &queue->queue_list, list) { in find_dequeue_entry() 216 __dequeue_entry(queue, entry); in find_dequeue_entry() [all …]
|
D | xt_NFQUEUE.c | 42 u32 queue = info->queuenum; in nfqueue_tg_v1() local 45 queue = nfqueue_hash(skb, queue, info->queues_total, in nfqueue_tg_v1() 48 return NF_QUEUE_NR(queue); in nfqueue_tg_v1() 91 u32 queue = info->queuenum; in nfqueue_tg_v3() local 98 queue = info->queuenum + cpu % info->queues_total; in nfqueue_tg_v3() 100 queue = nfqueue_hash(skb, queue, info->queues_total, in nfqueue_tg_v3() 105 ret = NF_QUEUE_NR(queue); in nfqueue_tg_v3()
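nfqueue_tg_v3() spreads packets over a contiguous block of NFQUEUE numbers either per CPU (queuenum + cpu % queues_total) or by flow hash via nfqueue_hash(). A sketch of the two spreading rules; the flow hash parameter is a placeholder here, not the kernel's jhash-based helper:

#include <stdint.h>
#include <stdio.h>

static uint16_t pick_queue_cpu(uint16_t base, uint16_t total, unsigned int cpu)
{
        /* Per-CPU spreading: each CPU sticks to one queue in the block. */
        return base + cpu % total;
}

static uint16_t pick_queue_hash(uint16_t base, uint16_t total, uint32_t flow_hash)
{
        /* Flow-based spreading: all packets of a flow share one queue. */
        return base + flow_hash % total;
}

int main(void)
{
        /* Queues 10..13, four of them. */
        printf("cpu 6   -> queue %u\n", pick_queue_cpu(10, 4, 6));
        printf("flow 99 -> queue %u\n", pick_queue_hash(10, 4, 99));
        return 0;
}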
|
D | nft_queue.c | 35 u32 queue = priv->queuenum; in nft_queue_eval() local 42 queue = priv->queuenum + cpu % priv->queues_total; in nft_queue_eval() 44 queue = nfqueue_hash(pkt->skb, queue, in nft_queue_eval() 50 ret = NF_QUEUE_NR(queue); in nft_queue_eval()
|
/linux-4.4.14/arch/arm/mach-ixp4xx/ |
D | ixp4xx_qmgr.c | 28 void qmgr_set_irq(unsigned int queue, int src, in qmgr_set_irq() argument 34 if (queue < HALF_QUEUES) { in qmgr_set_irq() 38 reg = &qmgr_regs->irqsrc[queue >> 3]; /* 8 queues per u32 */ in qmgr_set_irq() 39 bit = (queue % 8) * 4; /* 3 bits + 1 reserved bit per queue */ in qmgr_set_irq() 46 irq_handlers[queue] = handler; in qmgr_set_irq() 47 irq_pdevs[queue] = pdev; in qmgr_set_irq() 115 void qmgr_enable_irq(unsigned int queue) in qmgr_enable_irq() argument 118 int half = queue / 32; in qmgr_enable_irq() 119 u32 mask = 1 << (queue & (HALF_QUEUES - 1)); in qmgr_enable_irq() 127 void qmgr_disable_irq(unsigned int queue) in qmgr_disable_irq() argument [all …]
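qmgr_set_irq() packs the interrupt source for eight queues into each 32-bit irqsrc register, four bits per queue. A small sketch of that field arithmetic; the 3-bit source mask is an assumption based on the "3 bits + 1 reserved bit" comment above:

#include <stdint.h>
#include <stdio.h>

static void set_irq_src(uint32_t irqsrc[], unsigned int queue, unsigned int src)
{
        uint32_t *reg = &irqsrc[queue >> 3];     /* 8 queues per register */
        unsigned int bit = (queue % 8) * 4;      /* 4 bits per queue */

        *reg = (*reg & ~(7U << bit)) | ((src & 7U) << bit);
}

int main(void)
{
        uint32_t irqsrc[8] = { 0 };              /* covers 64 queues */

        set_irq_src(irqsrc, 13, 5);              /* queue 13 -> reg 1, bits 20..23 */
        printf("reg[1] = 0x%08x\n", (unsigned int)irqsrc[1]);
        return 0;
}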
|
/linux-4.4.14/include/net/ |
D | request_sock.h | 179 void reqsk_queue_alloc(struct request_sock_queue *queue); 184 static inline bool reqsk_queue_empty(const struct request_sock_queue *queue) in reqsk_queue_empty() argument 186 return queue->rskq_accept_head == NULL; in reqsk_queue_empty() 189 static inline struct request_sock *reqsk_queue_remove(struct request_sock_queue *queue, in reqsk_queue_remove() argument 194 spin_lock_bh(&queue->rskq_lock); in reqsk_queue_remove() 195 req = queue->rskq_accept_head; in reqsk_queue_remove() 198 queue->rskq_accept_head = req->dl_next; in reqsk_queue_remove() 199 if (queue->rskq_accept_head == NULL) in reqsk_queue_remove() 200 queue->rskq_accept_tail = NULL; in reqsk_queue_remove() 202 spin_unlock_bh(&queue->rskq_lock); in reqsk_queue_remove() [all …]
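reqsk_queue_remove() pops the head of a singly linked accept queue under rskq_lock, clearing the tail pointer when the list empties. A userspace sketch of the same FIFO pop, with a pthread mutex in place of the spinlock and a stand-in request type:

#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

struct request {                     /* stands in for struct request_sock */
        struct request *dl_next;
        int id;
};

struct accept_queue {
        pthread_mutex_t lock;
        struct request *head;
        struct request *tail;
};

static struct request *accept_queue_remove(struct accept_queue *q)
{
        struct request *req;

        pthread_mutex_lock(&q->lock);
        req = q->head;
        if (req) {
                q->head = req->dl_next;
                if (!q->head)
                        q->tail = NULL;   /* queue is now empty */
        }
        pthread_mutex_unlock(&q->lock);
        return req;
}

int main(void)
{
        struct request r2 = { NULL, 2 }, r1 = { &r2, 1 };
        struct accept_queue q = { PTHREAD_MUTEX_INITIALIZER, &r1, &r2 };

        printf("popped %d, then %d\n",
               accept_queue_remove(&q)->id, accept_queue_remove(&q)->id);
        return 0;
}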
|
/linux-4.4.14/net/sctp/ |
D | inqueue.c | 47 void sctp_inq_init(struct sctp_inq *queue) in sctp_inq_init() argument 49 INIT_LIST_HEAD(&queue->in_chunk_list); in sctp_inq_init() 50 queue->in_progress = NULL; in sctp_inq_init() 53 INIT_WORK(&queue->immediate, NULL); in sctp_inq_init() 57 void sctp_inq_free(struct sctp_inq *queue) in sctp_inq_free() argument 62 list_for_each_entry_safe(chunk, tmp, &queue->in_chunk_list, list) { in sctp_inq_free() 70 if (queue->in_progress) { in sctp_inq_free() 71 sctp_chunk_free(queue->in_progress); in sctp_inq_free() 72 queue->in_progress = NULL; in sctp_inq_free() 99 struct sctp_chunkhdr *sctp_inq_peek(struct sctp_inq *queue) in sctp_inq_peek() argument [all …]
|
/linux-4.4.14/sound/core/seq/ |
D | seq_queue.c | 72 q->queue = i; in queue_list_add() 121 q->queue = -1; in queue_new() 199 snd_seq_queue_use(q->queue, client, 1); /* use this queue */ in snd_seq_queue_alloc() 200 return q->queue; in snd_seq_queue_alloc() 320 dest = cell->event.queue; /* destination queue */ in snd_seq_enqueue_event() 449 struct snd_seq_queue *queue; in snd_seq_queue_timer_open() local 452 queue = queueptr(queueid); in snd_seq_queue_timer_open() 453 if (queue == NULL) in snd_seq_queue_timer_open() 455 tmr = queue->timer; in snd_seq_queue_timer_open() 456 if ((result = snd_seq_timer_open(queue)) < 0) { in snd_seq_queue_timer_open() [all …]
|
D | seq_clientmgr.c | 545 bounce_ev.queue = SNDRV_SEQ_QUEUE_DIRECT; in bounce_error_event() 569 int queue, int real_time) in update_timestamp_of_queue() argument 573 q = queueptr(queue); in update_timestamp_of_queue() 576 event->queue = queue; in update_timestamp_of_queue() 687 update_timestamp_of_queue(event, subs->info.queue, in deliver_to_subscribers() 818 if (event->queue == SNDRV_SEQ_ADDRESS_SUBSCRIBERS || in snd_seq_deliver_event() 822 else if (event->queue == SNDRV_SEQ_ADDRESS_BROADCAST || in snd_seq_deliver_event() 927 if (event->queue == SNDRV_SEQ_ADDRESS_SUBSCRIBERS) { in snd_seq_client_enqueue_event() 929 event->queue = SNDRV_SEQ_QUEUE_DIRECT; in snd_seq_client_enqueue_event() 932 if (event->queue == SNDRV_SEQ_ADDRESS_BROADCAST) { in snd_seq_client_enqueue_event() [all …]
|
/linux-4.4.14/net/irda/ |
D | irqueue.c | 233 static void enqueue_first(irda_queue_t **queue, irda_queue_t* element) in enqueue_first() argument 239 if ( *queue == NULL ) { in enqueue_first() 243 element->q_next = element->q_prev = *queue = element; in enqueue_first() 249 element->q_next = (*queue); in enqueue_first() 250 (*queue)->q_prev->q_next = element; in enqueue_first() 251 element->q_prev = (*queue)->q_prev; in enqueue_first() 252 (*queue)->q_prev = element; in enqueue_first() 253 (*queue) = element; in enqueue_first() 264 static irda_queue_t *dequeue_first(irda_queue_t **queue) in dequeue_first() argument 273 ret = *queue; in dequeue_first() [all …]
|
/linux-4.4.14/drivers/gpu/drm/amd/amdkfd/ |
D | kfd_kernel_queue.c | 124 if (init_queue(&kq->queue, prop) != 0) in initialize() 127 kq->queue->device = dev; in initialize() 128 kq->queue->process = kfd_get_process(current); in initialize() 130 retval = kq->mqd->init_mqd(kq->mqd, &kq->queue->mqd, in initialize() 131 &kq->queue->mqd_mem_obj, in initialize() 132 &kq->queue->gart_mqd_addr, in initialize() 133 &kq->queue->properties); in initialize() 140 kq->queue->pipe = KFD_CIK_HIQ_PIPE; in initialize() 141 kq->queue->queue = KFD_CIK_HIQ_QUEUE; in initialize() 142 kq->mqd->load_mqd(kq->mqd, kq->queue->mqd, kq->queue->pipe, in initialize() [all …]
|
D | kfd_queue.c | 45 void print_queue(struct queue *q) in print_queue() 66 int init_queue(struct queue **q, struct queue_properties properties) in init_queue() 68 struct queue *tmp; in init_queue() 72 tmp = kzalloc(sizeof(struct queue), GFP_KERNEL); in init_queue() 82 void uninit_queue(struct queue *q) in uninit_queue()
|
D | kfd_device_queue_manager.h | 88 struct queue *q, 94 struct queue *q); 97 struct queue *q); 140 struct queue *q,
|
D | kfd_device_queue_manager.c | 44 struct queue *q, 52 struct queue *q, 96 struct queue *q) in allocate_vmid() 120 struct queue *q) in deallocate_vmid() 133 struct queue *q, in create_queue_nocpsch() 196 static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q) in allocate_hqd() 213 q->queue = bit; in allocate_hqd() 223 __func__, q->pipe, q->queue); in allocate_hqd() 231 struct queue *q) in deallocate_hqd() 233 set_bit(q->queue, (unsigned long *)&dqm->allocated_queues[q->pipe]); in deallocate_hqd() [all …]
|
/linux-4.4.14/drivers/soc/ti/ |
D | knav_qmss_acc.c | 45 int range_base, queue; in __knav_acc_notify() local 50 for (queue = 0; queue < range->num_queues; queue++) { in __knav_acc_notify() 52 queue); in __knav_acc_notify() 56 range_base + queue); in __knav_acc_notify() 61 queue = acc->channel - range->acc_info.start_channel; in __knav_acc_notify() 62 inst = knav_range_offset_to_inst(kdev, range, queue); in __knav_acc_notify() 64 range_base + queue); in __knav_acc_notify() 104 int range_base, channel, queue = 0; in knav_acc_int_handler() local 115 for (queue = 0; queue < range->num_irqs; queue++) in knav_acc_int_handler() 116 if (range->irqs[queue].irq == irq) in knav_acc_int_handler() [all …]
|
/linux-4.4.14/drivers/net/ethernet/hisilicon/hns/ |
D | hns_dsaf_rcb.c | 728 void hns_rcb_update_stats(struct hnae_queue *queue) in hns_rcb_update_stats() argument 731 container_of(queue, struct ring_pair_cb, q); in hns_rcb_update_stats() 737 hw_stats->rx_pkts += dsaf_read_dev(queue, in hns_rcb_update_stats() 739 dsaf_write_dev(queue, RCB_RING_RX_RING_PKTNUM_RECORD_REG, 0x1); in hns_rcb_update_stats() 746 hw_stats->tx_pkts += dsaf_read_dev(queue, in hns_rcb_update_stats() 748 dsaf_write_dev(queue, RCB_RING_TX_RING_PKTNUM_RECORD_REG, 0x1); in hns_rcb_update_stats() 761 void hns_rcb_get_stats(struct hnae_queue *queue, u64 *data) in hns_rcb_get_stats() argument 765 container_of(queue, struct ring_pair_cb, q); in hns_rcb_get_stats() 772 dsaf_read_dev(queue, RCB_RING_TX_RING_FBDNUM_REG); in hns_rcb_get_stats() 774 regs_buff[4] = queue->tx_ring.stats.tx_pkts; in hns_rcb_get_stats() [all …]
|
D | hns_ae_adapt.c | 341 static void hns_ae_toggle_queue_status(struct hnae_queue *queue, u32 val) in hns_ae_toggle_queue_status() argument 343 hns_rcb_start(queue, val); in hns_ae_toggle_queue_status() 372 static void hns_ae_get_ring_bdnum_limit(struct hnae_queue *queue, in hns_ae_get_ring_bdnum_limit() argument 490 struct hnae_queue *queue; in hns_ae_update_stats() local 504 queue = handle->qs[idx]; in hns_ae_update_stats() 505 hns_rcb_update_stats(queue); in hns_ae_update_stats() 507 tx_bytes += queue->tx_ring.stats.tx_bytes; in hns_ae_update_stats() 508 tx_packets += queue->tx_ring.stats.tx_pkts; in hns_ae_update_stats() 509 rx_bytes += queue->rx_ring.stats.rx_bytes; in hns_ae_update_stats() 510 rx_packets += queue->rx_ring.stats.rx_pkts; in hns_ae_update_stats() [all …]
|
/linux-4.4.14/Documentation/ABI/testing/ |
D | sysfs-class-net-queues | 1 What: /sys/class/<iface>/queues/rx-<queue>/rps_cpus 8 network device queue. Possible values depend on the number 11 What: /sys/class/<iface>/queues/rx-<queue>/rps_flow_cnt 17 processed by this particular network device receive queue. 19 What: /sys/class/<iface>/queues/tx-<queue>/tx_timeout 25 network interface transmit queue. 27 What: /sys/class/<iface>/queues/tx-<queue>/tx_maxrate 32 A Mbps max-rate set for the queue, a value of zero means disabled, 35 What: /sys/class/<iface>/queues/tx-<queue>/xps_cpus 42 network device transmit queue. Possible values depend on the [all …]
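As a usage illustration of the rps_cpus attribute described above, the snippet below writes a CPU bitmask from userspace; the interface name, queue index and mask value are assumptions for the example, and the canonical path lives under /sys/class/net/:

#include <stdio.h>

int main(void)
{
        /* Steer RX queue 0 of eth0 to CPUs 0-3 (mask 0xf). */
        const char *path = "/sys/class/net/eth0/queues/rx-0/rps_cpus";
        FILE *f = fopen(path, "w");

        if (!f) {
                perror(path);
                return 1;
        }
        fputs("f\n", f);
        fclose(f);
        return 0;
}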
|
/linux-4.4.14/drivers/staging/rtl8723au/os_dep/ |
D | xmit_linux.c | 63 u16 queue; in rtw_os_pkt_complete23a() local 65 queue = skb_get_queue_mapping(pkt); in rtw_os_pkt_complete23a() 67 if (__netif_subqueue_stopped(padapter->pnetdev, queue) && in rtw_os_pkt_complete23a() 68 (pxmitpriv->hwxmits[queue].accnt < WMM_XMIT_THRESHOLD)) in rtw_os_pkt_complete23a() 69 netif_wake_subqueue(padapter->pnetdev, queue); in rtw_os_pkt_complete23a() 71 if (__netif_subqueue_stopped(padapter->pnetdev, queue)) in rtw_os_pkt_complete23a() 72 netif_wake_subqueue(padapter->pnetdev, queue); in rtw_os_pkt_complete23a() 105 u16 queue; in rtw_check_xmit_resource() local 107 queue = skb_get_queue_mapping(pkt); in rtw_check_xmit_resource() 110 if (pxmitpriv->hwxmits[queue].accnt > WMM_XMIT_THRESHOLD) in rtw_check_xmit_resource() [all …]
|
/linux-4.4.14/drivers/isdn/i4l/ |
D | isdn_net.h | 85 lp = nd->queue; /* get lp on top of queue */ in isdn_net_get_locked_lp() 86 while (isdn_net_lp_busy(nd->queue)) { in isdn_net_get_locked_lp() 87 nd->queue = nd->queue->next; in isdn_net_get_locked_lp() 88 if (nd->queue == lp) { /* not found -- should never happen */ in isdn_net_get_locked_lp() 93 lp = nd->queue; in isdn_net_get_locked_lp() 94 nd->queue = nd->queue->next; in isdn_net_get_locked_lp() 114 lp = nd->queue; in isdn_net_add_to_bundle() 121 nd->queue = nlp; in isdn_net_add_to_bundle() 141 if (master_lp->netdev->queue == lp) { in isdn_net_rm_from_bundle() 142 master_lp->netdev->queue = lp->next; in isdn_net_rm_from_bundle() [all …]
|
/linux-4.4.14/drivers/block/rsxx/ |
D | dev.c | 270 card->queue = blk_alloc_queue(GFP_KERNEL); in rsxx_setup_dev() 271 if (!card->queue) { in rsxx_setup_dev() 280 blk_cleanup_queue(card->queue); in rsxx_setup_dev() 287 blk_queue_dma_alignment(card->queue, blk_size - 1); in rsxx_setup_dev() 288 blk_queue_logical_block_size(card->queue, blk_size); in rsxx_setup_dev() 291 blk_queue_make_request(card->queue, rsxx_make_request); in rsxx_setup_dev() 292 blk_queue_bounce_limit(card->queue, BLK_BOUNCE_ANY); in rsxx_setup_dev() 293 blk_queue_max_hw_sectors(card->queue, blkdev_max_hw_sectors); in rsxx_setup_dev() 294 blk_queue_physical_block_size(card->queue, RSXX_HW_BLK_SIZE); in rsxx_setup_dev() 296 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, card->queue); in rsxx_setup_dev() [all …]
|
/linux-4.4.14/drivers/net/fddi/skfp/ |
D | hwmtm.c | 85 static u_long repair_txd_ring(struct s_smc *smc, struct s_smt_tx_queue *queue); 86 static u_long repair_rxd_ring(struct s_smc *smc, struct s_smt_rx_queue *queue); 358 struct s_smt_tx_queue *queue ; in init_txd_ring() local 366 queue = smc->hw.fp.tx[QUEUE_A0] ; in init_txd_ring() 372 queue->tx_curr_put = queue->tx_curr_get = ds ; in init_txd_ring() 374 queue->tx_free = HWM_ASYNC_TXD_COUNT ; in init_txd_ring() 375 queue->tx_used = 0 ; in init_txd_ring() 380 queue = smc->hw.fp.tx[QUEUE_S] ; in init_txd_ring() 386 queue->tx_curr_put = queue->tx_curr_get = ds ; in init_txd_ring() 387 queue->tx_free = HWM_SYNC_TXD_COUNT ; in init_txd_ring() [all …]
|
/linux-4.4.14/net/core/ |
D | net-sysfs.c | 635 struct netdev_rx_queue *queue = to_rx_queue(kobj); in rx_queue_attr_show() local 640 return attribute->show(queue, attribute, buf); in rx_queue_attr_show() 647 struct netdev_rx_queue *queue = to_rx_queue(kobj); in rx_queue_attr_store() local 652 return attribute->store(queue, attribute, buf, count); in rx_queue_attr_store() 661 static ssize_t show_rps_map(struct netdev_rx_queue *queue, in show_rps_map() argument 672 map = rcu_dereference(queue->rps_map); in show_rps_map() 684 static ssize_t store_rps_map(struct netdev_rx_queue *queue, in store_rps_map() argument 725 old_map = rcu_dereference_protected(queue->rps_map, in store_rps_map() 727 rcu_assign_pointer(queue->rps_map, map); in store_rps_map() 743 static ssize_t show_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue, in show_rps_dev_flow_table_cnt() argument [all …]
|
D | request_sock.c | 40 void reqsk_queue_alloc(struct request_sock_queue *queue) in reqsk_queue_alloc() argument 42 spin_lock_init(&queue->rskq_lock); in reqsk_queue_alloc() 44 spin_lock_init(&queue->fastopenq.lock); in reqsk_queue_alloc() 45 queue->fastopenq.rskq_rst_head = NULL; in reqsk_queue_alloc() 46 queue->fastopenq.rskq_rst_tail = NULL; in reqsk_queue_alloc() 47 queue->fastopenq.qlen = 0; in reqsk_queue_alloc() 49 queue->rskq_accept_head = NULL; in reqsk_queue_alloc()
|
/linux-4.4.14/drivers/gpu/drm/ttm/ |
D | ttm_lock.c | 48 init_waitqueue_head(&lock->queue); in ttm_lock_init() 60 wake_up_all(&lock->queue); in ttm_read_unlock() 88 ret = wait_event_interruptible(lock->queue, in ttm_read_lock() 91 wait_event(lock->queue, __ttm_read_lock(lock)); in ttm_read_lock() 127 (lock->queue, __ttm_read_trylock(lock, &locked)); in ttm_read_trylock() 129 wait_event(lock->queue, __ttm_read_trylock(lock, &locked)); in ttm_read_trylock() 143 wake_up_all(&lock->queue); in ttm_write_unlock() 174 ret = wait_event_interruptible(lock->queue, in ttm_write_lock() 179 wake_up_all(&lock->queue); in ttm_write_lock() 183 wait_event(lock->queue, __ttm_write_lock(lock)); in ttm_write_lock() [all …]
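ttm_read_lock()/ttm_read_unlock() sleep on a wait queue until the lock state allows readers and call wake_up_all() on release. A condition-variable sketch of that wait-until-predicate pattern; the rw encoding is simplified and the type name is made up:

#include <pthread.h>
#include <stdio.h>

struct ttm_like_lock {
        pthread_mutex_t mutex;
        pthread_cond_t  queue;   /* stands in for the kernel wait queue head */
        int             rw;      /* >0: reader count, -1: writer holds it */
};

static void read_lock(struct ttm_like_lock *lk)
{
        pthread_mutex_lock(&lk->mutex);
        while (lk->rw < 0)                       /* wait_event(lock->queue, ...) */
                pthread_cond_wait(&lk->queue, &lk->mutex);
        lk->rw++;
        pthread_mutex_unlock(&lk->mutex);
}

static void read_unlock(struct ttm_like_lock *lk)
{
        pthread_mutex_lock(&lk->mutex);
        lk->rw--;
        pthread_cond_broadcast(&lk->queue);      /* wake_up_all(&lock->queue) */
        pthread_mutex_unlock(&lk->mutex);
}

int main(void)
{
        struct ttm_like_lock lk = {
                PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0
        };

        read_lock(&lk);
        printf("readers: %d\n", lk.rw);
        read_unlock(&lk);
        return 0;
}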
|
/linux-4.4.14/drivers/staging/unisys/visorbus/ |
D | visorchannel.c | 333 #define SIG_WRITE_FIELD(channel, queue, sig_hdr, FIELD) \ argument 335 SIG_QUEUE_OFFSET(&channel->chan_hdr, queue) +\ 341 sig_read_header(struct visorchannel *channel, u32 queue, in sig_read_header() argument 351 SIG_QUEUE_OFFSET(&channel->chan_hdr, queue), in sig_read_header() 360 sig_read_data(struct visorchannel *channel, u32 queue, in sig_read_data() argument 364 int signal_data_offset = SIG_DATA_OFFSET(&channel->chan_hdr, queue, in sig_read_data() 376 sig_write_data(struct visorchannel *channel, u32 queue, in sig_write_data() argument 380 int signal_data_offset = SIG_DATA_OFFSET(&channel->chan_hdr, queue, in sig_write_data() 392 signalremove_inner(struct visorchannel *channel, u32 queue, void *msg) in signalremove_inner() argument 396 if (!sig_read_header(channel, queue, &sig_hdr)) in signalremove_inner() [all …]
|
/linux-4.4.14/drivers/net/ethernet/cadence/ |
D | macb.c | 71 static struct macb_dma_desc *macb_tx_desc(struct macb_queue *queue, in macb_tx_desc() argument 74 return &queue->tx_ring[macb_tx_ring_wrap(index)]; in macb_tx_desc() 77 static struct macb_tx_skb *macb_tx_skb(struct macb_queue *queue, in macb_tx_skb() argument 80 return &queue->tx_skb[macb_tx_ring_wrap(index)]; in macb_tx_skb() 83 static dma_addr_t macb_tx_dma(struct macb_queue *queue, unsigned int index) in macb_tx_dma() argument 89 return queue->tx_ring_dma + offset; in macb_tx_dma() 552 struct macb_queue *queue = container_of(work, struct macb_queue, in macb_tx_error_task() local 554 struct macb *bp = queue->bp; in macb_tx_error_task() 562 (unsigned int)(queue - bp->queues), in macb_tx_error_task() 563 queue->tx_tail, queue->tx_head); in macb_tx_error_task() [all …]
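macb_tx_desc() and macb_tx_dma() let the head and tail indices grow without bound and mask them into the ring with macb_tx_ring_wrap(). A tiny sketch of that power-of-two wrap, with an assumed ring size:

#include <stdio.h>

#define TX_RING_SIZE 128                 /* power of two, as macb assumes */

/* Indices grow monotonically; only the slot lookup applies the mask. */
static unsigned int tx_ring_wrap(unsigned int index)
{
        return index & (TX_RING_SIZE - 1);
}

int main(void)
{
        unsigned int head = 130, tail = 125;

        printf("head slot %u, tail slot %u, in flight %u\n",
               tx_ring_wrap(head), tx_ring_wrap(tail), head - tail);
        return 0;
}

Keeping the raw indices unmasked makes "in flight" a plain subtraction even across wrap-around, which is why the wrap is applied only at array-access time.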
|
/linux-4.4.14/Documentation/block/ |
D | null_blk.txt | 10 Single-queue block-layer 12 - Single submission queue per device. 14 Multi-queue block-layer 21 All of them have a completion queue for each core in the system. 25 queue_mode=[0-2]: Default: 2-Multi-queue 29 1: Single-queue. 30 2: Multi-queue. 60 defaults to 1 on single-queue and bio-based instances. For multi-queue, 64 The hardware queue depth of the device. 66 III: Multi-queue specific parameters [all …]
|
D | switching-sched.txt | 5 Each io queue has a set of io scheduler tunables associated with it. These 9 /sys/block/<device>/queue/iosched 24 echo SCHEDNAME > /sys/block/DEV/queue/scheduler 30 a "cat /sys/block/DEV/queue/scheduler" - the list of valid names 33 # cat /sys/block/hda/queue/scheduler 35 # echo deadline > /sys/block/hda/queue/scheduler 36 # cat /sys/block/hda/queue/scheduler
|
D | cfq-iosched.txt | 7 CFQ maintains the per process queue for the processes which request I/O 19 queue is expired and CFQ selects next queue to dispatch from. 72 queue level. This was introduced after a bottleneck was observed 73 in higher end storage due to idle on sequential queue and allow dispatch 74 from a single queue. The idea with this parameter is that it can be run with 101 time for each process to issue I/O request before the cfq queue is switched. 112 This parameter is same as of slice_sync but for asynchronous queue. The 118 device request queue in queue's slice time. The maximum number of request that 124 When a queue is selected for execution, the queues IO requests are only 126 queue. This parameter is used to calculate the time slice of synchronous [all …]
|
/linux-4.4.14/arch/m68k/emu/ |
D | nfblock.c | 58 struct request_queue *queue; member 62 static blk_qc_t nfhd_make_request(struct request_queue *queue, struct bio *bio) in nfhd_make_request() argument 64 struct nfhd_device *dev = queue->queuedata; in nfhd_make_request() 121 dev->queue = blk_alloc_queue(GFP_KERNEL); in nfhd_init_one() 122 if (dev->queue == NULL) in nfhd_init_one() 125 dev->queue->queuedata = dev; in nfhd_init_one() 126 blk_queue_make_request(dev->queue, nfhd_make_request); in nfhd_init_one() 127 blk_queue_logical_block_size(dev->queue, bsize); in nfhd_init_one() 139 dev->disk->queue = dev->queue; in nfhd_init_one() 148 blk_cleanup_queue(dev->queue); in nfhd_init_one() [all …]
|
/linux-4.4.14/drivers/mmc/card/ |
D | queue.c | 52 struct request_queue *q = mq->queue; in mmc_queue_thread() 201 mq->queue = blk_init_queue(mmc_request_fn, lock); in mmc_init_queue() 202 if (!mq->queue) in mmc_init_queue() 207 mq->queue->queuedata = mq; in mmc_init_queue() 209 blk_queue_prep_rq(mq->queue, mmc_prep_request); in mmc_init_queue() 210 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue); in mmc_init_queue() 211 queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, mq->queue); in mmc_init_queue() 213 mmc_queue_setup_discard(mq->queue, card); in mmc_init_queue() 246 blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY); in mmc_init_queue() 247 blk_queue_max_hw_sectors(mq->queue, bouncesz / 512); in mmc_init_queue() [all …]
|
/linux-4.4.14/drivers/net/wireless/iwlwifi/ |
D | iwl-op-mode.h | 157 struct iwl_rx_cmd_buffer *rxb, unsigned int queue); 158 void (*queue_full)(struct iwl_op_mode *op_mode, int queue); 159 void (*queue_not_full)(struct iwl_op_mode *op_mode, int queue); 201 unsigned int queue) in iwl_op_mode_rx_rss() argument 203 op_mode->ops->rx_rss(op_mode, napi, rxb, queue); in iwl_op_mode_rx_rss() 207 int queue) in iwl_op_mode_queue_full() argument 209 op_mode->ops->queue_full(op_mode, queue); in iwl_op_mode_queue_full() 213 int queue) in iwl_op_mode_queue_not_full() argument 215 op_mode->ops->queue_not_full(op_mode, queue); in iwl_op_mode_queue_not_full()
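iwl-op-mode.h forwards queue-full/not-full events through an ops table wrapped by static inline helpers. A generic sketch of that dispatch pattern; the demo_* types stand in for the iwlwifi ones and are not the driver's real structures.

	struct demo_op_mode;

	struct demo_op_mode_ops {
		void (*queue_full)(struct demo_op_mode *op_mode, int queue);
		void (*queue_not_full)(struct demo_op_mode *op_mode, int queue);
	};

	struct demo_op_mode {
		const struct demo_op_mode_ops *ops;
	};

	/* thin wrappers keep callers ignorant of which op mode is registered */
	static inline void demo_op_mode_queue_full(struct demo_op_mode *op_mode,
						   int queue)
	{
		op_mode->ops->queue_full(op_mode, queue);
	}

	static inline void demo_op_mode_queue_not_full(struct demo_op_mode *op_mode,
						       int queue)
	{
		op_mode->ops->queue_not_full(op_mode, queue);
	}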
|
D | iwl-trans.h | 576 struct iwl_device_cmd *dev_cmd, int queue); 577 void (*reclaim)(struct iwl_trans *trans, int queue, int ssn, 580 void (*txq_enable)(struct iwl_trans *trans, int queue, u16 ssn, 583 void (*txq_disable)(struct iwl_trans *trans, int queue, 919 struct iwl_device_cmd *dev_cmd, int queue) in iwl_trans_tx() argument 927 return trans->ops->tx(trans, skb, dev_cmd, queue); in iwl_trans_tx() 930 static inline void iwl_trans_reclaim(struct iwl_trans *trans, int queue, in iwl_trans_reclaim() argument 936 trans->ops->reclaim(trans, queue, ssn, skbs); in iwl_trans_reclaim() 939 static inline void iwl_trans_txq_disable(struct iwl_trans *trans, int queue, in iwl_trans_txq_disable() argument 942 trans->ops->txq_disable(trans, queue, configure_scd); in iwl_trans_txq_disable() [all …]
|
/linux-4.4.14/scripts/ |
D | headerdep.pl | 88 my @queue = @_; 89 while(@queue) { 90 my $header = pop @queue; 105 push @queue, $dep; 142 my @queue = map { [[0, $_]] } @_; 143 while(@queue) { 144 my $top = pop @queue; 158 push @queue, $chain;
|
/linux-4.4.14/drivers/staging/rtl8188eu/os_dep/ |
D | xmit_linux.c | 108 u16 queue; in rtw_os_pkt_complete() local 111 queue = skb_get_queue_mapping(pkt); in rtw_os_pkt_complete() 113 if (__netif_subqueue_stopped(padapter->pnetdev, queue) && in rtw_os_pkt_complete() 114 (pxmitpriv->hwxmits[queue].accnt < WMM_XMIT_THRESHOLD)) in rtw_os_pkt_complete() 115 netif_wake_subqueue(padapter->pnetdev, queue); in rtw_os_pkt_complete() 117 if (__netif_subqueue_stopped(padapter->pnetdev, queue)) in rtw_os_pkt_complete() 118 netif_wake_subqueue(padapter->pnetdev, queue); in rtw_os_pkt_complete() 151 u16 queue; in rtw_check_xmit_resource() local 153 queue = skb_get_queue_mapping(pkt); in rtw_check_xmit_resource() 156 if (pxmitpriv->hwxmits[queue].accnt > WMM_XMIT_THRESHOLD) in rtw_check_xmit_resource() [all …]
|
/linux-4.4.14/crypto/ |
D | cryptd.c | 37 struct crypto_queue queue; member 47 struct cryptd_queue *queue; member 52 struct cryptd_queue *queue; member 57 struct cryptd_queue *queue; member 87 static int cryptd_init_queue(struct cryptd_queue *queue, in cryptd_init_queue() argument 93 queue->cpu_queue = alloc_percpu(struct cryptd_cpu_queue); in cryptd_init_queue() 94 if (!queue->cpu_queue) in cryptd_init_queue() 97 cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu); in cryptd_init_queue() 98 crypto_init_queue(&cpu_queue->queue, max_cpu_qlen); in cryptd_init_queue() 104 static void cryptd_fini_queue(struct cryptd_queue *queue) in cryptd_fini_queue() argument [all …]
|
D | mcryptd.c | 45 struct mcryptd_queue *queue; member 68 static int mcryptd_init_queue(struct mcryptd_queue *queue, in mcryptd_init_queue() argument 74 queue->cpu_queue = alloc_percpu(struct mcryptd_cpu_queue); in mcryptd_init_queue() 75 pr_debug("mqueue:%p mcryptd_cpu_queue %p\n", queue, queue->cpu_queue); in mcryptd_init_queue() 76 if (!queue->cpu_queue) in mcryptd_init_queue() 79 cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu); in mcryptd_init_queue() 80 pr_debug("cpu_queue #%d %p\n", cpu, queue->cpu_queue); in mcryptd_init_queue() 81 crypto_init_queue(&cpu_queue->queue, max_cpu_qlen); in mcryptd_init_queue() 87 static void mcryptd_fini_queue(struct mcryptd_queue *queue) in mcryptd_fini_queue() argument 93 cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu); in mcryptd_fini_queue() [all …]
|
D | algapi.c | 875 void crypto_init_queue(struct crypto_queue *queue, unsigned int max_qlen) in crypto_init_queue() argument 877 INIT_LIST_HEAD(&queue->list); in crypto_init_queue() 878 queue->backlog = &queue->list; in crypto_init_queue() 879 queue->qlen = 0; in crypto_init_queue() 880 queue->max_qlen = max_qlen; in crypto_init_queue() 884 int crypto_enqueue_request(struct crypto_queue *queue, in crypto_enqueue_request() argument 889 if (unlikely(queue->qlen >= queue->max_qlen)) { in crypto_enqueue_request() 893 if (queue->backlog == &queue->list) in crypto_enqueue_request() 894 queue->backlog = &request->list; in crypto_enqueue_request() 897 queue->qlen++; in crypto_enqueue_request() [all …]
|
D | chainiv.c | 42 struct crypto_queue queue; member 121 if (!ctx->queue.qlen) { in async_chainiv_schedule_work() 125 if (!ctx->queue.qlen || in async_chainiv_schedule_work() 144 err = skcipher_enqueue_givcrypt(&ctx->queue, req); in async_chainiv_postpone_request() 190 if (ctx->queue.qlen) { in async_chainiv_givencrypt() 212 req = skcipher_dequeue_givcrypt(&ctx->queue); in async_chainiv_do_postponed() 238 crypto_init_queue(&ctx->queue, 100); in async_chainiv_init() 255 BUG_ON(test_bit(CHAINIV_STATE_INUSE, &ctx->state) || ctx->queue.qlen); in async_chainiv_exit()
|
/linux-4.4.14/Documentation/devicetree/bindings/soc/ti/ |
D | keystone-navigator-qmss.txt | 5 multi-core Navigator. QMSS consist of queue managers, packed-data structure 15 queue pool management (allocation, push, pop and notify) and descriptor 22 - queue-range : <start number> total range of queue numbers for the device. 28 - qmgrs : child node describing the individual queue managers on the 31 -- managed-queues : the actual queues managed by each queue manager 32 instance, specified as <"base queue #" "# of queues">. 40 - Queue Management/Queue Proxy region for queue Push. 41 - Queue Management/Queue Proxy region for queue Pop. 42 - queue-pools : child node classifying the queue ranges into pools. 50 -- qrange : number of queues to use per queue range, specified as [all …]
|
/linux-4.4.14/drivers/scsi/aacraid/ |
D | comminit.c | 296 comm->queue[HostNormCmdQueue].base = queues; in aac_comm_init() 297 aac_queue_init(dev, &comm->queue[HostNormCmdQueue], headers, HOST_NORM_CMD_ENTRIES); in aac_comm_init() 302 comm->queue[HostHighCmdQueue].base = queues; in aac_comm_init() 303 aac_queue_init(dev, &comm->queue[HostHighCmdQueue], headers, HOST_HIGH_CMD_ENTRIES); in aac_comm_init() 309 comm->queue[AdapNormCmdQueue].base = queues; in aac_comm_init() 310 aac_queue_init(dev, &comm->queue[AdapNormCmdQueue], headers, ADAP_NORM_CMD_ENTRIES); in aac_comm_init() 316 comm->queue[AdapHighCmdQueue].base = queues; in aac_comm_init() 317 aac_queue_init(dev, &comm->queue[AdapHighCmdQueue], headers, ADAP_HIGH_CMD_ENTRIES); in aac_comm_init() 323 comm->queue[HostNormRespQueue].base = queues; in aac_comm_init() 324 aac_queue_init(dev, &comm->queue[HostNormRespQueue], headers, HOST_NORM_RESP_ENTRIES); in aac_comm_init() [all …]
|
/linux-4.4.14/include/crypto/internal/ |
D | aead.h | 125 static inline void aead_init_queue(struct aead_queue *queue, in aead_init_queue() argument 128 crypto_init_queue(&queue->base, max_qlen); in aead_init_queue() 131 static inline int aead_enqueue_request(struct aead_queue *queue, in aead_enqueue_request() argument 134 return crypto_enqueue_request(&queue->base, &request->base); in aead_enqueue_request() 138 struct aead_queue *queue) in aead_dequeue_request() argument 142 req = crypto_dequeue_request(&queue->base); in aead_dequeue_request() 147 static inline struct aead_request *aead_get_backlog(struct aead_queue *queue) in aead_get_backlog() argument 151 req = crypto_get_backlog(&queue->base); in aead_get_backlog()
|
D | hash.h | 182 static inline int ahash_enqueue_request(struct crypto_queue *queue, in ahash_enqueue_request() argument 185 return crypto_enqueue_request(queue, &request->base); in ahash_enqueue_request() 189 struct crypto_queue *queue) in ahash_dequeue_request() argument 191 return ahash_request_cast(crypto_dequeue_request(queue)); in ahash_dequeue_request() 194 static inline int ahash_tfm_in_queue(struct crypto_queue *queue, in ahash_tfm_in_queue() argument 197 return crypto_tfm_in_queue(queue, crypto_ahash_tfm(tfm)); in ahash_tfm_in_queue()
|
D | skcipher.h | 76 struct crypto_queue *queue, struct skcipher_givcrypt_request *request) in skcipher_enqueue_givcrypt() argument 78 return ablkcipher_enqueue_request(queue, &request->creq); in skcipher_enqueue_givcrypt() 82 struct crypto_queue *queue) in skcipher_dequeue_givcrypt() argument 84 return skcipher_givcrypt_cast(crypto_dequeue_request(queue)); in skcipher_dequeue_givcrypt()
|
/linux-4.4.14/Documentation/networking/ |
D | scaling.txt | 24 (multi-queue). On reception, a NIC can send different packets to different 28 queue, which in turn can be processed by separate CPUs. This mechanism is 31 Multi-queue distribution can also be used for traffic prioritization, but 38 stores a queue number. The receive queue for a packet is determined 45 can be directed to their own receive queue. Such “n-tuple” filters can 50 The driver for a multi-queue capable NIC typically provides a kernel 53 num_queues. A typical RSS configuration would be to have one receive queue 58 The indirection table of an RSS device, which resolves a queue by masked 68 Each receive queue has a separate IRQ associated with it. The NIC triggers 69 this to notify a CPU when new packets arrive on the given queue. The [all …]
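scaling.txt describes RSS resolving a receive queue from the masked low bits of the packet hash via an indirection table. A small worked example of that lookup; the table size and contents are invented for illustration.

	#include <stdio.h>
	#include <stdint.h>

	#define INDIR_SIZE 128			/* must be a power of two */

	int main(void)
	{
		uint8_t indir[INDIR_SIZE];
		uint32_t hash = 0x9e3779b9;	/* hash as computed by the NIC */
		int nqueues = 4;
		int i;

		for (i = 0; i < INDIR_SIZE; i++)	/* spread queues evenly */
			indir[i] = i % nqueues;

		printf("hash 0x%08x -> queue %u\n", hash,
		       (unsigned)indir[hash & (INDIR_SIZE - 1)]);
		return 0;
	}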
|
D | multiqueue.txt | 23 netif_{start|stop|wake}_subqueue() functions to manage each queue while the 33 default pfifo_fast qdisc. This qdisc supports one qdisc per hardware queue. 37 the base driver to determine which queue to send the skb to. 40 blocking. It will cycle though the bands and verify that the hardware queue 45 will be queued to the band associated with the hardware queue. 61 band 0 => queue 0 62 band 1 => queue 1 63 band 2 => queue 2 64 band 3 => queue 3 66 Traffic will begin flowing through each queue based on either the simple_tx_hash [all …]
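multiqueue.txt describes drivers stopping and waking individual bands with the subqueue helpers named above. A hedged sketch of that per-queue flow control; the ring bookkeeping (demo_ring, free_slots) is invented and not any driver's real state.

	#include <linux/netdevice.h>
	#include <linux/skbuff.h>

	struct demo_ring {
		unsigned int free_slots;
	};

	static netdev_tx_t demo_xmit(struct sk_buff *skb, struct net_device *dev,
				     struct demo_ring *rings)
	{
		u16 q = skb_get_queue_mapping(skb);	/* queue picked by the stack */
		struct demo_ring *ring = &rings[q];

		if (ring->free_slots == 0) {
			netif_stop_subqueue(dev, q);	/* stall only this band/queue */
			return NETDEV_TX_BUSY;
		}

		/* ... post skb to hardware ring q ... */
		ring->free_slots--;
		return NETDEV_TX_OK;
	}

	static void demo_tx_done(struct net_device *dev, struct demo_ring *ring, u16 q)
	{
		ring->free_slots++;
		if (__netif_subqueue_stopped(dev, q))
			netif_wake_subqueue(dev, q);	/* resume the stalled band */
	}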
|
/linux-4.4.14/drivers/net/wireless/ti/wlcore/ |
D | tx.h | 198 static inline int wl1271_tx_get_queue(int queue) in wl1271_tx_get_queue() argument 200 switch (queue) { in wl1271_tx_get_queue() 215 int wlcore_tx_get_mac80211_queue(struct wl12xx_vif *wlvif, int queue) in wlcore_tx_get_mac80211_queue() argument 219 switch (queue) { in wlcore_tx_get_mac80211_queue() 263 u8 queue, enum wlcore_queue_stop_reason reason); 264 void wlcore_stop_queue(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 queue, 266 void wlcore_wake_queue(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 queue, 273 struct wl12xx_vif *wlvif, u8 queue, 278 u8 queue, 281 u8 queue);
|
/linux-4.4.14/drivers/watchdog/ |
D | mtx-1_wdt.c | 64 int queue; member 81 if (mtx1_wdt_device.queue && ticks) in mtx1_wdt_trigger() 99 if (!mtx1_wdt_device.queue) { in mtx1_wdt_start() 100 mtx1_wdt_device.queue = 1; in mtx1_wdt_start() 114 if (mtx1_wdt_device.queue) { in mtx1_wdt_stop() 115 mtx1_wdt_device.queue = 0; in mtx1_wdt_stop() 220 mtx1_wdt_device.queue = 0; in mtx1_wdt_probe() 238 if (mtx1_wdt_device.queue) { in mtx1_wdt_remove() 239 mtx1_wdt_device.queue = 0; in mtx1_wdt_remove()
|
D | cpu5wdt.c | 65 int queue; member 85 if (cpu5wdt_device.queue && ticks) in cpu5wdt_trigger() 109 if (!cpu5wdt_device.queue) { in cpu5wdt_start() 110 cpu5wdt_device.queue = 1; in cpu5wdt_start() 226 cpu5wdt_device.queue = 0; in cpu5wdt_init() 265 if (cpu5wdt_device.queue) { in cpu5wdt_exit() 266 cpu5wdt_device.queue = 0; in cpu5wdt_exit()
|
D | rdc321x_wdt.c | 60 int queue; member 88 if (rdc321x_wdt_device.queue && ticks) in rdc321x_wdt_trigger() 107 if (!rdc321x_wdt_device.queue) { in rdc321x_wdt_start() 108 rdc321x_wdt_device.queue = 1; in rdc321x_wdt_start() 261 rdc321x_wdt_device.queue = 0; in rdc321x_wdt_probe() 276 if (rdc321x_wdt_device.queue) { in rdc321x_wdt_remove() 277 rdc321x_wdt_device.queue = 0; in rdc321x_wdt_remove()
|
/linux-4.4.14/arch/mips/include/asm/octeon/ |
D | cvmx-pko.h | 152 uint64_t queue:9; member 157 uint64_t queue:9; 326 static inline void cvmx_pko_doorbell(uint64_t port, uint64_t queue, in cvmx_pko_doorbell() argument 336 ptr.s.queue = queue; in cvmx_pko_doorbell() 378 static inline void cvmx_pko_send_packet_prepare(uint64_t port, uint64_t queue, in cvmx_pko_send_packet_prepare() argument 396 (CVMX_TAG_SUBGROUP_MASK & queue); in cvmx_pko_send_packet_prepare() 421 uint64_t queue, in cvmx_pko_send_packet_finish() argument 429 result = cvmx_cmd_queue_write2(CVMX_CMD_QUEUE_PKO(queue), in cvmx_pko_send_packet_finish() 433 cvmx_pko_doorbell(port, queue, 2); in cvmx_pko_send_packet_finish() 464 uint64_t queue, in cvmx_pko_send_packet_finish3() argument [all …]
|
/linux-4.4.14/Documentation/device-mapper/ |
D | dm-queue-length.txt | 1 dm-queue-length 4 dm-queue-length is a path selector module for device-mapper targets, 6 The path selector name is 'queue-length'. 23 dm-queue-length increments/decrements 'in-flight' when an I/O is 25 dm-queue-length selects a path with the minimum 'in-flight'. 32 # echo "0 10 multipath 0 0 1 1 queue-length 0 2 1 8:0 128 8:16 128" \ 36 test: 0 10 multipath 0 0 1 1 queue-length 0 2 1 8:0 128 8:16 128
|
/linux-4.4.14/block/ |
D | noop-iosched.c | 12 struct list_head queue; member 26 rq = list_first_entry_or_null(&nd->queue, struct request, queuelist); in noop_dispatch() 39 list_add_tail(&rq->queuelist, &nd->queue); in noop_add_request() 47 if (rq->queuelist.prev == &nd->queue) in noop_former_request() 57 if (rq->queuelist.next == &nd->queue) in noop_latter_request() 78 INIT_LIST_HEAD(&nd->queue); in noop_init_queue() 90 BUG_ON(!list_empty(&nd->queue)); in noop_exit_queue()
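The noop elevator above is just a FIFO kept on a list_head: list_add_tail() on insert, list_first_entry_or_null() plus list_del() on dispatch. A minimal sketch of the same pattern with demo_rq standing in for struct request.

	#include <linux/list.h>

	struct demo_rq {
		struct list_head queuelist;
		/* payload omitted */
	};

	static LIST_HEAD(demo_queue);

	static void demo_add_request(struct demo_rq *rq)
	{
		list_add_tail(&rq->queuelist, &demo_queue);	/* FIFO order */
	}

	static struct demo_rq *demo_dispatch(void)
	{
		struct demo_rq *rq;

		rq = list_first_entry_or_null(&demo_queue, struct demo_rq, queuelist);
		if (rq)
			list_del(&rq->queuelist);	/* hand it to the driver */
		return rq;
	}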
|
D | blk-mq-cpumap.c | 37 unsigned int i, nr_cpus, nr_uniq_cpus, queue, first_sibling; in blk_mq_update_queue_map() local 53 queue = 0; in blk_mq_update_queue_map() 66 map[i] = cpu_to_queue_index(nr_cpus, nr_queues, queue); in blk_mq_update_queue_map() 67 queue++; in blk_mq_update_queue_map() 79 queue); in blk_mq_update_queue_map() 80 queue++; in blk_mq_update_queue_map()
|
D | blk-integrity.c | 145 struct blk_integrity *b1 = &gd1->queue->integrity; in blk_integrity_compare() 146 struct blk_integrity *b2 = &gd2->queue->integrity; in blk_integrity_compare() 249 struct blk_integrity *bi = &disk->queue->integrity; in integrity_attr_show() 261 struct blk_integrity *bi = &disk->queue->integrity; in integrity_attr_store() 411 struct blk_integrity *bi = &disk->queue->integrity; in blk_integrity_register() 415 bi->interval_exp = ilog2(queue_logical_block_size(disk->queue)); in blk_integrity_register() 434 memset(&disk->queue->integrity, 0, sizeof(struct blk_integrity)); in blk_integrity_unregister() 440 struct blk_integrity *bi = &disk->queue->integrity; in blk_integrity_revalidate() 446 disk->queue->backing_dev_info.capabilities |= in blk_integrity_revalidate() 449 disk->queue->backing_dev_info.capabilities &= in blk_integrity_revalidate()
|
/linux-4.4.14/sound/core/seq/oss/ |
D | seq_oss_init.c | 58 static int delete_seq_queue(int queue); 196 dp->queue = -1; in snd_seq_oss_open() 286 delete_seq_queue(dp->queue); in snd_seq_oss_open() 372 dp->queue = qinfo.queue; in alloc_seq_queue() 380 delete_seq_queue(int queue) in delete_seq_queue() argument 385 if (queue < 0) in delete_seq_queue() 388 qinfo.queue = queue; in delete_seq_queue() 391 pr_err("ALSA: seq_oss: unable to delete queue %d (%d)\n", queue, rc); in delete_seq_queue() 420 int queue; in snd_seq_oss_release() local 431 queue = dp->queue; in snd_seq_oss_release() [all …]
|
D | seq_oss_timer.c | 150 ev.queue = dp->queue; in send_timer_event() 151 ev.data.queue.queue = dp->queue; in send_timer_event() 152 ev.data.queue.param.value = value; in send_timer_event() 169 tmprec.queue = dp->queue; in snd_seq_oss_timer_start()
|
D | seq_oss_device.h | 87 int queue; /* sequencer queue number */ member 165 ev->queue = dp->queue; in snd_seq_oss_fill_addr()
|
/linux-4.4.14/drivers/net/wireless/iwlwifi/mvm/ |
D | utils.c | 676 void iwl_mvm_enable_txq(struct iwl_mvm *mvm, int queue, int mac80211_queue, in iwl_mvm_enable_txq() argument 685 if (mvm->queue_info[queue].tid_bitmap & BIT(cfg->tid)) { in iwl_mvm_enable_txq() 693 mvm->queue_info[queue].hw_queue_to_mac80211 |= BIT(mac80211_queue); in iwl_mvm_enable_txq() 694 mvm->queue_info[queue].hw_queue_refcount++; in iwl_mvm_enable_txq() 695 if (mvm->queue_info[queue].hw_queue_refcount > 1) in iwl_mvm_enable_txq() 697 mvm->queue_info[queue].tid_bitmap |= BIT(cfg->tid); in iwl_mvm_enable_txq() 701 queue, mvm->queue_info[queue].hw_queue_refcount, in iwl_mvm_enable_txq() 702 mvm->queue_info[queue].hw_queue_to_mac80211); in iwl_mvm_enable_txq() 709 .scd_queue = queue, in iwl_mvm_enable_txq() 719 iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL, in iwl_mvm_enable_txq() [all …]
|
/linux-4.4.14/drivers/mfd/ |
D | pcf50633-adc.c | 47 struct pcf50633_adc_request *queue[PCF50633_MAX_ADC_FIFO_DEPTH]; member 78 if (!adc->queue[head]) in trigger_next_adc_job_if_any() 81 adc_setup(pcf, adc->queue[head]->mux, adc->queue[head]->avg); in trigger_next_adc_job_if_any() 95 if (adc->queue[tail]) { in adc_enqueue_request() 101 adc->queue[tail] = req; in adc_enqueue_request() 182 req = adc->queue[head]; in pcf50633_adc_irq() 188 adc->queue[head] = NULL; in pcf50633_adc_irq() 230 if (WARN_ON(adc->queue[head])) in pcf50633_adc_remove() 235 kfree(adc->queue[i]); in pcf50633_adc_remove()
|
/linux-4.4.14/drivers/block/ |
D | ps3disk.c | 44 struct request_queue *queue; member 281 ps3disk_do_request(dev, priv->queue); in ps3disk_interrupt() 409 struct request_queue *queue; in ps3disk_probe() local 453 queue = blk_init_queue(ps3disk_request, &priv->lock); in ps3disk_probe() 454 if (!queue) { in ps3disk_probe() 461 priv->queue = queue; in ps3disk_probe() 462 queue->queuedata = dev; in ps3disk_probe() 464 blk_queue_bounce_limit(queue, BLK_BOUNCE_HIGH); in ps3disk_probe() 466 blk_queue_max_hw_sectors(queue, dev->bounce_size >> 9); in ps3disk_probe() 467 blk_queue_segment_boundary(queue, -1UL); in ps3disk_probe() [all …]
|
D | ps3vram.c | 69 struct request_queue *queue; member 630 struct request_queue *queue; in ps3vram_probe() local 749 queue = blk_alloc_queue(GFP_KERNEL); in ps3vram_probe() 750 if (!queue) { in ps3vram_probe() 756 priv->queue = queue; in ps3vram_probe() 757 queue->queuedata = dev; in ps3vram_probe() 758 blk_queue_make_request(queue, ps3vram_make_request); in ps3vram_probe() 759 blk_queue_max_segments(queue, BLK_MAX_SEGMENTS); in ps3vram_probe() 760 blk_queue_max_segment_size(queue, BLK_MAX_SEGMENT_SIZE); in ps3vram_probe() 761 blk_queue_max_hw_sectors(queue, BLK_SAFE_MAX_SECTORS); in ps3vram_probe() [all …]
|
/linux-4.4.14/virt/kvm/ |
D | async_pf.c | 68 INIT_LIST_HEAD(&vcpu->async_pf.queue); in kvm_async_pf_vcpu_init() 111 while (!list_empty(&vcpu->async_pf.queue)) { in kvm_clear_async_pf_completion_queue() 113 list_entry(vcpu->async_pf.queue.next, in kvm_clear_async_pf_completion_queue() 114 typeof(*work), queue); in kvm_clear_async_pf_completion_queue() 115 list_del(&work->queue); in kvm_clear_async_pf_completion_queue() 156 list_del(&work->queue); in kvm_check_async_pf_completion() 198 list_add_tail(&work->queue, &vcpu->async_pf.queue); in kvm_setup_async_pf() 221 INIT_LIST_HEAD(&work->queue); /* for list_del to work */ in kvm_async_pf_wakeup_all()
|
/linux-4.4.14/include/crypto/ |
D | algapi.h | 182 void crypto_init_queue(struct crypto_queue *queue, unsigned int max_qlen); 183 int crypto_enqueue_request(struct crypto_queue *queue, 185 struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue); 186 int crypto_tfm_in_queue(struct crypto_queue *queue, struct crypto_tfm *tfm); 324 struct crypto_queue *queue) in crypto_get_backlog() argument 326 return queue->backlog == &queue->list ? NULL : in crypto_get_backlog() 327 container_of(queue->backlog, struct crypto_async_request, list); in crypto_get_backlog() 330 static inline int ablkcipher_enqueue_request(struct crypto_queue *queue, in ablkcipher_enqueue_request() argument 333 return crypto_enqueue_request(queue, &request->base); in ablkcipher_enqueue_request() 337 struct crypto_queue *queue) in ablkcipher_dequeue_request() argument [all …]
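algapi.h declares the generic crypto request queue used by cryptd and most hardware drivers. A hedged sketch of the usual consumer pattern built on those declarations, assuming the standard backlog handling; the locking granularity and the transform itself are elided.

	#include <crypto/algapi.h>

	struct demo_engine {
		struct crypto_queue queue;
		spinlock_t lock;
	};

	static void demo_engine_init(struct demo_engine *eng)
	{
		spin_lock_init(&eng->lock);
		crypto_init_queue(&eng->queue, 50);	/* at most 50 pending requests */
	}

	static int demo_engine_queue(struct demo_engine *eng,
				     struct crypto_async_request *req)
	{
		int err;

		spin_lock_bh(&eng->lock);
		err = crypto_enqueue_request(&eng->queue, req);
		spin_unlock_bh(&eng->lock);
		return err;		/* -EINPROGRESS, or -EBUSY when the queue is full */
	}

	static void demo_engine_do_one(struct demo_engine *eng)
	{
		struct crypto_async_request *req, *backlog;

		spin_lock_bh(&eng->lock);
		backlog = crypto_get_backlog(&eng->queue);
		req = crypto_dequeue_request(&eng->queue);
		spin_unlock_bh(&eng->lock);

		if (backlog)
			backlog->complete(backlog, -EINPROGRESS);
		if (req) {
			/* ... run the transform, then ... */
			req->complete(req, 0);
		}
	}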
|
/linux-4.4.14/drivers/net/wireless/ath/carl9170/ |
D | tx.c | 49 unsigned int queue) in __carl9170_get_queue() argument 52 return queue; in __carl9170_get_queue() 78 int queue, i; in carl9170_tx_accounting() local 83 queue = skb_get_queue_mapping(skb); in carl9170_tx_accounting() 92 ar->tx_stats[queue].len++; in carl9170_tx_accounting() 93 ar->tx_stats[queue].count++; in carl9170_tx_accounting() 158 int queue; in carl9170_tx_accounting_free() local 160 queue = skb_get_queue_mapping(skb); in carl9170_tx_accounting_free() 164 ar->tx_stats[queue].len--; in carl9170_tx_accounting_free() 455 int queue = skb_get_queue_mapping(skb); in carl9170_tx_bar_status() local [all …]
|
/linux-4.4.14/include/uapi/sound/ |
D | asequencer.h | 251 unsigned char queue; /* affected queue */ member 277 unsigned char queue; /* schedule queue */ member 290 struct snd_seq_ev_queue_control queue; member 394 unsigned char queue; /* Queue for REMOVE_DEST */ member 473 int queue; /* queue id */ member 489 int queue; /* queue id */ member 501 int queue; /* sequencer queue */ member 517 int queue; /* sequencer queue */ member 530 int queue; /* sequencer queue */ member 548 unsigned char queue; /* input time-stamp queue (optional) */ member [all …]
|
/linux-4.4.14/drivers/staging/lustre/lustre/obdclass/ |
D | cl_io.c | 331 int cl_queue_match(const struct list_head *queue, in cl_queue_match() argument 336 list_for_each_entry(scan, queue, cill_linkage) { in cl_queue_match() 344 static int cl_queue_merge(const struct list_head *queue, in cl_queue_merge() argument 349 list_for_each_entry(scan, queue, cill_linkage) { in cl_queue_merge() 743 struct cl_2queue *queue; in cl_io_read_page() local 752 queue = &io->ci_queue; in cl_io_read_page() 754 cl_2queue_init(queue); in cl_io_read_page() 777 result = cl_io_submit_rw(env, io, CRT_READ, queue); in cl_io_read_page() 781 cl_page_list_disown(env, io, &queue->c2_qin); in cl_io_read_page() 782 cl_2queue_fini(env, queue); in cl_io_read_page() [all …]
|
/linux-4.4.14/drivers/misc/vmw_vmci/ |
D | vmci_queue_pair.c | 142 typedef int vmci_memcpy_to_queue_func(struct vmci_queue *queue, 146 const struct vmci_queue *queue, 273 struct vmci_queue *queue = q; in qp_free_queue() local 275 if (queue) { in qp_free_queue() 281 queue->kernel_if->u.g.vas[i], in qp_free_queue() 282 queue->kernel_if->u.g.pas[i]); in qp_free_queue() 285 vfree(queue); in qp_free_queue() 297 struct vmci_queue *queue; in qp_alloc_queue() local 300 size_t queue_size = sizeof(*queue) + sizeof(*queue->kernel_if); in qp_alloc_queue() 305 (sizeof(*queue->kernel_if->u.g.pas) + in qp_alloc_queue() [all …]
|
/linux-4.4.14/arch/arm/boot/dts/ |
D | k2l-netcp.dtsi | 18 queue-range = <0 0x2000>; 38 queue-pools { 80 multi-queue; 157 tx-queue = <896>; 191 rx-queue-depth = <128 128 0 0>; 193 rx-queue = <528>; 194 tx-completion-queue = <530>; 203 rx-queue-depth = <128 128 0 0>; 205 rx-queue = <529>; 206 tx-completion-queue = <531>;
|
D | k2e-netcp.dtsi | 18 queue-range = <0 0x2000>; 38 queue-pools { 80 multi-queue; 158 tx-queue = <896>; 208 rx-queue-depth = <128 128 0 0>; 210 rx-queue = <528>; 211 tx-completion-queue = <530>; 220 rx-queue-depth = <128 128 0 0>; 222 rx-queue = <529>; 223 tx-completion-queue = <531>;
|
D | k2hk-netcp.dtsi | 18 queue-range = <0 0x4000>; 51 queue-pools { 97 multi-queue; 177 tx-queue = <648>; 211 rx-queue-depth = <128 128 0 0>; 213 rx-queue = <8704>; 214 tx-completion-queue = <8706>; 223 rx-queue-depth = <128 128 0 0>; 225 rx-queue = <8705>; 226 tx-completion-queue = <8707>;
|
/linux-4.4.14/net/mac80211/ |
D | tkip.c | 245 u8 *ra, int only_iv, int queue, in ieee80211_tkip_decrypt_data() argument 268 if (key->u.tkip.rx[queue].state != TKIP_STATE_NOT_INIT && in ieee80211_tkip_decrypt_data() 269 (iv32 < key->u.tkip.rx[queue].iv32 || in ieee80211_tkip_decrypt_data() 270 (iv32 == key->u.tkip.rx[queue].iv32 && in ieee80211_tkip_decrypt_data() 271 iv16 <= key->u.tkip.rx[queue].iv16))) in ieee80211_tkip_decrypt_data() 276 key->u.tkip.rx[queue].state = TKIP_STATE_PHASE1_HW_UPLOADED; in ieee80211_tkip_decrypt_data() 280 if (key->u.tkip.rx[queue].state == TKIP_STATE_NOT_INIT || in ieee80211_tkip_decrypt_data() 281 key->u.tkip.rx[queue].iv32 != iv32) { in ieee80211_tkip_decrypt_data() 283 tkip_mixing_phase1(tk, &key->u.tkip.rx[queue], ta, iv32); in ieee80211_tkip_decrypt_data() 287 key->u.tkip.rx[queue].state != TKIP_STATE_PHASE1_HW_UPLOADED) { in ieee80211_tkip_decrypt_data() [all …]
|
/linux-4.4.14/drivers/nvdimm/ |
D | blk.c | 25 struct request_queue *queue; member 250 blk_dev->queue = blk_alloc_queue(GFP_KERNEL); in nd_blk_attach_disk() 251 if (!blk_dev->queue) in nd_blk_attach_disk() 254 blk_queue_make_request(blk_dev->queue, nd_blk_make_request); in nd_blk_attach_disk() 255 blk_queue_max_hw_sectors(blk_dev->queue, UINT_MAX); in nd_blk_attach_disk() 256 blk_queue_bounce_limit(blk_dev->queue, BLK_BOUNCE_ANY); in nd_blk_attach_disk() 257 blk_queue_logical_block_size(blk_dev->queue, blk_dev->sector_size); in nd_blk_attach_disk() 258 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, blk_dev->queue); in nd_blk_attach_disk() 262 blk_cleanup_queue(blk_dev->queue); in nd_blk_attach_disk() 271 disk->queue = blk_dev->queue; in nd_blk_attach_disk() [all …]
|
/linux-4.4.14/drivers/media/platform/xilinx/ |
D | xilinx-dma.c | 289 struct list_head queue; member 301 list_del(&buf->queue); in xvip_dma_complete() 351 if (dma->queue.type == V4L2_BUF_TYPE_VIDEO_CAPTURE) { in xvip_dma_buffer_queue() 380 list_add_tail(&buf->queue, &dma->queued_bufs); in xvip_dma_buffer_queue() 385 if (vb2_is_streaming(&dma->queue)) in xvip_dma_buffer_queue() 439 list_for_each_entry_safe(buf, nbuf, &dma->queued_bufs, queue) { in xvip_dma_start_streaming() 441 list_del(&buf->queue); in xvip_dma_start_streaming() 466 list_for_each_entry_safe(buf, nbuf, &dma->queued_bufs, queue) { in xvip_dma_stop_streaming() 468 list_del(&buf->queue); in xvip_dma_stop_streaming() 496 if (dma->queue.type == V4L2_BUF_TYPE_VIDEO_CAPTURE) in xvip_dma_querycap() [all …]
|
/linux-4.4.14/drivers/staging/rtl8723au/include/ |
D | osdep_service.h | 51 struct list_head queue; member 55 static inline struct list_head *get_list_head(struct rtw_queue *queue) in get_list_head() argument 57 return &queue->queue; in get_list_head()
|
/linux-4.4.14/drivers/net/ethernet/tile/ |
D | tilepro.c | 172 struct tile_netio_queue queue; member 342 struct tile_netio_queue *queue = &info->queue; in tile_net_return_credit() local 343 netio_queue_user_impl_t *qup = &queue->__user_part; in tile_net_return_credit() 361 struct tile_netio_queue *queue = &info->queue; in tile_net_provide_linux_buffer() local 366 __netio_fastio_free_buffer(queue->__user_part.__fastio_index, buffer); in tile_net_provide_linux_buffer() 627 struct tile_netio_queue *queue = &info->queue; in tile_net_discard_aux() local 628 netio_queue_impl_t *qsp = queue->__system_part; in tile_net_discard_aux() 629 netio_queue_user_impl_t *qup = &queue->__user_part; in tile_net_discard_aux() 664 struct tile_netio_queue *queue = &info->queue; in tile_net_discard_packets() local 665 netio_queue_impl_t *qsp = queue->__system_part; in tile_net_discard_packets() [all …]
|
/linux-4.4.14/Documentation/DocBook/ |
D | kernel-api.xml.db | 191 API-add-page-wait-queue 594 API-blk-delay-queue 595 API-blk-start-queue-async 596 API-blk-start-queue 597 API-blk-stop-queue 598 API-blk-sync-queue 599 API---blk-run-queue-uncond 600 API---blk-run-queue 601 API-blk-run-queue-async 602 API-blk-run-queue [all …]
|
D | networking.xml.db | 13 API-skb-queue-empty 14 API-skb-queue-is-last 15 API-skb-queue-is-first 16 API-skb-queue-next 17 API-skb-queue-prev 29 API-skb-queue-len 30 API---skb-queue-head-init 31 API-skb-queue-splice 32 API-skb-queue-splice-init 33 API-skb-queue-splice-tail [all …]
|
/linux-4.4.14/drivers/net/ethernet/marvell/ |
D | mvneta.c | 809 int queue; in mvneta_port_up() local 814 for (queue = 0; queue < txq_number; queue++) { in mvneta_port_up() 815 struct mvneta_tx_queue *txq = &pp->txqs[queue]; in mvneta_port_up() 817 q_map |= (1 << queue); in mvneta_port_up() 923 static void mvneta_set_ucast_table(struct mvneta_port *pp, int queue) in mvneta_set_ucast_table() argument 928 if (queue == -1) { in mvneta_set_ucast_table() 931 val = 0x1 | (queue << 1); in mvneta_set_ucast_table() 940 static void mvneta_set_special_mcast_table(struct mvneta_port *pp, int queue) in mvneta_set_special_mcast_table() argument 945 if (queue == -1) { in mvneta_set_special_mcast_table() 948 val = 0x1 | (queue << 1); in mvneta_set_special_mcast_table() [all …]
|
D | mvpp2.c | 3933 int tx_port_num, val, queue, ptxq, lrxq; in mvpp2_defaults_set() local 3953 for (queue = 0; queue < MVPP2_MAX_TXQ; queue++) { in mvpp2_defaults_set() 3954 ptxq = mvpp2_txq_phys(port->id, queue); in mvpp2_defaults_set() 3979 queue = port->rxqs[lrxq]->id; in mvpp2_defaults_set() 3980 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue)); in mvpp2_defaults_set() 3983 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val); in mvpp2_defaults_set() 3994 int lrxq, queue; in mvpp2_ingress_enable() local 3997 queue = port->rxqs[lrxq]->id; in mvpp2_ingress_enable() 3998 val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue)); in mvpp2_ingress_enable() 4000 mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val); in mvpp2_ingress_enable() [all …]
|
/linux-4.4.14/tools/testing/selftests/mqueue/ |
D | mq_open_tests.c | 56 mqd_t queue = -1; variable 87 if (queue != -1) in shutdown() 88 if (mq_close(queue)) in shutdown() 202 if ((queue = mq_open(queue_path, flags, perms, attr)) == -1) in test_queue() 204 if (mq_getattr(queue, result)) in test_queue() 206 if (mq_close(queue)) in test_queue() 208 queue = -1; in test_queue() 224 if ((queue = mq_open(queue_path, flags, perms, attr)) == -1) in test_queue_fail() 226 if (mq_getattr(queue, result)) in test_queue_fail() 228 if (mq_close(queue)) in test_queue_fail() [all …]
|
D | mq_perf_tests.c | 97 mqd_t queue = -1; variable 187 if (queue != -1) in shutdown() 188 if (mq_close(queue)) in shutdown() 292 queue = mq_open(queue_path, flags, perms, attr); in open_queue() 293 if (queue == -1) in open_queue() 295 if (mq_getattr(queue, &result)) in open_queue() 329 while (mq_send(queue, buff, sizeof(buff), 0) == 0) in cont_thread() 331 mq_receive(queue, buff, sizeof(buff), &priority); in cont_thread() 336 while (mq_receive(queue, buff, MSG_SIZE, &prio_in) == MSG_SIZE) 340 if (mq_send(queue, buff, MSG_SIZE, prio_out)) \ [all …]
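The selftests above exercise POSIX message queues through mq_open/mq_send/mq_receive. A self-contained userspace example of the same calls; the queue name and sizes are arbitrary, and the program links with -lrt.

	#include <fcntl.h>
	#include <mqueue.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/stat.h>

	int main(void)
	{
		struct mq_attr attr = { .mq_maxmsg = 10, .mq_msgsize = 64 };
		char buf[64];
		unsigned int prio;
		mqd_t q;

		q = mq_open("/demo_queue", O_CREAT | O_RDWR, 0600, &attr);
		if (q == (mqd_t)-1) {
			perror("mq_open");
			return 1;
		}

		mq_send(q, "hello", strlen("hello") + 1, 1);	/* priority 1 */
		mq_receive(q, buf, sizeof(buf), &prio);
		printf("got \"%s\" at priority %u\n", buf, prio);

		mq_close(q);
		mq_unlink("/demo_queue");			/* remove the queue name */
		return 0;
	}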
|
/linux-4.4.14/drivers/media/v4l2-core/ |
D | videobuf2-v4l2.c | 1401 return vdev->queue->owner && vdev->queue->owner != file->private_data; in vb2_queue_is_busy() 1410 int res = vb2_verify_memory_type(vdev->queue, p->memory, p->type); in vb2_ioctl_reqbufs() 1416 res = vb2_core_reqbufs(vdev->queue, p->memory, &p->count); in vb2_ioctl_reqbufs() 1420 vdev->queue->owner = p->count ? file->private_data : NULL; in vb2_ioctl_reqbufs() 1429 int res = vb2_verify_memory_type(vdev->queue, p->memory, in vb2_ioctl_create_bufs() 1432 p->index = vdev->queue->num_buffers; in vb2_ioctl_create_bufs() 1443 res = vb2_core_create_bufs(vdev->queue, p->memory, &p->count, in vb2_ioctl_create_bufs() 1446 vdev->queue->owner = file->private_data; in vb2_ioctl_create_bufs() 1458 return vb2_prepare_buf(vdev->queue, p); in vb2_ioctl_prepare_buf() 1467 return vb2_querybuf(vdev->queue, p); in vb2_ioctl_querybuf() [all …]
|
/linux-4.4.14/drivers/usb/gadget/udc/ |
D | s3c-hsudc.c | 114 struct list_head queue; member 128 struct list_head queue; member 251 list_del_init(&hsreq->queue); in s3c_hsudc_complete_request() 275 while (!list_empty(&hsep->queue)) { in s3c_hsudc_nuke_ep() 276 hsreq = list_entry(hsep->queue.next, in s3c_hsudc_nuke_ep() 277 struct s3c_hsudc_req, queue); in s3c_hsudc_nuke_ep() 444 if (list_empty(&hsep->queue)) in s3c_hsudc_epin_intr() 447 hsreq = list_entry(hsep->queue.next, in s3c_hsudc_epin_intr() 448 struct s3c_hsudc_req, queue); in s3c_hsudc_epin_intr() 481 if (list_empty(&hsep->queue)) in s3c_hsudc_epout_intr() [all …]
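s3c-hsudc, like the other UDC drivers in this directory, keeps pending usb_requests on a per-endpoint list, completes them one at a time in a done() helper and drains the list in a nuke() helper on disable or disconnect. A sketch of that shared pattern; the demo_* names are placeholders, not the driver's structures.

	#include <linux/list.h>
	#include <linux/errno.h>
	#include <linux/usb/gadget.h>

	struct demo_req {
		struct usb_request req;
		struct list_head queue;
	};

	struct demo_ep {
		struct usb_ep ep;
		struct list_head queue;		/* pending demo_req entries */
	};

	static void demo_done(struct demo_ep *ep, struct demo_req *req, int status)
	{
		list_del_init(&req->queue);	/* safe to re-queue later */
		if (req->req.status == -EINPROGRESS)
			req->req.status = status;
		usb_gadget_giveback_request(&ep->ep, &req->req);
	}

	static void demo_nuke(struct demo_ep *ep, int status)
	{
		while (!list_empty(&ep->queue)) {
			struct demo_req *req = list_first_entry(&ep->queue,
							struct demo_req, queue);
			demo_done(ep, req, status);
		}
	}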
|
D | atmel_usba_udc.c | 48 list_for_each_entry(req, &ep->queue, queue) { in queue_dbg_open() 52 list_add_tail(&req_copy->queue, queue_data); in queue_dbg_open() 61 list_for_each_entry_safe(req, req_copy, queue_data, queue) { in queue_dbg_open() 62 list_del(&req->queue); in queue_dbg_open() 86 struct list_head *queue = file->private_data; in queue_dbg_read() local 95 list_for_each_entry_safe(req, tmp_req, queue, queue) { in queue_dbg_read() 110 list_del(&req->queue); in queue_dbg_read() 131 list_for_each_entry_safe(req, tmp_req, queue_data, queue) { in queue_dbg_release() 132 list_del(&req->queue); in queue_dbg_release() 416 if (list_empty(&ep->queue)) { in submit_next_request() [all …]
|
D | goku_udc.c | 276 INIT_LIST_HEAD(&req->queue); in goku_alloc_request() 289 WARN_ON(!list_empty(&req->queue)); in goku_free_request() 301 list_del_init(&req->queue); in done() 487 if (dbuff && !list_empty(&ep->queue)) { in read_fifo() 488 req = list_entry(ep->queue.next, in read_fifo() 489 struct goku_request, queue); in read_fifo() 521 if (unlikely(list_empty (&ep->queue))) in pio_advance() 523 req = list_entry(ep->queue.next, struct goku_request, queue); in pio_advance() 594 if (unlikely(list_empty(&ep->queue))) { in dma_advance() 603 req = list_entry(ep->queue.next, struct goku_request, queue); in dma_advance() [all …]
|
D | omap_udc.c | 276 INIT_LIST_HEAD(&req->queue); in omap_alloc_request() 297 list_del_init(&req->queue); in done() 648 if (!list_empty(&ep->queue)) { in dma_irq() 649 req = container_of(ep->queue.next, in dma_irq() 650 struct omap_req, queue); in dma_irq() 655 if (!list_empty(&ep->queue)) { in dma_irq() 656 req = container_of(ep->queue.next, in dma_irq() 657 struct omap_req, queue); in dma_irq() 667 if (!list_empty(&ep->queue)) { in dma_irq() 668 req = container_of(ep->queue.next, in dma_irq() [all …]
|
D | m66592-udc.c | 102 INIT_LIST_HEAD(&m66592->ep[0].queue); in m66592_usb_disconnect() 366 INIT_LIST_HEAD(&ep->queue); in m66592_ep_setting() 722 list_del_init(&req->queue); in transfer_complete() 728 if (!list_empty(&ep->queue)) in transfer_complete() 736 req = list_entry(ep->queue.next, struct m66592_request, queue); in transfer_complete() 910 req = list_entry(ep->queue.next, struct m66592_request, queue); in irq_pipe_ready() 918 req = list_entry(ep->queue.next, in irq_pipe_ready() 919 struct m66592_request, queue); in irq_pipe_ready() 941 req = list_entry(ep->queue.next, struct m66592_request, queue); in irq_pipe_empty() 954 req = list_entry(ep->queue.next, in irq_pipe_empty() [all …]
|
D | fotg210-udc.c | 64 list_del_init(&req->queue); in fotg210_done() 77 if (list_empty(&ep->queue)) in fotg210_done() 222 while (!list_empty(&ep->queue)) { in fotg210_ep_disable() 223 req = list_entry(ep->queue.next, in fotg210_ep_disable() 224 struct fotg210_request, queue); in fotg210_ep_disable() 242 INIT_LIST_HEAD(&req->queue); in fotg210_ep_alloc_request() 415 if (list_empty(&ep->queue)) in fotg210_ep_queue() 418 list_add_tail(&req->queue, &ep->queue); in fotg210_ep_queue() 443 if (!list_empty(&ep->queue)) in fotg210_ep_dequeue() 508 if (!list_empty(&ep->queue)) in fotg210_set_halt_and_wedge() [all …]
|
D | pch_udc.c | 298 struct list_head queue; member 414 struct list_head queue; member 1454 list_del_init(&req->queue); in complete_req() 1508 while (!list_empty(&ep->queue)) { in empty_req_queue() 1509 req = list_entry(ep->queue.next, struct pch_udc_request, queue); in empty_req_queue() 1752 INIT_LIST_HEAD(&ep->queue); in pch_udc_pcd_ep_disable() 1784 INIT_LIST_HEAD(&req->queue); in pch_udc_alloc_request() 1821 if (!list_empty(&req->queue)) in pch_udc_free_request() 1860 if (!list_empty(&req->queue)) in pch_udc_pcd_queue() 1907 if (list_empty(&ep->queue) && !ep->halted) { in pch_udc_pcd_queue() [all …]
|
D | net2272.c | 267 INIT_LIST_HEAD(&ep->queue); in net2272_ep_reset() 343 INIT_LIST_HEAD(&req->queue); in net2272_alloc_request() 359 WARN_ON(!list_empty(&req->queue)); in net2272_free_request() 377 list_del_init(&req->queue); in net2272_done() 482 if (!list_empty(&ep->queue)) { in net2272_write_fifo() 483 req = list_entry(ep->queue.next, in net2272_write_fifo() 485 queue); in net2272_write_fifo() 612 if (!list_empty(&ep->queue)) { in net2272_read_fifo() 613 req = list_entry(ep->queue.next, in net2272_read_fifo() 614 struct net2272_request, queue); in net2272_read_fifo() [all …]
|
D | gr_udc.c | 161 if (list_empty(&ep->queue)) { in gr_seq_ep_show() 167 list_for_each_entry(req, &ep->queue, queue) { in gr_seq_ep_show() 310 list_del_init(&req->queue); in gr_finish_request() 375 INIT_LIST_HEAD(&req->queue); in gr_alloc_request() 390 if (list_empty(&ep->queue)) { in gr_start_dma() 395 req = list_first_entry(&ep->queue, struct gr_request, queue); in gr_start_dma() 431 req = list_first_entry(&ep->queue, struct gr_request, queue); in gr_dma_advance() 607 if (unlikely(!req->req.buf || !list_empty(&req->queue))) { in gr_queue() 610 ep->ep.name, req->req.buf, list_empty(&req->queue)); in gr_queue() 641 list_add_tail(&req->queue, &ep->queue); in gr_queue() [all …]
|
D | pxa25x_udc.c | 309 INIT_LIST_HEAD (&req->queue); in pxa25x_ep_alloc_request() 323 WARN_ON(!list_empty (&req->queue)); in pxa25x_ep_free_request() 336 list_del_init(&req->queue); in done() 426 if (list_empty(&ep->queue)) in write_fifo() 573 if (list_empty(&ep->queue)) in read_fifo() 638 || !list_empty(&req->queue))) { in pxa25x_ep_queue() 672 if (list_empty(&ep->queue) && !ep->stopped) { in pxa25x_ep_queue() 728 list_add_tail(&req->queue, &ep->queue); in pxa25x_ep_queue() 743 while (!list_empty(&ep->queue)) { in nuke() 744 req = list_entry(ep->queue.next, in nuke() [all …]
|
D | fsl_udc_core.c | 172 list_del_init(&req->queue); in done() 219 while (!list_empty(&ep->queue)) { in nuke() 222 req = list_entry(ep->queue.next, struct fsl_req, queue); in nuke() 690 INIT_LIST_HEAD(&req->queue); in fsl_alloc_request() 739 if (!(list_empty(&ep->queue)) && !(ep_index(ep) == 0)) { in fsl_queue_td() 742 lastreq = list_entry(ep->queue.prev, struct fsl_req, queue); in fsl_queue_td() 882 || !list_empty(&req->queue)) { in fsl_ep_queue() 919 list_add_tail(&req->queue, &ep->queue); in fsl_ep_queue() 951 list_for_each_entry(req, &ep->queue, queue) { in fsl_ep_dequeue() 961 if (ep->queue.next == &req->queue) { in fsl_ep_dequeue() [all …]
|
D | bcm63xx_udc.c | 247 struct list_head queue; member 260 struct list_head queue; /* ep's requests */ member 782 INIT_LIST_HEAD(&bep->queue); in iudma_init_channel() 975 INIT_LIST_HEAD(&bep->queue); in bcm63xx_init_udc_hw() 1062 BUG_ON(!list_empty(&bep->queue)); in bcm63xx_ep_enable() 1101 if (!list_empty(&bep->queue)) { in bcm63xx_ep_disable() 1102 list_for_each_safe(pos, n, &bep->queue) { in bcm63xx_ep_disable() 1104 list_entry(pos, struct bcm63xx_req, queue); in bcm63xx_ep_disable() 1108 list_del(&breq->queue); in bcm63xx_ep_disable() 1198 list_add_tail(&breq->queue, &bep->queue); in bcm63xx_udc_queue() [all …]
|
D | fusb300_udc.c | 261 while (!list_empty(&ep->queue)) { in fusb300_disable() 262 req = list_entry(ep->queue.next, struct fusb300_request, queue); in fusb300_disable() 279 INIT_LIST_HEAD(&req->queue); in fusb300_alloc_request() 438 if (list_empty(&ep->queue)) in fusb300_queue() 441 list_add_tail(&req->queue, &ep->queue); in fusb300_queue() 466 if (!list_empty(&ep->queue)) in fusb300_dequeue() 486 if (!list_empty(&ep->queue)) { in fusb300_set_halt_and_wedge() 528 .queue = fusb300_queue, 782 if (!list_empty(&ep->queue)) in clear_feature() 870 list_del_init(&req->queue); in done() [all …]
|
/linux-4.4.14/net/ipv4/ |
D | inet_connection_sock.c | 306 struct request_sock_queue *queue = &icsk->icsk_accept_queue; in inet_csk_accept() local 321 if (reqsk_queue_empty(queue)) { in inet_csk_accept() 333 req = reqsk_queue_remove(queue, sk); in inet_csk_accept() 338 spin_lock_bh(&queue->fastopenq.lock); in inet_csk_accept() 349 spin_unlock_bh(&queue->fastopenq.lock); in inet_csk_accept() 522 static bool reqsk_queue_unlink(struct request_sock_queue *queue, in reqsk_queue_unlink() argument 561 struct request_sock_queue *queue = &icsk->icsk_accept_queue; in reqsk_timer_handler() local 588 qlen = reqsk_queue_len(queue); in reqsk_timer_handler() 590 int young = reqsk_queue_len_young(queue) << 1; in reqsk_timer_handler() 599 defer_accept = READ_ONCE(queue->rskq_defer_accept); in reqsk_timer_handler() [all …]
|
D | tcp_yeah.c | 131 u32 rtt, queue; in tcp_yeah_cong_avoid() local 153 queue = bw; in tcp_yeah_cong_avoid() 155 if (queue > TCP_YEAH_ALPHA || in tcp_yeah_cong_avoid() 157 if (queue > TCP_YEAH_ALPHA && in tcp_yeah_cong_avoid() 159 u32 reduction = min(queue / TCP_YEAH_GAMMA , in tcp_yeah_cong_avoid() 188 yeah->lastQ = queue; in tcp_yeah_cong_avoid()
|
/linux-4.4.14/arch/xtensa/platforms/iss/ |
D | simdisk.c | 31 struct request_queue *queue; member 276 dev->queue = blk_alloc_queue(GFP_KERNEL); in simdisk_setup() 277 if (dev->queue == NULL) { in simdisk_setup() 282 blk_queue_make_request(dev->queue, simdisk_make_request); in simdisk_setup() 283 dev->queue->queuedata = dev; in simdisk_setup() 293 dev->gd->queue = dev->queue; in simdisk_setup() 303 blk_cleanup_queue(dev->queue); in simdisk_setup() 304 dev->queue = NULL; in simdisk_setup() 360 if (dev->queue) in simdisk_teardown() 361 blk_cleanup_queue(dev->queue); in simdisk_teardown()
|
/linux-4.4.14/drivers/s390/net/ |
D | qeth_core_main.c | 64 static void qeth_notify_skbs(struct qeth_qdio_out_q *queue, 68 static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue, 512 static inline int qeth_is_cq(struct qeth_card *card, unsigned int queue) in qeth_is_cq() argument 516 queue != 0 && in qeth_is_cq() 517 queue == card->qdio.no_in_queues - 1; in qeth_is_cq() 1248 static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue, in qeth_clear_output_buffer() argument 1256 atomic_dec(&queue->set_pci_flags_count); in qeth_clear_output_buffer() 1261 for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(queue->card); ++i) { in qeth_clear_output_buffer() 3263 struct qeth_qdio_q *queue = card->qdio.in_q; in qeth_queue_input_buffer() local 3270 count = (index < queue->next_buf_to_init)? in qeth_queue_input_buffer() [all …]
|
/linux-4.4.14/drivers/ptp/ |
D | ptp_clock.c | 53 static void enqueue_external_timestamp(struct timestamp_event_queue *queue, in enqueue_external_timestamp() argument 63 spin_lock_irqsave(&queue->lock, flags); in enqueue_external_timestamp() 65 dst = &queue->buf[queue->tail]; in enqueue_external_timestamp() 70 if (!queue_free(queue)) in enqueue_external_timestamp() 71 queue->head = (queue->head + 1) % PTP_MAX_TIMESTAMPS; in enqueue_external_timestamp() 73 queue->tail = (queue->tail + 1) % PTP_MAX_TIMESTAMPS; in enqueue_external_timestamp() 75 spin_unlock_irqrestore(&queue->lock, flags); in enqueue_external_timestamp()
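enqueue_external_timestamp() above writes at tail and, when the ring is full, advances head so the oldest event is overwritten. A userspace sketch of that fixed-size FIFO; MAX_TS and the element type are stand-ins for PTP_MAX_TIMESTAMPS and the event struct, and the spinlock is omitted.

	#include <stdio.h>

	#define MAX_TS 4		/* one slot is sacrificed to tell full from empty */

	static unsigned long buf[MAX_TS];
	static int head, tail;

	static int queue_cnt(void)
	{
		int c = tail - head;
		return c < 0 ? c + MAX_TS : c;
	}

	static void enqueue(unsigned long ts)
	{
		buf[tail] = ts;
		if (queue_cnt() == MAX_TS - 1)		/* full: drop the oldest */
			head = (head + 1) % MAX_TS;
		tail = (tail + 1) % MAX_TS;
	}

	int main(void)
	{
		unsigned long ts;

		for (ts = 1; ts <= 6; ts++)		/* overfill on purpose */
			enqueue(ts);
		while (queue_cnt()) {
			printf("%lu ", buf[head]);
			head = (head + 1) % MAX_TS;
		}
		printf("\n");				/* prints: 4 5 6 */
		return 0;
	}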
|
D | ptp_chardev.c | 274 struct timestamp_event_queue *queue = &ptp->tsevq; in ptp_read() local 292 ptp->defunct || queue_cnt(queue))) { in ptp_read() 308 spin_lock_irqsave(&queue->lock, flags); in ptp_read() 310 qcnt = queue_cnt(queue); in ptp_read() 316 event[i] = queue->buf[queue->head]; in ptp_read() 317 queue->head = (queue->head + 1) % PTP_MAX_TIMESTAMPS; in ptp_read() 320 spin_unlock_irqrestore(&queue->lock, flags); in ptp_read()
|
D | ptp_sysfs.c | 99 struct timestamp_event_queue *queue = &ptp->tsevq; in extts_fifo_show() local 110 spin_lock_irqsave(&queue->lock, flags); in extts_fifo_show() 111 qcnt = queue_cnt(queue); in extts_fifo_show() 113 event = queue->buf[queue->head]; in extts_fifo_show() 114 queue->head = (queue->head + 1) % PTP_MAX_TIMESTAMPS; in extts_fifo_show() 116 spin_unlock_irqrestore(&queue->lock, flags); in extts_fifo_show()
|
/linux-4.4.14/Documentation/devicetree/bindings/powerpc/fsl/ |
D | raideng.txt | 30 There must be a sub-node for each job queue present in RAID Engine 33 - compatible: Should contain "fsl,raideng-v1.0-job-queue" as the value 34 This identifies the job queue interface 35 - reg: offset and length of the register set for job queue 42 compatible = "fsl,raideng-v1.0-job-queue"; 49 This node must be a sub-node of job queue node 70 compatible = "fsl,raideng-v1.0-job-queue";
|
/linux-4.4.14/drivers/media/platform/vsp1/ |
D | vsp1_video.c | 597 struct vsp1_video_buffer, queue); in vsp1_video_complete_buffer() 605 list_del(&done->queue); in vsp1_video_complete_buffer() 609 struct vsp1_video_buffer, queue); in vsp1_video_complete_buffer() 634 video->ops->queue(video, buf); in vsp1_video_frame_end() 855 list_add_tail(&buf->queue, &video->irqqueue); in vsp1_video_buffer_queue() 863 video->ops->queue(video, buf); in vsp1_video_buffer_queue() 866 if (vb2_is_streaming(&video->queue) && in vsp1_video_buffer_queue() 959 list_for_each_entry(buffer, &video->irqqueue, queue) in vsp1_video_stop_streaming() 1010 if (format->type != video->queue.type) in vsp1_video_get_format() 1026 if (format->type != video->queue.type) in vsp1_video_try_format() [all …]
|
D | vsp1_video.h | 98 struct list_head queue; member 111 void (*queue)(struct vsp1_video *video, struct vsp1_video_buffer *buf); member 131 struct vb2_queue queue; member
|
/linux-4.4.14/Documentation/devicetree/bindings/net/ |
D | keystone-netcp.txt | 93 - tx-queue: the navigator queue number associated with the tx dma channel. 119 - rx-queue: the navigator queue number associated with rx dma channel. 124 - rx-queue-depth: number of descriptors in each of the free descriptor 125 queue (FDQ) for the pktdma Rx flow. There can be at 128 - tx-completion-queue: the navigator queue number where the descriptors are 168 tx-queue = <648>; 200 rx-queue-depth = <128 128 0 0>; 202 rx-queue = <8704>; 203 tx-completion-queue = <8706>; 212 rx-queue-depth = <128 128 0 0>; [all …]
|
/linux-4.4.14/drivers/media/platform/omap3isp/ |
D | ispvideo.c | 322 static int isp_video_queue_setup(struct vb2_queue *queue, in isp_video_queue_setup() argument 327 struct isp_video_fh *vfh = vb2_get_drv_priv(queue); in isp_video_queue_setup() 416 video->ops->queue(video, buffer); in isp_video_buffer_queue() 576 vb2_discard_done(video->queue); in omap3isp_video_resume() 583 video->ops->queue(video, buf); in omap3isp_video_resume() 831 ret = vb2_reqbufs(&vfh->queue, rb); in isp_video_reqbufs() 845 ret = vb2_querybuf(&vfh->queue, b); in isp_video_querybuf() 859 ret = vb2_qbuf(&vfh->queue, b); in isp_video_qbuf() 873 ret = vb2_dqbuf(&vfh->queue, b, file->f_flags & O_NONBLOCK); in isp_video_dqbuf() 1069 video->queue = &vfh->queue; in isp_video_streamon() [all …]
|
D | ispvideo.h | 148 int(*queue)(struct isp_video *video, struct isp_buffer *buffer); member 175 struct vb2_queue *queue; member 189 struct vb2_queue queue; member 196 container_of(q, struct isp_video_fh, queue)
|
/linux-4.4.14/drivers/staging/rtl8188eu/include/ |
D | osdep_service.h | 61 struct list_head queue; member 65 static inline struct list_head *get_list_head(struct __queue *queue) in get_list_head() argument 67 return &(queue->queue); in get_list_head()
|
/linux-4.4.14/drivers/scsi/ibmvscsi/ |
D | ibmvscsi.c | 152 static void ibmvscsi_release_crq_queue(struct crq_queue *queue, in ibmvscsi_release_crq_queue() argument 166 queue->msg_token, in ibmvscsi_release_crq_queue() 167 queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL); in ibmvscsi_release_crq_queue() 168 free_page((unsigned long)queue->msgs); in ibmvscsi_release_crq_queue() 178 static struct viosrp_crq *crq_queue_next_crq(struct crq_queue *queue) in crq_queue_next_crq() argument 183 spin_lock_irqsave(&queue->lock, flags); in crq_queue_next_crq() 184 crq = &queue->msgs[queue->cur]; in crq_queue_next_crq() 186 if (++queue->cur == queue->size) in crq_queue_next_crq() 187 queue->cur = 0; in crq_queue_next_crq() 195 spin_unlock_irqrestore(&queue->lock, flags); in crq_queue_next_crq() [all …]
|
/linux-4.4.14/drivers/infiniband/hw/cxgb4/ |
D | t4.h | 288 union t4_wr *queue; member 316 union t4_recv_wr *queue; member 384 return wq->rq.queue[wq->rq.size].status.host_wq_pidx; in t4_rq_host_wq_pidx() 434 return wq->sq.queue[wq->sq.size].status.host_wq_pidx; in t4_sq_host_wq_pidx() 513 return wq->rq.queue[wq->rq.size].status.qp_err; in t4_wq_in_error() 518 wq->rq.queue[wq->rq.size].status.qp_err = 1; in t4_set_wq_in_error() 523 wq->rq.queue[wq->rq.size].status.db_off = 1; in t4_disable_wq_db() 528 wq->rq.queue[wq->rq.size].status.db_off = 0; in t4_enable_wq_db() 533 return !wq->rq.queue[wq->rq.size].status.db_off; in t4_wq_db_enabled() 541 struct t4_cqe *queue; member [all …]
|
/linux-4.4.14/drivers/usb/isp1760/ |
D | isp1760-udc.c | 31 struct list_head queue; member 238 list_del(&req->queue); in isp1760_udc_receive() 303 if (list_empty(&ep->queue)) { in isp1760_ep_rx_ready() 311 req = list_first_entry(&ep->queue, struct isp1760_request, in isp1760_ep_rx_ready() 312 queue); in isp1760_ep_rx_ready() 337 if (list_empty(&ep->queue)) { in isp1760_ep_tx_complete() 355 req = list_first_entry(&ep->queue, struct isp1760_request, in isp1760_ep_tx_complete() 356 queue); in isp1760_ep_tx_complete() 374 list_del(&req->queue); in isp1760_ep_tx_complete() 379 if (!list_empty(&ep->queue)) in isp1760_ep_tx_complete() [all …]
|
/linux-4.4.14/tools/perf/util/ |
D | intel-bts.c | 172 struct auxtrace_queue *queue, in intel_bts_setup_queue() argument 175 struct intel_bts_queue *btsq = queue->priv; in intel_bts_setup_queue() 177 if (list_empty(&queue->head)) in intel_bts_setup_queue() 184 queue->priv = btsq; in intel_bts_setup_queue() 186 if (queue->cpu != -1) in intel_bts_setup_queue() 187 btsq->cpu = queue->cpu; in intel_bts_setup_queue() 188 btsq->tid = queue->tid; in intel_bts_setup_queue() 197 btsq->buffer = auxtrace_buffer__next(queue, NULL); in intel_bts_setup_queue() 253 static int intel_bts_do_fix_overlap(struct auxtrace_queue *queue, in intel_bts_do_fix_overlap() argument 259 if (b->list.prev == &queue->head) in intel_bts_do_fix_overlap() [all …]
|
/linux-4.4.14/Documentation/arm/keystone/ |
D | knav-qmss.txt | 9 multi-core Navigator. QMSS consist of queue managers, packed-data structure 19 queue pool management (allocation, push, pop and notify) and descriptor 32 queue or multiple contiguous queues. drivers/soc/ti/knav_qmss_acc.c is the 53 file system. The driver doesn't acc queues to the supported queue range if 54 PDSP is not running in the SoC. The API call fails if there is a queue open 55 request to an acc queue and PDSP is not running. So make sure to copy firmware 56 to file system before using these queue types.
|
/linux-4.4.14/drivers/net/ethernet/freescale/ |
D | ucc_geth_ethtool.c | 219 int queue = 0; in uec_get_ringparam() local 226 ring->rx_pending = ug_info->bdRingLenRx[queue]; in uec_get_ringparam() 227 ring->rx_mini_pending = ug_info->bdRingLenRx[queue]; in uec_get_ringparam() 228 ring->rx_jumbo_pending = ug_info->bdRingLenRx[queue]; in uec_get_ringparam() 229 ring->tx_pending = ug_info->bdRingLenTx[queue]; in uec_get_ringparam() 238 int queue = 0, ret = 0; in uec_set_ringparam() local 256 ug_info->bdRingLenRx[queue] = ring->rx_pending; in uec_set_ringparam() 257 ug_info->bdRingLenTx[queue] = ring->tx_pending; in uec_set_ringparam()
|
/linux-4.4.14/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/ |
D | host.fuc | 39 // HOST HOST->PWR queue description 40 .equ #fifo_qlen 4 // log2(size of queue entry in bytes) 41 .equ #fifo_qnum 3 // log2(max number of entries in queue) 42 .equ #fifo_qmaskb (1 << #fifo_qnum) // max number of entries in queue 48 // HOST PWR->HOST queue description 49 .equ #rfifo_qlen 4 // log2(size of queue entry in bytes) 50 .equ #rfifo_qnum 3 // log2(max number of entries in queue) 51 .equ #rfifo_qmaskb (1 << #rfifo_qnum) // max number of entries in queue
|
/linux-4.4.14/drivers/hid/ |
D | hid-wiimote-core.c | 46 struct wiimote_queue *queue = container_of(work, struct wiimote_queue, in wiimote_queue_worker() local 48 struct wiimote_data *wdata = container_of(queue, struct wiimote_data, in wiimote_queue_worker() 49 queue); in wiimote_queue_worker() 53 spin_lock_irqsave(&wdata->queue.lock, flags); in wiimote_queue_worker() 55 while (wdata->queue.head != wdata->queue.tail) { in wiimote_queue_worker() 56 spin_unlock_irqrestore(&wdata->queue.lock, flags); in wiimote_queue_worker() 58 wdata->queue.outq[wdata->queue.tail].data, in wiimote_queue_worker() 59 wdata->queue.outq[wdata->queue.tail].size); in wiimote_queue_worker() 65 spin_lock_irqsave(&wdata->queue.lock, flags); in wiimote_queue_worker() 67 wdata->queue.tail = (wdata->queue.tail + 1) % WIIMOTE_BUFSIZE; in wiimote_queue_worker() [all …]
|
/linux-4.4.14/include/net/netfilter/ |
D | nf_queue.h | 83 nfqueue_hash(const struct sk_buff *skb, u16 queue, u16 queues_total, u8 family, in nfqueue_hash() argument 87 queue += ((u64) hash_v4(skb, jhash_initval) * queues_total) >> 32; in nfqueue_hash() 90 queue += ((u64) hash_v6(skb, jhash_initval) * queues_total) >> 32; in nfqueue_hash() 93 return queue; in nfqueue_hash()
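nfqueue_hash() spreads packets across queues_total queues by scaling a 32-bit hash with a 64-bit multiply and shift instead of a modulo. A worked example of that mapping; the hash values are arbitrary.

	#include <stdio.h>
	#include <stdint.h>

	static uint16_t pick_queue(uint32_t hash, uint16_t base, uint16_t queues_total)
	{
		/* maps the full 32-bit range evenly onto [base, base + queues_total) */
		return base + (uint16_t)(((uint64_t)hash * queues_total) >> 32);
	}

	int main(void)
	{
		uint32_t hashes[] = { 0x00000000, 0x40000000, 0x80000000, 0xffffffff };
		int i;

		for (i = 0; i < 4; i++)
			printf("hash 0x%08x -> queue %u\n",
			       hashes[i], pick_queue(hashes[i], 0, 4));
		/* 0x00000000 -> 0, 0x40000000 -> 1, 0x80000000 -> 2, 0xffffffff -> 3 */
		return 0;
	}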
|
/linux-4.4.14/drivers/net/ethernet/intel/i40evf/ |
D | i40e_lan_hmc.h | 171 u16 queue); 173 u16 queue, 176 u16 queue); 178 u16 queue,
|
/linux-4.4.14/drivers/staging/media/omap4iss/ |
D | iss_video.h | 143 int (*queue)(struct iss_video *video, struct iss_buffer *buffer); member 169 struct vb2_queue *queue; member 183 struct vb2_queue queue; member 190 container_of(q, struct iss_video_fh, queue)
|
D | iss_video.c | 383 video->ops->queue(video, buffer); in iss_video_buf_queue() 508 vb2_queue_error(video->queue); in omap4iss_video_cancel_stream() 682 return vb2_reqbufs(&vfh->queue, rb); in iss_video_reqbufs() 690 return vb2_querybuf(&vfh->queue, b); in iss_video_querybuf() 698 return vb2_qbuf(&vfh->queue, b); in iss_video_qbuf() 706 return vb2_expbuf(&vfh->queue, e); in iss_video_expbuf() 714 return vb2_dqbuf(&vfh->queue, b, file->f_flags & O_NONBLOCK); in iss_video_dqbuf() 830 video->queue = &vfh->queue; in iss_video_streamon() 835 ret = vb2_streamon(&vfh->queue, type); in iss_video_streamon() 860 vb2_streamoff(&vfh->queue, type); in iss_video_streamon() [all …]
|
/linux-4.4.14/drivers/net/ethernet/intel/i40e/ |
D | i40e_lan_hmc.h | 171 u16 queue); 173 u16 queue, 176 u16 queue); 178 u16 queue,
|
/linux-4.4.14/drivers/staging/unisys/include/ |
D | visorbus.h | 200 bool visorchannel_signalremove(struct visorchannel *channel, u32 queue, 202 bool visorchannel_signalinsert(struct visorchannel *channel, u32 queue, 204 bool visorchannel_signalempty(struct visorchannel *channel, u32 queue); 207 u32 queue); 208 int visorchannel_signalqueue_max_slots(struct visorchannel *channel, u32 queue);
|
/linux-4.4.14/drivers/net/ethernet/chelsio/cxgb4/ |
D | cxgb4_uld.h | 173 unsigned int queue); 176 unsigned int queue); 178 unsigned int queue, bool ipv6); 181 unsigned int queue, 184 unsigned int queue, bool ipv6); 186 static inline void set_wr_txq(struct sk_buff *skb, int prio, int queue) in set_wr_txq() argument 188 skb_set_queue_mapping(skb, (queue << 1) | prio); in set_wr_txq()
|
/linux-4.4.14/Documentation/scsi/ |
D | hptiop.txt | 84 All queued requests are handled via inbound/outbound queue port. 89 - Get a free request packet by reading the inbound queue port or 92 The value returned from the inbound queue port is an offset 99 - Post the packet to IOP by writing it to inbound queue. For requests 100 allocated in IOP memory, write the offset to inbound queue port. For 102 to the inbound queue port. 105 will be put into outbound queue. An outbound interrupt will be 109 outbound queue. 112 is posted to the outbound queue. If IOP_REQUEST_FLAG_OUTPUT_CONTEXT 116 - The host read the outbound queue and complete the request. [all …]
|
/linux-4.4.14/drivers/gpu/drm/atmel-hlcdc/ |
D | atmel_hlcdc_layer.c | 163 dma->queue = fb_flip; in atmel_hlcdc_layer_update_apply() 199 flip = dma->queue ? dma->queue : dma->cur; in atmel_hlcdc_layer_irq() 266 dma->cur = dma->queue; in atmel_hlcdc_layer_irq() 267 dma->queue = NULL; in atmel_hlcdc_layer_irq() 279 if (dma->queue) in atmel_hlcdc_layer_irq() 281 dma->queue); in atmel_hlcdc_layer_irq() 288 dma->queue = NULL; in atmel_hlcdc_layer_irq() 291 if (!dma->queue) { in atmel_hlcdc_layer_irq() 326 if (dma->queue) { in atmel_hlcdc_layer_disable() 327 atmel_hlcdc_layer_fb_flip_release_queue(layer, dma->queue); in atmel_hlcdc_layer_disable() [all …]
|
/linux-4.4.14/drivers/media/platform/s5p-mfc/ |
D | s5p_mfc_intr.c | 28 ret = wait_event_interruptible_timeout(dev->queue, in s5p_mfc_wait_for_done_dev() 60 ret = wait_event_interruptible_timeout(ctx->queue, in s5p_mfc_wait_for_done_ctx() 65 ret = wait_event_timeout(ctx->queue, in s5p_mfc_wait_for_done_ctx()
|
/linux-4.4.14/drivers/usb/musb/ |
D | musb_host.h | 141 struct list_head *queue; in next_urb() local 145 queue = &qh->hep->urb_list; in next_urb() 146 if (list_empty(queue)) in next_urb() 148 return list_entry(queue->next, struct urb, urb_list); in next_urb()
|
D | musb_gadget.h | 134 struct list_head *queue = &ep->req_list; in next_request() local 136 if (list_empty(queue)) in next_request() 138 return container_of(queue->next, struct musb_request, list); in next_request()
|
/linux-4.4.14/drivers/virtio/ |
D | virtio_pci_legacy.c | 138 info->queue = alloc_pages_exact(size, GFP_KERNEL|__GFP_ZERO); in setup_vq() 139 if (info->queue == NULL) in setup_vq() 143 iowrite32(virt_to_phys(info->queue) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT, in setup_vq() 149 true, info->queue, vp_notify, callback, name); in setup_vq() 172 free_pages_exact(info->queue, size); in setup_vq() 197 free_pages_exact(info->queue, size); in del_vq()
|
/linux-4.4.14/drivers/staging/rtl8712/ |
D | rtl871x_cmd.c | 122 static sint _enqueue_cmd(struct __queue *queue, struct cmd_obj *obj) in _enqueue_cmd() argument 128 spin_lock_irqsave(&queue->lock, irqL); in _enqueue_cmd() 129 list_add_tail(&obj->list, &queue->queue); in _enqueue_cmd() 130 spin_unlock_irqrestore(&queue->lock, irqL); in _enqueue_cmd() 134 static struct cmd_obj *_dequeue_cmd(struct __queue *queue) in _dequeue_cmd() argument 139 spin_lock_irqsave(&(queue->lock), irqL); in _dequeue_cmd() 140 if (list_empty(&(queue->queue))) { in _dequeue_cmd() 143 obj = LIST_CONTAINOR(queue->queue.next, in _dequeue_cmd() 147 spin_unlock_irqrestore(&(queue->lock), irqL); in _dequeue_cmd() 185 struct __queue *queue; in r8712_enqueue_cmd_ex() local [all …]
|
/linux-4.4.14/drivers/md/ |
D | raid0.c | 102 rdev1->bdev->bd_disk->queue)); in create_strip_zones() 375 if (mddev->queue) { in raid0_run() 379 blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors); in raid0_run() 380 blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors); in raid0_run() 381 blk_queue_max_discard_sectors(mddev->queue, mddev->chunk_sectors); in raid0_run() 383 blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9); in raid0_run() 384 blk_queue_io_opt(mddev->queue, in raid0_run() 394 queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, mddev->queue); in raid0_run() 396 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue); in raid0_run() 406 if (mddev->queue) { in raid0_run() [all …]
|
D | dm-cache-policy-mq.c | 132 struct queue { struct 140 static void queue_init(struct queue *q) in queue_init() argument 155 static unsigned queue_size(struct queue *q) in queue_size() 160 static bool queue_empty(struct queue *q) in queue_empty() 168 static void queue_push(struct queue *q, unsigned level, struct list_head *elt) in queue_push() 174 static void queue_remove(struct queue *q, struct list_head *elt) in queue_remove() 180 static bool is_sentinel(struct queue *q, struct list_head *h) in is_sentinel() 189 static struct list_head *queue_peek(struct queue *q) in queue_peek() 202 static struct list_head *queue_pop(struct queue *q) in queue_pop() 217 static struct list_head *queue_pop_old(struct queue *q) in queue_pop_old() [all …]
|
/linux-4.4.14/drivers/net/wimax/i2400m/ |
D | rx.c | 491 struct sk_buff_head queue; member 500 skb_queue_head_init(&roq->queue); in __i2400m_roq_init() 676 if (skb_queue_empty(&roq->queue)) { in __i2400m_roq_queue() 678 __skb_queue_head(&roq->queue, skb); in __i2400m_roq_queue() 682 skb_itr = skb_peek_tail(&roq->queue); in __i2400m_roq_queue() 689 __skb_queue_tail(&roq->queue, skb); in __i2400m_roq_queue() 698 skb_queue_walk(&roq->queue, skb_itr) { in __i2400m_roq_queue() 706 __skb_queue_before(&roq->queue, skb_itr, skb); in __i2400m_roq_queue() 715 skb_queue_walk(&roq->queue, skb_itr) { in __i2400m_roq_queue() 756 skb_queue_walk_safe(&roq->queue, skb_itr, tmp_itr) { in __i2400m_roq_update_ws() [all …]
|
/linux-4.4.14/drivers/block/zram/ |
D | zram_drv.c | 897 static blk_qc_t zram_make_request(struct request_queue *queue, struct bio *bio) in zram_make_request() argument 899 struct zram *zram = queue->queuedata; in zram_make_request() 904 blk_queue_split(queue, &bio, queue->bio_split); in zram_make_request() 1194 struct request_queue *queue; in zram_add() local 1208 queue = blk_alloc_queue(GFP_KERNEL); in zram_add() 1209 if (!queue) { in zram_add() 1216 blk_queue_make_request(queue, zram_make_request); in zram_add() 1230 zram->disk->queue = queue; in zram_add() 1231 zram->disk->queue->queuedata = zram; in zram_add() 1238 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue); in zram_add() [all …]
|
/linux-4.4.14/drivers/media/pci/cx23885/ |
D | cx23885-vbi.c | 210 list_add_tail(&buf->queue, &q->active); in buffer_queue() 218 queue); in buffer_queue() 220 list_add_tail(&buf->queue, &q->active); in buffer_queue() 233 struct cx23885_buffer, queue); in cx23885_start_streaming() 249 struct cx23885_buffer, queue); in cx23885_stop_streaming() 251 list_del(&buf->queue); in cx23885_stop_streaming()
|
/linux-4.4.14/drivers/char/ |
D | apm-emulation.c | 119 struct apm_queue queue; member 203 queue_add_event(&as->queue, event); in queue_event() 218 if (queue_empty(&as->queue) && fp->f_flags & O_NONBLOCK) in apm_read() 221 wait_event_interruptible(apm_waitqueue, !queue_empty(&as->queue)); in apm_read() 223 while ((i >= sizeof(event)) && !queue_empty(&as->queue)) { in apm_read() 224 event = queue_get_event(&as->queue); in apm_read() 251 return queue_empty(&as->queue) ? 0 : POLLIN | POLLRDNORM; in apm_poll() 557 queue_add_event(&as->queue, apm_event); in apm_suspend_notifier()
|
/linux-4.4.14/drivers/net/wireless/p54/ |
D | p54.h | 104 #define P54_SET_QUEUE(queue, ai_fs, cw_min, cw_max, _txop) \ argument 106 queue.aifs = cpu_to_le16(ai_fs); \ 107 queue.cwmin = cpu_to_le16(cw_min); \ 108 queue.cwmax = cpu_to_le16(cw_max); \ 109 queue.txop = cpu_to_le16(_txop); \
|
D | txrx.c | 189 struct p54_tx_queue_stats *queue; in p54_tx_qos_accounting_alloc() local 195 queue = &priv->tx_stats[p54_queue]; in p54_tx_qos_accounting_alloc() 198 if (unlikely(queue->len >= queue->limit && IS_QOS_QUEUE(p54_queue))) { in p54_tx_qos_accounting_alloc() 203 queue->len++; in p54_tx_qos_accounting_alloc() 204 queue->count++; in p54_tx_qos_accounting_alloc() 206 if (unlikely(queue->len == queue->limit && IS_QOS_QUEUE(p54_queue))) { in p54_tx_qos_accounting_alloc() 684 u8 *queue, u32 *extra_len, u16 *flags, u16 *aid, in p54_tx_80211_header() argument 703 *queue = skb_get_queue_mapping(skb) + P54_QUEUE_DATA; in p54_tx_80211_header() 723 *queue = P54_QUEUE_CAB; in p54_tx_80211_header() 747 *queue = P54_QUEUE_BEACON; in p54_tx_80211_header() [all …]
|
/linux-4.4.14/include/drm/ |
D | drm_os_linux.h | 43 #define DRM_WAIT_ON( ret, queue, timeout, condition ) \ argument 47 add_wait_queue(&(queue), &entry); \ 64 remove_wait_queue(&(queue), &entry); \
|
/linux-4.4.14/drivers/atm/ |
D | firestream.c | 588 static inline struct FS_QENTRY *get_qentry (struct fs_dev *dev, struct queue *q) in get_qentry() 594 static void submit_qentry (struct fs_dev *dev, struct queue *q, struct FS_QENTRY *qe) in submit_qentry() 638 static void submit_queue (struct fs_dev *dev, struct queue *q, in submit_queue() 665 static void submit_command (struct fs_dev *dev, struct queue *q, in submit_command() 677 static void process_return_queue (struct fs_dev *dev, struct queue *q) in process_return_queue() 703 static void process_txdone_queue (struct fs_dev *dev, struct queue *q) in process_txdone_queue() 773 static void process_incoming (struct fs_dev *dev, struct queue *q) in process_incoming() 1401 static int init_q(struct fs_dev *dev, struct queue *txq, int queue, in init_q() argument 1410 queue, nentries); in init_q() 1417 write_fs (dev, Q_SA(queue), virt_to_bus(p)); in init_q() [all …]
|
D | idt77252.c | 95 struct sk_buff *, int queue); 100 static void add_rx_skb(struct idt77252_dev *, int queue, 582 sb_pool_add(struct idt77252_dev *card, struct sk_buff *skb, int queue) in sb_pool_add() argument 584 struct sb_pool *pool = &card->sbpool[queue]; in sb_pool_add() 595 IDT77252_PRV_POOL(skb) = POOL_HANDLE(queue, index); in sb_pool_add() 604 unsigned int queue, index; in sb_pool_remove() local 609 queue = POOL_QUEUE(handle); in sb_pool_remove() 610 if (queue > 3) in sb_pool_remove() 617 card->sbpool[queue].skb[index] = NULL; in sb_pool_remove() 623 unsigned int queue, index; in sb_pool_skb() local [all …]
|
/linux-4.4.14/drivers/net/wireless/prism54/ |
D | isl_38xx.c | 223 isl38xx_in_queue(isl38xx_control_block *cb, int queue) in isl38xx_in_queue() argument 225 const s32 delta = (le32_to_cpu(cb->driver_curr_frag[queue]) - in isl38xx_in_queue() 226 le32_to_cpu(cb->device_curr_frag[queue])); in isl38xx_in_queue() 233 switch (queue) { in isl38xx_in_queue()
|
/linux-4.4.14/drivers/media/common/saa7146/ |
D | saa7146_fops.c | 83 list_add_tail(&buf->vb.queue,&q->queue); in saa7146_buffer_queue() 124 if (!list_empty(&q->queue)) { in saa7146_buffer_next() 126 buf = list_entry(q->queue.next,struct saa7146_buf,vb.queue); in saa7146_buffer_next() 127 list_del(&buf->vb.queue); in saa7146_buffer_next() 128 if (!list_empty(&q->queue)) in saa7146_buffer_next() 129 next = list_entry(q->queue.next,struct saa7146_buf, vb.queue); in saa7146_buffer_next() 132 buf, q->queue.prev, q->queue.next); in saa7146_buffer_next()
|
/linux-4.4.14/arch/powerpc/boot/dts/fsl/ |
D | p1020rdb-pc_camp_core1.dts | 111 35 36 40 /* enet1-queue-group0 */ 112 51 52 67 /* enet1-queue-group1 */ 113 31 32 33 /* enet2-queue-group0 */ 114 25 26 27 /* enet2-queue-group1 */
|
/linux-4.4.14/drivers/spi/ |
D | spi-txx9.c | 78 struct list_head queue; member 287 while (!list_empty(&c->queue)) { in txx9spi_work() 290 m = container_of(c->queue.next, struct spi_message, queue); in txx9spi_work() 291 list_del_init(&m->queue); in txx9spi_work() 317 list_add_tail(&m->queue, &c->queue); in txx9spi_transfer() 341 INIT_LIST_HEAD(&c->queue); in txx9spi_probe()
|
/linux-4.4.14/drivers/staging/rdma/ipath/ |
D | ipath_cq.c | 61 wc = cq->queue; in ipath_cq_enter() 145 wc = cq->queue; in ipath_poll_cq() 295 cq->queue = wc; in ipath_create_cq() 331 vfree(cq->queue); in ipath_destroy_cq() 362 cq->queue->head != cq->queue->tail) in ipath_req_notify_cq() 418 old_wc = cq->queue; in ipath_resize_cq() 446 cq->queue = wc; in ipath_resize_cq()
|
/linux-4.4.14/drivers/staging/lustre/lustre/llite/ |
D | rw26.c | 233 struct cl_2queue *queue; in ll_direct_rw_pages() local 245 queue = &io->ci_queue; in ll_direct_rw_pages() 246 cl_2queue_init(queue); in ll_direct_rw_pages() 301 cl_2queue_add(queue, clp); in ll_direct_rw_pages() 321 queue, 0); in ll_direct_rw_pages() 326 cl_2queue_discard(env, io, queue); in ll_direct_rw_pages() 327 cl_2queue_disown(env, io, queue); in ll_direct_rw_pages() 328 cl_2queue_fini(env, queue); in ll_direct_rw_pages()
|
/linux-4.4.14/security/integrity/ima/ |
D | ima_queue.c | 37 .queue[0 ... IMA_MEASURE_HTABLE_SIZE - 1] = HLIST_HEAD_INIT 55 hlist_for_each_entry_rcu(qe, &ima_htable.queue[key], hnext) { in ima_lookup_digest_entry() 88 hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]); in ima_add_digest_entry()
|
/linux-4.4.14/drivers/staging/rdma/hfi1/ |
D | Kconfig | 29 bool "Enable prescanning of the RX queue for ECNs" 33 This option toggles the prescanning of the receive queue for 36 After the prescanning step, the receive queue is processed as
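
The Kconfig help text above describes an optional prescan of the receive queue for ECN (Explicit Congestion Notification) marks, after which the queue is processed as usual. The fragment below is only a generic sketch of such a two-pass scheme under assumed types; the rx_ring/rx_pkt structures and the send_congestion_notification()/handle_packet() helpers are hypothetical and are not hfi1 code.

#include <stdbool.h>

/* Hypothetical packet and power-of-two ring descriptors. */
struct rx_pkt {
	bool ecn_ce;			/* congestion-experienced mark */
	/* ... headers and payload ... */
};

struct rx_ring {
	struct rx_pkt *pkt;
	unsigned int head, tail, mask;
};

void send_congestion_notification(struct rx_pkt *pkt);	/* hypothetical */
void handle_packet(struct rx_pkt *pkt);			/* hypothetical */

/* Optional prescan: react to ECN marks before any normal processing. */
static void prescan_for_ecn(struct rx_ring *r)
{
	unsigned int i;

	for (i = r->head; i != r->tail; i = (i + 1) & r->mask)
		if (r->pkt[i].ecn_ce)
			send_congestion_notification(&r->pkt[i]);
}

/* After the prescan step, the receive queue is processed as usual. */
static void process_rx_queue(struct rx_ring *r)
{
	prescan_for_ecn(r);

	while (r->head != r->tail) {
		handle_packet(&r->pkt[r->head]);
		r->head = (r->head + 1) & r->mask;
	}
}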
|