Searched refs:queues (Results 1 – 185 of 185) sorted by relevance

/linux-4.1.27/net/sched/
Dsch_multiq.c35 struct Qdisc **queues; member
62 return q->queues[0]; in multiq_classify()
64 return q->queues[band]; in multiq_classify()
112 qdisc = q->queues[q->curband]; in multiq_dequeue()
144 qdisc = q->queues[curband]; in multiq_peek()
162 qdisc = q->queues[band]; in multiq_drop()
182 qdisc_reset(q->queues[band]); in multiq_reset()
195 qdisc_destroy(q->queues[band]); in multiq_destroy()
197 kfree(q->queues); in multiq_destroy()
218 if (q->queues[i] != &noop_qdisc) { in multiq_tune()
[all …]
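
Editor's note: the sch_multiq.c hits above are scattered pieces of multiq_classify(). As a reading aid, here is a minimal sketch of how they fit together; the helper name and reduced signature are mine, and the real function also runs tc filters and CLS_ACT handling before falling back to skb->queue_mapping.

/*
 * Minimal sketch (not the verbatim kernel code): sch_multiq keeps one
 * child qdisc per hardware band in q->queues[] and picks the band from
 * skb->queue_mapping, falling back to band 0 when the value is out of
 * range.
 */
static struct Qdisc *multiq_pick_band(struct multiq_sched_data *q,
				      struct sk_buff *skb)
{
	u32 band = skb_get_queue_mapping(skb);

	if (band >= q->bands)
		return q->queues[0];	/* out-of-range mapping: default band */

	return q->queues[band];		/* one child qdisc per hw queue */
}
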
Dsch_prio.c29 struct Qdisc *queues[TCQ_PRIO_BANDS]; member
58 return q->queues[q->prio2band[band & TC_PRIO_MAX]]; in prio_classify()
64 return q->queues[q->prio2band[0]]; in prio_classify()
66 return q->queues[band]; in prio_classify()
102 struct Qdisc *qdisc = q->queues[prio]; in prio_peek()
116 struct Qdisc *qdisc = q->queues[prio]; in prio_dequeue()
136 qdisc = q->queues[prio]; in prio_drop()
153 qdisc_reset(q->queues[prio]); in prio_reset()
165 qdisc_destroy(q->queues[prio]); in prio_destroy()
191 struct Qdisc *child = q->queues[i]; in prio_tune()
[all …]
DKconfig114 to support devices that have multiple hardware transmit queues.
/linux-4.1.27/drivers/scsi/aacraid/
Dcomminit.c274 struct aac_entry * queues; in aac_comm_init() local
276 struct aac_queue_block * comm = dev->queues; in aac_comm_init()
295 queues = (struct aac_entry *)(((ulong)headers) + hdrsize); in aac_comm_init()
298 comm->queue[HostNormCmdQueue].base = queues; in aac_comm_init()
300 queues += HOST_NORM_CMD_ENTRIES; in aac_comm_init()
304 comm->queue[HostHighCmdQueue].base = queues; in aac_comm_init()
307 queues += HOST_HIGH_CMD_ENTRIES; in aac_comm_init()
311 comm->queue[AdapNormCmdQueue].base = queues; in aac_comm_init()
314 queues += ADAP_NORM_CMD_ENTRIES; in aac_comm_init()
318 comm->queue[AdapHighCmdQueue].base = queues; in aac_comm_init()
[all …]
Dcommsup.c325 q = &dev->queues->queue[qid]; in aac_get_entry()
524 if (!dev->queues) in aac_fib_send()
596 struct aac_queue * q = &dev->queues->queue[AdapNormCmdQueue]; in aac_fib_send()
777 q = &dev->queues->queue[AdapNormRespQueue]; in aac_fib_adapter_complete()
1371 kfree(aac->queues); in _aac_reset_adapter()
1372 aac->queues = NULL; in _aac_reset_adapter()
1732 add_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready, &wait); in aac_command_thread()
1736 spin_lock_irqsave(dev->queues->queue[HostNormCmdQueue].lock, flags); in aac_command_thread()
1737 while(!list_empty(&(dev->queues->queue[HostNormCmdQueue].cmdq))) { in aac_command_thread()
1743 entry = dev->queues->queue[HostNormCmdQueue].cmdq.next; in aac_command_thread()
[all …]
Ddpcsup.c87 atomic_dec(&dev->queues->queue[AdapNormCmdQueue].numpending); in aac_response_normal()
293 struct aac_queue *q = &dev->queues->queue[HostNormCmdQueue]; in aac_intr_normal()
357 atomic_dec(&dev->queues->queue[AdapNormCmdQueue].numpending); in aac_intr_normal()
Drx.c68 aac_command_normal(&dev->queues->queue[HostNormCmdQueue]); in aac_rx_intr_producer()
72 aac_response_normal(&dev->queues->queue[HostNormRespQueue]); in aac_rx_intr_producer()
402 struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue]; in aac_rx_deliver_producer()
425 struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue]; in aac_rx_deliver_message()
Dsa.c68 aac_command_normal(&dev->queues->queue[HostNormCmdQueue]); in aac_sa_intr()
71 aac_response_normal(&dev->queues->queue[HostNormRespQueue]); in aac_sa_intr()
Dlinit.c1308 kfree(aac->queues); in aac_probe_one()
1338 kfree(aac->queues); in aac_remove_one()
Dcommctrl.c316 if (status && !dev->in_reset && dev->queues && dev->fsa_dev) { in next_getadapter_fib()
Dsrc.c446 struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue]; in aac_src_deliver_message()
Daacraid.h1123 struct aac_queue_block *queues; member
/linux-4.1.27/Documentation/ABI/testing/
Dsysfs-class-net-queues1 What: /sys/class/<iface>/queues/rx-<queue>/rps_cpus
11 What: /sys/class/<iface>/queues/rx-<queue>/rps_flow_cnt
19 What: /sys/class/<iface>/queues/tx-<queue>/tx_timeout
27 What: /sys/class/<iface>/queues/tx-<queue>/tx_maxrate
35 What: /sys/class/<iface>/queues/tx-<queue>/xps_cpus
45 What: /sys/class/<iface>/queues/tx-<queue>/byte_queue_limits/hold_time
54 What: /sys/class/<iface>/queues/tx-<queue>/byte_queue_limits/inflight
62 What: /sys/class/<iface>/queues/tx-<queue>/byte_queue_limits/limit
71 What: /sys/class/<iface>/queues/tx-<queue>/byte_queue_limits/limit_max
80 What: /sys/class/<iface>/queues/tx-<queue>/byte_queue_limits/limit_min
/linux-4.1.27/Documentation/devicetree/bindings/soc/ti/
Dkeystone-navigator-qmss.txt9 management of the packet queues. Packets are queued/de-queued by writing or
31 -- managed-queues : the actual queues managed by each queue manager
32 instance, specified as <"base queue #" "# of queues">.
44 - qpend : pool of qpend(interruptible) queues
45 - general-purpose : pool of general queues, primarly used
46 as free descriptor queues or the
47 transmit DMA queues.
48 - accumulator : pool of queues on PDSP accumulator channel
50 -- qrange : number of queues to use per queue range, specified as
51 <"base queue #" "# of queues">.
[all …]
Dkeystone-navigator-dma.txt6 the actual data movements across clients using destination queues. Every
/linux-4.1.27/Documentation/block/
Dcfq-iosched.txt17 This specifies how long CFQ should idle for next request on certain cfq queues
22 queues/service trees. This can be very helpful on highly seeky media like
26 Setting slice_idle to 0 will remove all the idling on queues/service tree
76 queues in the group but happens overall on the group and thus still keeps the
78 Not idling on individual queues in the group will dispatch requests from
79 multiple queues in the group at the same time and achieve higher throughput
124 When a queue is selected for execution, the queues IO requests are only
209 it would be better to dispatch multiple requests from multiple cfq queues in
227 dispatch requests from other cfq queues even if requests are pending there.
236 CFQ has following service trees and various queues are put on these trees.
[all …]
Dnull_blk.txt16 - Configurable submission queues per device.
59 The number of submission queues attached to the device driver. If unset, it
69 0: The number of submit queues are set to the value of the submit_queues
Drequest.txt36 queues
Dbiodoc.txt1000 elevators may implement queues as they please.
1032 always the right thing to do. Devices typically have their own queues,
1054 per-queue, with a provision for sharing a lock across queues if
/linux-4.1.27/Documentation/networking/
Dmultiqueue.txt18 the subqueue memory, as well as netdev configuration of where the queues
21 The base driver will also need to manage the queues as it does the global
34 A new round-robin qdisc, sch_multiq also supports multiple hardware queues. The
36 bands and queues based on the value in skb->queue_mapping. Use this field in
43 On qdisc load, the number of bands is based on the number of queues on the
57 The qdisc will allocate the number of bands to equal the number of queues that
59 queues, the band mapping would look like:
Dscaling.txt23 Contemporary NICs support multiple receive and transmit descriptor queues
25 queues to distribute processing among CPUs. The NIC distributes packets by
43 Some advanced NICs allow steering packets to queues based on
51 module parameter for specifying the number of hardware queues to
54 for each CPU if the device supports enough queues, or otherwise at least
60 default mapping is to distribute the queues evenly in the table, but the
63 indirection table could be done to give different queues different
72 of queues to IRQs can be determined from /proc/interrupts. By default,
85 is to allocate as many queues as there are CPUs in the system (or the
87 is likely the one with the smallest number of receive queues where no
[all …]
Dtuntap.txt111 file descriptors (queues) to parallelize packets sending or receiving. The
113 queues, TUNSETIFF with the same device name must be called many times with
116 char *dev should be the name of the device, queues is the number of queues to
117 be created, fds is used to store and return the file descriptors (queues)
124 int tun_alloc_mq(char *dev, int queues, int *fds)
142 for (i = 0; i < queues; i++) {
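
Editor's note: the tuntap.txt hits above quote only fragments of the documented multiqueue example. A self-contained reconstruction follows, based on the interface the document describes (TUNSETIFF with IFF_MULTI_QUEUE, one open of /dev/net/tun per queue); treat it as a sketch rather than a verbatim copy of the file.

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/if.h>
#include <linux/if_tun.h>

/* Open one tap file descriptor per queue of an IFF_MULTI_QUEUE device. */
int tun_alloc_mq(char *dev, int queues, int *fds)
{
	struct ifreq ifr;
	int fd, err, i;

	if (!dev)
		return -1;

	memset(&ifr, 0, sizeof(ifr));
	/* TAP frames without packet info; each TUNSETIFF attaches one queue. */
	ifr.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_MULTI_QUEUE;
	strcpy(ifr.ifr_name, dev);

	for (i = 0; i < queues; i++) {
		fd = open("/dev/net/tun", O_RDWR);
		if (fd < 0) {
			err = fd;
			goto err;
		}
		err = ioctl(fd, TUNSETIFF, (void *)&ifr);
		if (err) {
			close(fd);
			goto err;
		}
		fds[i] = fd;	/* return one descriptor (queue) per slot */
	}
	return 0;

err:
	for (--i; i >= 0; i--)
		close(fds[i]);
	return err;
}
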
Dvxge.txt32 i) Single function mode (up to 17 queues)
63 x) Multiple hardware queues: (Enabled by default)
Ds2io.txt58 f. Multi-FIFO/Ring. Supports up to 8 transmit queues and receive rings,
63 Number of transmit queues
74 Valid range: Total length of all queues should not exceed 8192
Dtc-actions-env-rules.txt7 For example if your action queues a packet to be processed later,
Dnetdevices.txt90 Synchronization: netif_tx_lock spinlock; all TX queues frozen.
Dnetdev-FAQ.txt141 A: No, not for networking. Check the stable queues as per above 1st to see
Dopenvswitch.txt21 no match, it queues the packet to userspace for processing (as part of
Dixgbe.txt125 different queues. Enables tight control on routing a flow in the platform.
Drds.txt240 avoids allocation in the interrupt handling path which queues
Dip-sysctl.txt124 begins to remove incomplete fragment queues to free up resources.
146 result in unnecessarily dropping fragment queues when normal
211 packet for the flow is waiting in Qdisc queues or device transmit
Dppp_generic.txt156 The start_xmit function always accepts and queues the packet which it
Dbonding.txt1559 By default the bonding driver is multiqueue aware and 16 queues are created
1561 for details). If more or less queues are desired the module parameter
1595 arguments can be added to BONDING_OPTS to set all needed slave queues.
1619 a pass-through for selecting output queues on the slave device rather than
/linux-4.1.27/drivers/gpu/drm/amd/amdkfd/
Dkfd_process_queue_manager.c37 list_for_each_entry(pqn, &pqm->queues, process_queue_list) { in get_queue_by_qid()
77 INIT_LIST_HEAD(&pqm->queues); in pqm_init()
97 list_for_each_entry_safe(pqn, next, &pqm->queues, process_queue_list) { in pqm_uninit()
179 if (list_empty(&pqm->queues)) { in pqm_create_queue()
236 list_add(&pqn->process_queue_list, &pqm->queues); in pqm_create_queue()
251 if (list_empty(&pqm->queues)) in pqm_create_queue()
310 if (list_empty(&pqm->queues)) in pqm_destroy_queue()
Dkfd_process.c187 kfree(p->queues); in kfd_process_wq_release()
264 process->queues = kmalloc_array(INITIAL_QUEUE_ARRAY_SIZE, in create_process()
265 sizeof(process->queues[0]), GFP_KERNEL); in create_process()
266 if (!process->queues) in create_process()
312 kfree(process->queues); in create_process()
Dkfd_device_queue_manager.c414 list_add(&n->list, &dqm->queues); in register_process_nocpsch()
441 list_for_each_entry_safe(cur, next, &dqm->queues, list) { in unregister_process_nocpsch()
548 INIT_LIST_HEAD(&dqm->queues); in initialize_nocpsch()
711 INIT_LIST_HEAD(&dqm->queues); in initialize_cpsch()
754 list_for_each_entry(node, &dqm->queues, list) in start_cpsch()
778 list_for_each_entry(node, &dqm->queues, list) { in stop_cpsch()
1007 retval = pm_send_runlist(&dqm->packets, &dqm->queues); in execute_queues_cpsch()
Dkfd_device_queue_manager.h143 struct list_head queues; member
Dkfd_packet_manager.c237 struct list_head *queues, in pm_create_runlist_ib() argument
250 BUG_ON(!pm || !queues || !rl_size_bytes || !rl_gpu_addr); in pm_create_runlist_ib()
266 list_for_each_entry(cur, queues, list) { in pm_create_runlist_ib()
Dkfd_priv.h377 struct list_head queues; member
474 struct kfd_queue **queues; member
/linux-4.1.27/drivers/net/xen-netback/
Dinterface.c160 queue = &vif->queues[index]; in xenvif_start_xmit()
193 if (vif->queues == NULL) in xenvif_get_stats()
198 queue = &vif->queues[index]; in xenvif_get_stats()
221 queue = &vif->queues[queue_index]; in xenvif_up()
237 queue = &vif->queues[queue_index]; in xenvif_down()
347 void *vif_stats = &vif->queues[queue_index].stats; in xenvif_get_ethtool_stats()
420 vif->queues = NULL; in xenvif_alloc()
631 queue = &vif->queues[queue_index]; in xenvif_disconnect()
678 queue = &vif->queues[queue_index]; in xenvif_free()
682 vfree(vif->queues); in xenvif_free()
[all …]
Dxenbus.c206 &vif->queues[i], in xenvif_debugfs_addif()
667 struct xenvif_queue *queue = &vif->queues[queue_index]; in xen_net_rate_changed()
781 be->vif->queues = vzalloc(requested_num_queues * in connect()
787 queue = &be->vif->queues[queue_index]; in connect()
849 vfree(be->vif->queues); in connect()
850 be->vif->queues = NULL; in connect()
Dcommon.h237 struct xenvif_queue *queues; member
Dnetback.c675 if (vif->queues) in xenvif_fatal_tx_err()
676 xenvif_kick_thread(&vif->queues[0]); in xenvif_fatal_tx_err()
/linux-4.1.27/Documentation/devicetree/bindings/net/
Dfsl-fec.txt19 - fsl,num-tx-queues : The property is valid for enet-avb IP, which supports
20 hw multi queues. Should specify the tx queue number, otherwise set tx queue
22 - fsl,num-rx-queues : The property is valid for enet-avb IP, which supports
23 hw multi queues. Should specify the rx queue number, otherwise set rx queue
Dbrcm,systemport.txt7 interrupts, and the second cell should be for the transmit queues. An
18 - systemport,num-txq: number of HW transmit queues, an integer
19 - systemport,num-rxq: number of HW receive queues, an integer
Dbrcm,bcmgenet.txt9 RX and TX queues operating in ring mode
Dkeystone-netcp.txt126 present a maximum of 4 queues per Rx flow.
/linux-4.1.27/drivers/block/
Dnull_blk.c40 struct nullb_queue *queues; member
310 return &nullb->queues[index]; in nullb_to_queue()
381 struct nullb_queue *nq = &nullb->queues[index]; in null_init_hctx()
461 cleanup_queue(&nullb->queues[i]); in cleanup_queues()
463 kfree(nullb->queues); in cleanup_queues()
468 nullb->queues = kzalloc(submit_queues * sizeof(struct nullb_queue), in setup_queues()
470 if (!nullb->queues) in setup_queues()
485 nq = &nullb->queues[i]; in init_driver_queues()
Dnvme-core.c183 struct nvme_queue *nvmeq = dev->queues[0]; in nvme_admin_init_hctx()
197 struct nvme_queue *nvmeq = dev->queues[0]; in nvme_admin_init_request()
215 struct nvme_queue *nvmeq = dev->queues[ in nvme_init_hctx()
235 struct nvme_queue *nvmeq = dev->queues[hctx_idx + 1]; in nvme_init_request()
1022 struct nvme_queue *nvmeq = dev->queues[0]; in nvme_submit_async_admin_req()
1047 struct nvme_queue *nvmeq = dev->queues[0]; in nvme_submit_admin_async_cmd()
1252 if (nvme_submit_cmd(dev->queues[0], &cmd) < 0) { in nvme_abort_req()
1322 struct nvme_queue *nvmeq = dev->queues[i]; in nvme_free_queues()
1324 dev->queues[i] = NULL; in nvme_free_queues()
1368 struct nvme_queue *nvmeq = dev->queues[qid]; in nvme_disable_queue()
[all …]
/linux-4.1.27/fs/autofs4/
Dwaitq.c40 wq = sbi->queues; in autofs4_catatonic_mode()
41 sbi->queues = NULL; /* Erase all wait queues */ in autofs4_catatonic_mode()
230 for (wq = sbi->queues; wq; wq = wq->next) { in autofs4_find_wait()
424 wq->next = sbi->queues; in autofs4_wait()
425 sbi->queues = wq; in autofs4_wait()
544 for (wql = &sbi->queues; (wq = *wql) != NULL; wql = &wq->next) { in autofs4_wait_release()
Dautofs_i.h125 struct autofs_wait_queue *queues; /* Wait queue pointer */ member
Dinode.c238 sbi->queues = NULL; in autofs4_fill_super()
/linux-4.1.27/drivers/net/
Dxen-netfront.c154 struct netfront_queue *queues; member
346 queue = &np->queues[i]; in xennet_open()
535 queue = &np->queues[queue_index]; in xennet_start_xmit()
649 queue = &np->queues[i]; in xennet_close()
1206 xennet_interrupt(0, &info->queues[i]); in xennet_poll_controller()
1252 np->queues = NULL; in xennet_create_dev()
1345 struct netfront_queue *queue = &info->queues[i]; in xennet_disconnect_backend()
1698 struct netfront_queue *queue = &info->queues[i]; in xennet_destroy_queues()
1708 kfree(info->queues); in xennet_destroy_queues()
1709 info->queues = NULL; in xennet_destroy_queues()
[all …]
Dtun.c1608 int queues = ifr->ifr_flags & IFF_MULTI_QUEUE ? in tun_set_iff() local
1633 NET_NAME_UNKNOWN, tun_setup, queues, in tun_set_iff()
1634 queues); in tun_set_iff()
/linux-4.1.27/net/mac80211/
Dutil.c295 if (local->hw.queues < IEEE80211_NUM_ACS) in ieee80211_propagate_queue_wake()
333 if (WARN_ON(queue >= hw->queues)) in __ieee80211_wake_queue()
389 if (WARN_ON(queue >= hw->queues)) in __ieee80211_stop_queue()
400 if (local->hw.queues < IEEE80211_NUM_ACS) in __ieee80211_stop_queue()
487 for (i = 0; i < hw->queues; i++) in ieee80211_add_pending_skbs()
495 unsigned long queues, in ieee80211_stop_queues_by_reason() argument
505 for_each_set_bit(i, &queues, hw->queues) in ieee80211_stop_queues_by_reason()
525 if (WARN_ON(queue >= hw->queues)) in ieee80211_queue_stopped()
537 unsigned long queues, in ieee80211_wake_queues_by_reason() argument
547 for_each_set_bit(i, &queues, hw->queues) in ieee80211_wake_queues_by_reason()
[all …]
Dwme.c126 if (local->hw.queues < IEEE80211_NUM_ACS) in ieee80211_select_queue_80211()
155 if (local->hw.queues < IEEE80211_NUM_ACS || skb->len < 6) { in ieee80211_select_queue()
Ddebugfs.c166 for (q = 0; q < local->hw.queues; q++) in queues_read()
176 DEBUGFS_READONLY_FILE_OPS(queues);
244 DEBUGFS_ADD(queues); in debugfs_hw_add()
Dmain.c574 local->hw.queues = 1; in ieee80211_alloc_hw_nm()
800 local->hw.offchannel_tx_hw_queue >= local->hw.queues)) in ieee80211_register_hw()
1014 if (hw->queues > IEEE80211_MAX_QUEUES) in ieee80211_register_hw()
1015 hw->queues = IEEE80211_MAX_QUEUES; in ieee80211_register_hw()
Diface.c324 int n_queues = sdata->local->hw.queues; in ieee80211_check_queues()
383 else if (local->hw.queues >= IEEE80211_NUM_ACS) in ieee80211_set_default_queues()
726 if (local->hw.queues < IEEE80211_NUM_ACS) in ieee80211_do_open()
1141 if (local->hw.queues < IEEE80211_NUM_ACS) in ieee80211_monitor_select_queue()
1700 if (local->hw.queues >= IEEE80211_NUM_ACS) in ieee80211_if_add()
Dtrace.h1000 u32 queues, bool drop),
1002 TP_ARGS(local, queues, drop),
1007 __field(u32, queues)
1013 __entry->queues = queues;
1018 LOCAL_PR_ARG, __entry->queues, __entry->drop
Ddriver-ops.h791 u32 queues, bool drop) in drv_flush() argument
800 trace_drv_flush(local, queues, drop); in drv_flush()
802 local->ops->flush(&local->hw, vif, queues, drop); in drv_flush()
Dtx.c1322 if (WARN_ON_ONCE(q >= local->hw.queues)) { in ieee80211_tx_frags()
2480 for (i = 0; i < local->hw.queues; i++) { in ieee80211_clear_tx_pending()
2539 for (i = 0; i < local->hw.queues; i++) { in ieee80211_tx_pending()
3273 u32 queues; in ieee80211_reserve_tid() local
3314 queues = BIT(sdata->vif.hw_queue[ieee802_1d_to_ac[tid]]); in ieee80211_reserve_tid()
3315 __ieee80211_flush_queues(local, sdata, queues, false); in ieee80211_reserve_tid()
Dieee80211_i.h1864 unsigned long queues,
1874 unsigned long queues,
1892 unsigned int queues, bool drop);
Dmlme.c1586 for (q = 0; q < local->hw.queues; q++) { in ieee80211_dynamic_ps_enable_work()
1655 if (local->hw.queues < IEEE80211_NUM_ACS) in __ieee80211_sta_handle_tspec_ac_params()
1751 if (local->hw.queues < IEEE80211_NUM_ACS) in ieee80211_sta_wmm_params()
3006 sta->sta.wme = elems.wmm_param && local->hw.queues >= IEEE80211_NUM_ACS; in ieee80211_assoc_success()
4676 (local->hw.queues >= IEEE80211_NUM_ACS); in ieee80211_mgd_assoc()
4740 local->hw.queues < IEEE80211_NUM_ACS || !bss->wmm_used || in ieee80211_mgd_assoc()
4751 local->hw.queues < IEEE80211_NUM_ACS || !bss->wmm_used || in ieee80211_mgd_assoc()
Dtdls.c333 if (local->hw.queues >= IEEE80211_NUM_ACS && in ieee80211_tdls_add_setup_start_ies()
508 if (local->hw.queues >= IEEE80211_NUM_ACS && sta->sta.wme) in ieee80211_tdls_add_setup_cfm_ies()
Dibss.c204 if (local->hw.queues >= IEEE80211_NUM_ACS) in ieee80211_ibss_build_presp()
1035 if (sta && elems->wmm_info && local->hw.queues >= IEEE80211_NUM_ACS) in ieee80211_update_sta_info()
Dcfg.c1063 local->hw.queues >= IEEE80211_NUM_ACS) in sta_apply_parameters()
1929 if (local->hw.queues < IEEE80211_NUM_ACS) in ieee80211_set_txq_params()
/linux-4.1.27/drivers/scsi/arm/
Dfas216.c207 info->stats.queues, info->stats.removes, info->stats.fins, in fas216_dumpinfo()
995 queue_add_cmd_tail(&info->queues.disconnected, info->SCpnt); in fas216_reselected_intr()
1001 info->SCpnt = queue_remove_tgtluntag(&info->queues.disconnected, in fas216_reselected_intr()
1926 SCpnt = queue_remove_exclude(&info->queues.issue, in fas216_kick()
1950 queue_add_cmd_tail(&info->queues.disconnected, info->SCpnt); in fas216_kick()
2216 info->stats.queues += 1; in fas216_queue_command_lck()
2225 result = !queue_add_cmd_ordered(&info->queues.issue, SCpnt); in fas216_queue_command_lck()
2353 if (queue_remove_cmd(&info->queues.issue, SCpnt)) { in fas216_find_command()
2363 } else if (queue_remove_cmd(&info->queues.disconnected, SCpnt)) { in fas216_find_command()
2497 queue_remove_all_target(&info->queues.issue, target); in fas216_eh_device_reset()
[all …]
Dacornscsi.h305 unsigned int queues; member
320 } queues; member
Dacornscsi.c708 SCpnt = queue_remove_exclude(&host->queues.issue, host->busyluns); in acornscsi_kick()
716 queue_add_cmd_tail(&host->queues.disconnected, host->SCpnt); in acornscsi_kick()
1797 if (!ok && queue_probetgtlun(&host->queues.disconnected, target, lun)) in acornscsi_reconnect()
1812 queue_add_cmd_tail(&host->queues.disconnected, host->SCpnt); in acornscsi_reconnect()
1839 queue_add_cmd_tail(&host->queues.disconnected, host->SCpnt); in acornscsi_reconnect_finish()
1849 host->SCpnt = queue_remove_tgtluntag(&host->queues.disconnected, in acornscsi_reconnect_finish()
2501 host->stats.queues += 1; in acornscsi_queuecmd_lck()
2506 if (!queue_add_cmd_ordered(&host->queues.issue, SCpnt)) { in acornscsi_queuecmd_lck()
2558 if (queue_remove_cmd(&host->queues.issue, SCpnt)) { in acornscsi_do_abort()
2569 } else if (queue_remove_cmd(&host->queues.disconnected, SCpnt)) { in acornscsi_do_abort()
[all …]
Dfas216.h258 unsigned int queues; member
286 } queues; member
/linux-4.1.27/Documentation/devicetree/bindings/mailbox/
Domap-mailbox.txt10 Each mailbox IP block has a certain number of h/w fifo queues and output
21 The number of h/w fifo queues and interrupt lines dictate the usable registers.
23 instance. DRA7xx has multiple instances with different number of h/w fifo queues
51 - ti,mbox-num-fifos: Number of h/w fifo queues within the mailbox IP block
/linux-4.1.27/drivers/net/ethernet/cadence/
Dmacb.c510 (unsigned int)(queue - bp->queues), in macb_tx_error_task()
610 u16 queue_index = queue - bp->queues; in macb_tx_interrupt()
1004 (unsigned int)(queue - bp->queues), in macb_interrupt()
1102 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) in macb_poll_controller()
1247 struct macb_queue *queue = &bp->queues[queue_index]; in macb_start_xmit()
1374 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { in macb_free_consistent()
1422 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { in macb_alloc_consistent()
1465 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { in gem_init_rings()
1495 bp->queues[0].tx_ring[i].addr = 0; in macb_init_rings()
1496 bp->queues[0].tx_ring[i].ctrl = MACB_BIT(TX_USED); in macb_init_rings()
[all …]
Dmacb.h792 struct macb_queue queues[MACB_MAX_QUEUES]; member
/linux-4.1.27/drivers/net/wireless/iwlwifi/mvm/
Dtime-event.c102 u32 queues = 0; in iwl_mvm_roc_done_wk() local
112 queues |= BIT(IWL_MVM_OFFCHANNEL_QUEUE); in iwl_mvm_roc_done_wk()
116 queues |= BIT(mvm->aux_queue); in iwl_mvm_roc_done_wk()
132 iwl_mvm_flush_tx_path(mvm, queues, false); in iwl_mvm_roc_done_wk()
Dmac80211.c434 hw->queues = mvm->first_agg_queue; in iwl_mvm_mac_setup_register()
3849 struct ieee80211_vif *vif, u32 queues, bool drop) in iwl_mvm_mac_flush() argument
/linux-4.1.27/Documentation/cgroups/
Dblkio-controller.txt226 scheduler queues for service. This can be greater than the total time
253 queues of this cgroup gets a timeslice.
259 its queues. This is different from the io_wait_time which is the
270 spent idling for one of the queues of the cgroup. This is in
279 from other queues/cgroups. This is in nanoseconds. If this is read
371 That means CFQ will not idle between cfq queues of a cfq group and hence be
378 If one disables idling on individual cfq queues and cfq service trees by
392 - Currently only sync IO queues are support. All the buffered writes are
/linux-4.1.27/drivers/soc/ti/
DKconfig15 is responsible for accelerating management of the packet queues.
/linux-4.1.27/Documentation/devicetree/bindings/misc/
Dfsl,qoriq-mc.txt7 queues, buffer pools, I/O interfaces. These resources are building
/linux-4.1.27/Documentation/device-mapper/
Dcache-policies.txt33 The multiqueue policy has three sets of 16 queues: one set for entries
37 Cache entries in the queues are aged based on logical time. Entry into
/linux-4.1.27/drivers/dma/
Dcppi41.c835 const struct chan_queues *queues; in cpp41_dma_filter_fn() local
850 queues = cdd->queues_tx; in cpp41_dma_filter_fn()
852 queues = cdd->queues_rx; in cpp41_dma_filter_fn()
858 cchan->q_num = queues[cchan->port_num].submit; in cpp41_dma_filter_fn()
859 cchan->q_comp_num = queues[cchan->port_num].complete; in cpp41_dma_filter_fn()
/linux-4.1.27/drivers/net/wireless/ath/carl9170/
Dmain.c229 for (i = 0; i < ar->hw->queues; i++) { in carl9170_flush()
286 for (i = 0; i < ar->hw->queues; i++) { in carl9170_zap_queues()
307 for (i = 0; i < ar->hw->queues; i++) in carl9170_zap_queues()
368 for (i = 0; i < ar->hw->queues; i++) { in carl9170_op_start()
1391 if (queue < ar->hw->queues) { in carl9170_op_conf_tx()
1703 u32 queues, bool drop) in carl9170_op_flush() argument
1813 hw->queues = __AR9170_NUM_TXQ; in carl9170_alloc()
1824 for (i = 0; i < ar->hw->queues; i++) { in carl9170_alloc()
Dtx.c96 for (i = 0; i < ar->hw->queues; i++) { in carl9170_tx_accounting()
168 for (i = 0; i < ar->hw->queues; i++) { in carl9170_tx_accounting_free()
566 for (i = 0; i < ar->hw->queues; i++) { in carl9170_check_queue_stop_timeout()
1349 for (i = 0; i < ar->hw->queues; i++) { in carl9170_tx()
Ddebug.c401 for (i = 0; i < ar->hw->queues; i++) { in carl9170_debugfs_tx_stuck_read()
/linux-4.1.27/include/linux/
Dnvme.h72 struct nvme_queue **queues; member
Dnetdevice.h717 u16 queues[0]; member
/linux-4.1.27/net/core/
Dflow_dissector.c435 queue_index = map->queues[0]; in get_xps_queue()
437 queue_index = map->queues[reciprocal_scale(skb_get_hash(skb), in get_xps_queue()
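
Editor's note: the two get_xps_queue() hits above carry the core of XPS transmit queue selection. The sketch below (helper name and reduced signature are mine) shows the choice they implement: a one-entry map is used directly, otherwise the skb hash is scaled into the map.

/*
 * Sketch only: the real get_xps_queue() also looks up the per-CPU XPS
 * map under RCU and validates the result against real_num_tx_queues.
 */
static int xps_map_pick(struct xps_map *map, struct sk_buff *skb)
{
	if (map->len == 1)
		return map->queues[0];

	return map->queues[reciprocal_scale(skb_get_hash(skb), map->len)];
}
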
Ddev.c1914 if (map->queues[pos] == index) { in remove_xps_queue()
1916 map->queues[pos] = map->queues[--map->len]; in remove_xps_queue()
1971 if (map->queues[pos] != index) in expand_xps_map()
1991 new_map->queues[i] = map->queues[i]; in expand_xps_map()
2042 while ((pos < map->len) && (map->queues[pos] != index)) in netif_set_xps_queue()
2046 map->queues[map->len++] = index; in netif_set_xps_queue()
Dnet-sysfs.c1176 if (map->queues[j] == index) { in show_xps_map()
/linux-4.1.27/drivers/net/wireless/cw1200/
Dsta.h44 u32 queues, bool drop);
Dmain.c303 hw->queues = 4; in cw1200_init_common()
Dsta.c625 if (queue < dev->queues) { in cw1200_conf_tx()
942 u32 queues, bool drop) in cw1200_flush() argument
/linux-4.1.27/drivers/net/wireless/p54/
Dmain.c419 if (queue < dev->queues) { in p54_conf_tx()
675 u32 queues, bool drop) in p54_flush() argument
769 dev->queues = 1; in p54_init_common()
Dfwio.c155 priv->hw->queues = P54_QUEUE_AC_NUM; in p54_parse_firmware()
Dtxrx.c177 for (i = 0; i < priv->hw->queues; i++) { in p54_wake_queues()
/linux-4.1.27/Documentation/RCU/
Drcubarrier.txt200 queues. His implementation queues an RCU callback on each of the per-CPU
201 callback queues, and then waits until they have all started executing, at
318 callback queues, things will have to change. One simple change
Dtrace.txt160 queues by rcu_do_batch(), but which have not yet been
/linux-4.1.27/arch/sh/
DKconfig.cpu67 the store queues integrated in the SH-4 processors.
/linux-4.1.27/Documentation/sysctl/
Dfs.txt253 3. /proc/sys/fs/mqueue - POSIX message queues filesystem
257 creation of a user space library that implements the POSIX message queues
265 maximum number of message queues allowed on the system.
/linux-4.1.27/Documentation/sound/alsa/
DProcfile.txt195 seq/queues
196 Lists the currently allocated/running sequencer queues.
/linux-4.1.27/include/uapi/sound/
Dasequencer.h312 int queues; /* maximum queues count */ member
/linux-4.1.27/Documentation/scsi/
Dlpfc.txt48 queuing. Removing the queues from the LLDD makes a more predictable
Dscsi_eh.txt190 4. Kicks queues in all devices on the host in the asses
DChangeLog.ncr53c8xx438 - Resources management using doubly linked queues.
DChangeLog.lpfc28 find command in both TX and TX completion queues. Return ERROR
/linux-4.1.27/drivers/net/wireless/ath/ath9k/
Dinit.c807 hw->queues = ATH9K_NUM_TX_QUEUES; in ath9k_set_mcc_capab()
808 hw->offchannel_tx_hw_queue = hw->queues - 1; in ath9k_set_mcc_capab()
878 hw->queues = 4; in ath9k_set_hw_capab()
Dmain.c1222 vif->cab_queue = hw->queues - 2; in ath9k_assign_hw_queues()
2008 u32 queues, bool drop) in ath9k_flush() argument
2027 __ath9k_flush(hw, queues, drop, true, true); in ath9k_flush()
2032 __ath9k_flush(hw, queues, drop, true, false); in ath9k_flush()
2036 void __ath9k_flush(struct ieee80211_hw *hw, u32 queues, bool drop, in __ath9k_flush() argument
Dchannel.c1367 ieee80211_stop_queue(sc->hw, sc->hw->queues - 2); in ath9k_chanctx_stop_queues()
1386 ieee80211_wake_queue(sc->hw, sc->hw->queues - 2); in ath9k_chanctx_wake_queues()
Dath9k.h724 void __ath9k_flush(struct ieee80211_hw *hw, u32 queues, bool drop,
Dhtc_drv_init.c751 hw->queues = 4; in ath9k_set_hw_capab()
/linux-4.1.27/drivers/usb/gadget/udc/
Dpxa27x_udc.c247 struct dentry *root, *state, *queues, *eps; in pxa_init_debugfs() local
257 queues = debugfs_create_file("queues", 0400, root, udc, in pxa_init_debugfs()
259 if (!queues) in pxa_init_debugfs()
268 udc->debugfs_queues = queues; in pxa_init_debugfs()
274 debugfs_remove(queues); in pxa_init_debugfs()
Dnet2280.c1757 static DEVICE_ATTR_RO(queues);
/linux-4.1.27/drivers/net/wireless/rtl818x/rtl8180/
Ddev.c1123 for (i = 0; i < (dev->queues + 1); i++) in rtl8180_start()
1233 for (i = 0; i < (dev->queues + 1); i++) in rtl8180_start()
1263 for (i = 0; i < (dev->queues + 1); i++) in rtl8180_stop()
1861 dev->queues = RTL8187SE_NR_TX_QUEUES - 1; in rtl8180_probe()
1863 dev->queues = RTL8180_NR_TX_QUEUES - 1; in rtl8180_probe()
/linux-4.1.27/drivers/net/ethernet/sfc/
Dfarch.c1537 u32 queues; in efx_farch_legacy_interrupt() local
1542 queues = EFX_EXTRACT_DWORD(reg, 0, 31); in efx_farch_legacy_interrupt()
1555 if (queues & (1U << efx->irq_level) && soft_enabled) { in efx_farch_legacy_interrupt()
1562 if (queues != 0) { in efx_farch_legacy_interrupt()
1568 if (queues & 1) in efx_farch_legacy_interrupt()
1570 queues >>= 1; in efx_farch_legacy_interrupt()
Def10.c1170 u32 queues; in efx_ef10_legacy_interrupt() local
1174 queues = EFX_DWORD_FIELD(reg, ERF_DZ_ISR_REG); in efx_ef10_legacy_interrupt()
1176 if (queues == 0) in efx_ef10_legacy_interrupt()
1181 if (queues & (1U << efx->irq_level)) in efx_ef10_legacy_interrupt()
1185 if (queues & 1) in efx_ef10_legacy_interrupt()
1187 queues >>= 1; in efx_ef10_legacy_interrupt()
Dfalcon.c434 int queues; in falcon_legacy_interrupt_a1() local
462 queues = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_INT_Q); in falcon_legacy_interrupt_a1()
467 if (queues & 1) in falcon_legacy_interrupt_a1()
469 if (queues & 2) in falcon_legacy_interrupt_a1()
/linux-4.1.27/Documentation/locking/
Drt-mutex.txt34 priority waiters list. This list too queues in priority order. Whenever
/linux-4.1.27/Documentation/filesystems/
Dinotify.txt47 - We'd have to maintain n fd's and n internal queues with state,
Dcoda.txt287 determine the position of the message on queues and pointers to
1604 1. message queues
1611 can easily be manipulated. The message queues will generally have
1633 1. The message queues should have open and close routines. On Unix
1643 +o Close will free all memory allocated by the message queues.
1648 3. Before the message queues are open, all VFS operations will fail.
1652 4. After closing of the queues, no VFS operations can succeed. Here
Dspufs.txt15 message queues. Users that have write permissions on the file system
Dxfs-delayed-logging-design.txt678 serialisation queues. They use the same lock as the CIL, too. If we see too
/linux-4.1.27/drivers/net/wireless/ti/wlcore/
Dconf.h1267 u8 queues; member
Dacx.c1506 conf_queues = wl->conf.rx_streaming.queues; in wl1271_acx_ps_rx_streaming()
Dmain.c5515 u32 queues, bool drop) in wlcore_op_flush() argument
6153 wl->hw->queues = (NUM_TX_QUEUES + 1) * WLCORE_NUM_MAC_ADDRESSES + 1; in wl1271_init_ieee80211()
6156 wl->hw->offchannel_tx_hw_queue = wl->hw->queues - 1; in wl1271_init_ieee80211()
/linux-4.1.27/drivers/net/ethernet/intel/ixgbe/
Dixgbe_main.c2543 static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues, in ixgbe_irq_enable() argument
2587 if (queues) in ixgbe_irq_enable()
4581 unsigned int rxbase, txbase, queues; in ixgbe_fwd_ring_up() local
4611 queues = min_t(unsigned int, in ixgbe_fwd_ring_up()
4613 err = netif_set_real_num_tx_queues(vdev, queues); in ixgbe_fwd_ring_up()
4617 err = netif_set_real_num_rx_queues(vdev, queues); in ixgbe_fwd_ring_up()
5675 int err, queues; in ixgbe_open() local
5701 queues = adapter->num_rx_queues_per_pool; in ixgbe_open()
5703 queues = adapter->num_tx_queues; in ixgbe_open()
5705 err = netif_set_real_num_tx_queues(netdev, queues); in ixgbe_open()
[all …]
/linux-4.1.27/Documentation/usb/
Dhotplug.txt15 queues may need to be enabled, networks brought up, disk
Dehci.txt118 usb-storage doing disk I/O; watch the request queues!)
/linux-4.1.27/Documentation/
Dstable_kernel_rules.txt121 - The queues of patches, for both completed versions and in progress
Dvme_api.txt295 The following function queues a list for execution. The function will return
Dkernel-docs.txt163 Keywords: interrupts, irqs, DMA, bottom halves, task queues.
292 event queues.
DHOWTO307 in use, or patch queues being published as quilt series. Addresses of
/linux-4.1.27/init/
DKconfig246 POSIX variant of message queues is a part of IPC. In POSIX message
247 queues every message has a priority which decides about succession
250 queues (functions mq_*) say Y here.
252 POSIX message queues are visible as a filesystem called 'mqueue'
254 operations on message queues.
1694 per cpu and per node queues.
1700 instead of managing queues of cached objects (SLAB approach).
1702 of queues of objects. SLUB can use memory efficiently
/linux-4.1.27/Documentation/misc-devices/mei/
Dmei.txt62 handles this internally by maintaining request queues for the applications.
/linux-4.1.27/drivers/net/wireless/rt2x00/
Drt2x00mac.c756 u32 queues, bool drop) in rt2x00mac_flush() argument
Drt2x00.h1459 u32 queues, bool drop);
Drt2x00dev.c1047 rt2x00dev->hw->queues = rt2x00dev->ops->tx_queues; in rt2x00lib_probe_hw()
/linux-4.1.27/drivers/net/wireless/iwlwifi/dvm/
Dmac80211.c197 hw->queues = IWLAGN_FIRST_AMPDU_QUEUE; in iwlagn_mac_setup_register()
1096 u32 queues, bool drop) in iwlagn_mac_flush() argument
/linux-4.1.27/drivers/net/wireless/libertas_tf/
Dmain.c639 hw->queues = 1; in lbtf_add_card()
/linux-4.1.27/include/net/
Dmac80211.h1976 u16 queues; member
3286 u32 queues, bool drop);
/linux-4.1.27/drivers/net/wireless/brcm80211/brcmsmac/
Dmac80211_if.c904 u32 queues, bool drop) in brcms_ops_flush() argument
1072 hw->queues = N_TX_QUEUES; in ieee_hw_init()
/linux-4.1.27/Documentation/timers/
Dhrtimers.txt101 queues while keeping the time-order intact.)
Dhighres.txt195 The softirq for running the hrtimer queues and executing the callbacks has been
/linux-4.1.27/drivers/scsi/qla4xxx/
Dql4_def.h691 void *queues; member
Dql4_os.c4130 if (ha->queues) in qla4xxx_mem_free()
4131 dma_free_coherent(&ha->pdev->dev, ha->queues_len, ha->queues, in qla4xxx_mem_free()
4138 ha->queues = NULL; in qla4xxx_mem_free()
4201 ha->queues = dma_alloc_coherent(&ha->pdev->dev, ha->queues_len, in qla4xxx_mem_alloc()
4203 if (ha->queues == NULL) { in qla4xxx_mem_alloc()
4209 memset(ha->queues, 0, ha->queues_len); in qla4xxx_mem_alloc()
4222 ha->request_ring = (struct queue_entry *) (ha->queues + align); in qla4xxx_mem_alloc()
4225 ha->response_ring = (struct queue_entry *) (ha->queues + align + in qla4xxx_mem_alloc()
4231 ha->shadow_regs = (struct shadow_regs *) (ha->queues + align + in qla4xxx_mem_alloc()
/linux-4.1.27/drivers/net/wireless/ath/ar5523/
Dar5523.c1095 u32 queues, bool drop) in ar5523_flush() argument
1692 hw->queues = 1; in ar5523_probe()
/linux-4.1.27/drivers/net/wireless/rtl818x/rtl8187/
Ddev.c1621 dev->queues = 1; in rtl8187_probe()
1623 dev->queues = 4; in rtl8187_probe()
/linux-4.1.27/arch/arm/boot/dts/
Dimx6sx.dtsi793 fsl,num-tx-queues=<3>;
794 fsl,num-rx-queues=<3>;
/linux-4.1.27/drivers/net/wireless/ath/wcn36xx/
Dmain.c973 wcn->hw->queues = 4; in wcn36xx_init_ieee80211()
/linux-4.1.27/drivers/net/wireless/rsi/
Drsi_91x_mac80211.c1070 hw->queues = MAX_HW_QUEUES; in rsi_mac80211_attach()
/linux-4.1.27/drivers/net/wireless/rtlwifi/
Dcore.c1716 u32 queues, in rtl_op_flush() argument
1722 rtlpriv->intf_ops->flush(hw, queues, drop); in rtl_op_flush()
Dpci.c1754 static void rtl_pci_flush(struct ieee80211_hw *hw, u32 queues, bool drop) in rtl_pci_flush() argument
1770 if (((queues >> queue_id) & 0x1) == 0) { in rtl_pci_flush()
Dbase.c424 hw->queues = AC_MAX; in _rtl_init_mac80211()
Dwifi.h2200 void (*flush)(struct ieee80211_hw *hw, u32 queues, bool drop);
/linux-4.1.27/Documentation/powerpc/
Dcxl.txt241 queues the WED may describe.
/linux-4.1.27/drivers/net/ethernet/chelsio/cxgb4/
Dcxgb4.h1075 int cxgb4_write_rss(const struct port_info *pi, const u16 *queues);
Dcxgb4_main.c860 int cxgb4_write_rss(const struct port_info *pi, const u16 *queues) in cxgb4_write_rss() argument
871 for (i = 0; i < pi->rss_size; i++, queues++) in cxgb4_write_rss()
872 rss[i] = q[*queues].rspq.abs_id; in cxgb4_write_rss()
/linux-4.1.27/Documentation/nfc/
Dnfc-hci.txt190 Only when llc_shdlc is used: handles shdlc rx & tx queues.
/linux-4.1.27/drivers/net/wireless/ath/ath5k/
Dbase.c3062 hw->queues = 4; in ath5k_init()
3071 hw->queues = 1; in ath5k_init()
/linux-4.1.27/drivers/usb/gadget/legacy/
DKconfig114 driver, so that deep I/O queues can be supported. On 2.4 kernels,
/linux-4.1.27/drivers/net/wireless/ti/wl12xx/
Dmain.c330 .queues = 0x1,
/linux-4.1.27/Documentation/filesystems/caching/
Dfscache.txt282 Ops pend=N Number of times async ops added to pending queues
/linux-4.1.27/drivers/net/wireless/zd1211rw/
Dzd_mac.c1412 hw->queues = 1; in zd_mac_alloc_hw()
/linux-4.1.27/drivers/net/wireless/ti/wl1251/
Dmain.c1491 wl->hw->queues = 4; in wl1251_init_ieee80211()
/linux-4.1.27/drivers/net/wireless/
Dmac80211_hwsim.c1849 u32 queues, bool drop) in mac80211_hwsim_flush() argument
2384 hw->queues = 5; in mac80211_hwsim_new_radio()
Dadm8211.c1871 dev->queues = 1; /* ADM8211C supports more, maybe ADM8211B too */ in adm8211_probe()
Dmwl8k.c6076 hw->queues = MWL8K_TX_WMM_QUEUES; in mwl8k_firmware_load_success()
/linux-4.1.27/drivers/net/wireless/ti/wl18xx/
Dmain.c455 .queues = 0x1,
/linux-4.1.27/drivers/net/usb/
DKconfig115 that supports deep queues for efficient transfers. (This gives
/linux-4.1.27/drivers/net/wireless/b43/
Dmain.c2597 wl->hw->queues = B43_QOS_QUEUE_NUM; in b43_request_firmware()
2599 wl->hw->queues = 1; in b43_request_firmware()
2710 dev->qos_enabled = dev->wl->hw->queues > 1; in b43_upload_microcode()
/linux-4.1.27/Documentation/security/
Dcredentials.txt43 - Message queues
DSmack.txt463 IPC objects, message queues, semaphore sets, and memory segments exist in flat
/linux-4.1.27/drivers/net/wireless/iwlegacy/
Dcommon.h1727 u32 queues, bool drop);
D3945-mac.c3581 hw->queues = 4; in il3945_setup_mac()
Dcommon.c4761 u32 queues, bool drop) in il_mac_flush() argument
D4965-mac.c5784 hw->queues = 4; in il4965_mac_setup_register()
/linux-4.1.27/Documentation/cdrom/
Dcdrom-standard.tex320 the information from $media_changed()$ to two separate queues. Other
394 queues for the VFS and a new $ioctl()$ function that can report device
/linux-4.1.27/Documentation/spi/
Dspi-summary134 SPI requests always go into I/O queues. Requests for a given SPI device
/linux-4.1.27/sound/core/seq/
Dseq_clientmgr.c1137 info.queues = SNDRV_SEQ_MAX_QUEUES; in snd_seq_ioctl_system_info()
/linux-4.1.27/drivers/net/wireless/ath/ath10k/
Dmac.c4486 u32 queues, bool drop) in ath10k_flush() argument
5566 ar->hw->queues = 4; in ath10k_mac_register()
/linux-4.1.27/Documentation/power/
Ddevices.txt243 before reactivating its class I/O queues.
Dpci.txt1003 device the PM core automatically queues a request to check if the device is
/linux-4.1.27/drivers/net/wireless/b43legacy/
Dmain.c3846 hw->queues = 1; /* FIXME: hardware has more queues */ in b43legacy_wireless_init()
/linux-4.1.27/net/netfilter/
DKconfig818 As opposed to QUEUE, it supports 65535 different queues,
/linux-4.1.27/
DCREDITS313 D: POSIX message queues fs (with M. Wronski)
3957 D: POSIX message queues fs (with K. Benedyczak)
/linux-4.1.27/drivers/scsi/aic7xxx/
Daic79xx.reg3722 * Per "other-id" execution queues. We use an array of