/linux-4.1.27/net/sched/ |
D | sch_multiq.c | 35 struct Qdisc **queues; member 62 return q->queues[0]; in multiq_classify() 64 return q->queues[band]; in multiq_classify() 112 qdisc = q->queues[q->curband]; in multiq_dequeue() 144 qdisc = q->queues[curband]; in multiq_peek() 162 qdisc = q->queues[band]; in multiq_drop() 182 qdisc_reset(q->queues[band]); in multiq_reset() 195 qdisc_destroy(q->queues[band]); in multiq_destroy() 197 kfree(q->queues); in multiq_destroy() 218 if (q->queues[i] != &noop_qdisc) { in multiq_tune() [all …]
|
D | sch_prio.c | 29 struct Qdisc *queues[TCQ_PRIO_BANDS]; member 58 return q->queues[q->prio2band[band & TC_PRIO_MAX]]; in prio_classify() 64 return q->queues[q->prio2band[0]]; in prio_classify() 66 return q->queues[band]; in prio_classify() 102 struct Qdisc *qdisc = q->queues[prio]; in prio_peek() 116 struct Qdisc *qdisc = q->queues[prio]; in prio_dequeue() 136 qdisc = q->queues[prio]; in prio_drop() 153 qdisc_reset(q->queues[prio]); in prio_reset() 165 qdisc_destroy(q->queues[prio]); in prio_destroy() 191 struct Qdisc *child = q->queues[i]; in prio_tune() [all …]
|
D | Kconfig | 114 to support devices that have multiple hardware transmit queues.
|
/linux-4.1.27/drivers/scsi/aacraid/ |
D | comminit.c | 274 struct aac_entry * queues; in aac_comm_init() local 276 struct aac_queue_block * comm = dev->queues; in aac_comm_init() 295 queues = (struct aac_entry *)(((ulong)headers) + hdrsize); in aac_comm_init() 298 comm->queue[HostNormCmdQueue].base = queues; in aac_comm_init() 300 queues += HOST_NORM_CMD_ENTRIES; in aac_comm_init() 304 comm->queue[HostHighCmdQueue].base = queues; in aac_comm_init() 307 queues += HOST_HIGH_CMD_ENTRIES; in aac_comm_init() 311 comm->queue[AdapNormCmdQueue].base = queues; in aac_comm_init() 314 queues += ADAP_NORM_CMD_ENTRIES; in aac_comm_init() 318 comm->queue[AdapHighCmdQueue].base = queues; in aac_comm_init() [all …]
|
D | commsup.c | 325 q = &dev->queues->queue[qid]; in aac_get_entry() 524 if (!dev->queues) in aac_fib_send() 596 struct aac_queue * q = &dev->queues->queue[AdapNormCmdQueue]; in aac_fib_send() 777 q = &dev->queues->queue[AdapNormRespQueue]; in aac_fib_adapter_complete() 1371 kfree(aac->queues); in _aac_reset_adapter() 1372 aac->queues = NULL; in _aac_reset_adapter() 1732 add_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready, &wait); in aac_command_thread() 1736 spin_lock_irqsave(dev->queues->queue[HostNormCmdQueue].lock, flags); in aac_command_thread() 1737 while(!list_empty(&(dev->queues->queue[HostNormCmdQueue].cmdq))) { in aac_command_thread() 1743 entry = dev->queues->queue[HostNormCmdQueue].cmdq.next; in aac_command_thread() [all …]
|
D | dpcsup.c | 87 atomic_dec(&dev->queues->queue[AdapNormCmdQueue].numpending); in aac_response_normal() 293 struct aac_queue *q = &dev->queues->queue[HostNormCmdQueue]; in aac_intr_normal() 357 atomic_dec(&dev->queues->queue[AdapNormCmdQueue].numpending); in aac_intr_normal()
|
D | rx.c | 68 aac_command_normal(&dev->queues->queue[HostNormCmdQueue]); in aac_rx_intr_producer() 72 aac_response_normal(&dev->queues->queue[HostNormRespQueue]); in aac_rx_intr_producer() 402 struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue]; in aac_rx_deliver_producer() 425 struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue]; in aac_rx_deliver_message()
|
D | sa.c | 68 aac_command_normal(&dev->queues->queue[HostNormCmdQueue]); in aac_sa_intr() 71 aac_response_normal(&dev->queues->queue[HostNormRespQueue]); in aac_sa_intr()
|
D | linit.c | 1308 kfree(aac->queues); in aac_probe_one() 1338 kfree(aac->queues); in aac_remove_one()
|
D | commctrl.c | 316 if (status && !dev->in_reset && dev->queues && dev->fsa_dev) { in next_getadapter_fib()
|
D | src.c | 446 struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue]; in aac_src_deliver_message()
|
D | aacraid.h | 1123 struct aac_queue_block *queues; member
|
/linux-4.1.27/Documentation/ABI/testing/ |
D | sysfs-class-net-queues | 1 What: /sys/class/<iface>/queues/rx-<queue>/rps_cpus 11 What: /sys/class/<iface>/queues/rx-<queue>/rps_flow_cnt 19 What: /sys/class/<iface>/queues/tx-<queue>/tx_timeout 27 What: /sys/class/<iface>/queues/tx-<queue>/tx_maxrate 35 What: /sys/class/<iface>/queues/tx-<queue>/xps_cpus 45 What: /sys/class/<iface>/queues/tx-<queue>/byte_queue_limits/hold_time 54 What: /sys/class/<iface>/queues/tx-<queue>/byte_queue_limits/inflight 62 What: /sys/class/<iface>/queues/tx-<queue>/byte_queue_limits/limit 71 What: /sys/class/<iface>/queues/tx-<queue>/byte_queue_limits/limit_max 80 What: /sys/class/<iface>/queues/tx-<queue>/byte_queue_limits/limit_min
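    These per-queue attributes are writable from user space. A minimal sketch (illustrative only; the interface name "eth0", the queue index and the CPU mask are assumptions, not taken from the ABI file) that steers RPS for one RX queue by writing a hex CPU bitmap to rps_cpus:

        #include <stdio.h>

        int main(void)
        {
                const char *attr = "/sys/class/net/eth0/queues/rx-0/rps_cpus";
                FILE *f = fopen(attr, "w");

                if (!f) {
                        perror(attr);
                        return 1;
                }
                /* hex bitmap of CPUs allowed to process this queue: CPUs 0-3 */
                fprintf(f, "%x\n", 0xf);
                return fclose(f) ? 1 : 0;
        }

    The same open/write pattern applies to tx-<queue>/xps_cpus and the byte_queue_limits attributes listed above.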
|
/linux-4.1.27/Documentation/devicetree/bindings/soc/ti/ |
D | keystone-navigator-qmss.txt | 9 management of the packet queues. Packets are queued/de-queued by writing or 31 -- managed-queues : the actual queues managed by each queue manager 32 instance, specified as <"base queue #" "# of queues">. 44 - qpend : pool of qpend(interruptible) queues 45 - general-purpose : pool of general queues, primarily used 46 as free descriptor queues or the 47 transmit DMA queues. 48 - accumulator : pool of queues on PDSP accumulator channel 50 -- qrange : number of queues to use per queue range, specified as 51 <"base queue #" "# of queues">. [all …]
|
D | keystone-navigator-dma.txt | 6 the actual data movements across clients using destination queues. Every
|
/linux-4.1.27/Documentation/block/ |
D | cfq-iosched.txt | 17 This specifies how long CFQ should idle for next request on certain cfq queues 22 queues/service trees. This can be very helpful on highly seeky media like 26 Setting slice_idle to 0 will remove all the idling on queues/service tree 76 queues in the group but happens overall on the group and thus still keeps the 78 Not idling on individual queues in the group will dispatch requests from 79 multiple queues in the group at the same time and achieve higher throughput 124 When a queue is selected for execution, the queues IO requests are only 209 it would be better to dispatch multiple requests from multiple cfq queues in 227 dispatch requests from other cfq queues even if requests are pending there. 236 CFQ has following service trees and various queues are put on these trees. [all …]
|
D | null_blk.txt | 16 - Configurable submission queues per device. 59 The number of submission queues attached to the device driver. If unset, it 69 0: The number of submit queues are set to the value of the submit_queues
|
D | request.txt | 36 queues
|
D | biodoc.txt | 1000 elevators may implement queues as they please. 1032 always the right thing to do. Devices typically have their own queues, 1054 per-queue, with a provision for sharing a lock across queues if
|
/linux-4.1.27/Documentation/networking/ |
D | multiqueue.txt | 18 the subqueue memory, as well as netdev configuration of where the queues 21 The base driver will also need to manage the queues as it does the global 34 A new round-robin qdisc, sch_multiq also supports multiple hardware queues. The 36 bands and queues based on the value in skb->queue_mapping. Use this field in 43 On qdisc load, the number of bands is based on the number of queues on the 57 The qdisc will allocate the number of bands to equal the number of queues that 59 queues, the band mapping would look like:
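    multiqueue.txt describes the driver-side contract: allocate one netdev TX queue per hardware queue and honour skb->queue_mapping on transmit. A hedged sketch of that pattern for a hypothetical driver (the mydrv_* names and queue count are illustrative, not from the tree):

        #include <linux/netdevice.h>
        #include <linux/etherdevice.h>
        #include <linux/skbuff.h>

        #define MYDRV_NUM_TXQ 4         /* assumed number of hardware TX queues */

        static netdev_tx_t mydrv_start_xmit(struct sk_buff *skb, struct net_device *dev)
        {
                u16 ring = skb_get_queue_mapping(skb);  /* band picked by sch_multiq/XPS */

                /* A real driver posts the skb to hardware ring 'ring' here and,
                 * when that ring fills, calls netif_stop_subqueue(dev, ring) so
                 * the other bands keep flowing (netif_wake_subqueue() on TX-done). */
                pr_debug("mydrv: tx on hw ring %u\n", ring);
                dev_kfree_skb_any(skb);                 /* placeholder for real DMA */
                return NETDEV_TX_OK;
        }

        static const struct net_device_ops mydrv_netdev_ops = {
                .ndo_start_xmit = mydrv_start_xmit,
        };

        static struct net_device *mydrv_alloc_netdev(void)
        {
                /* one kernel TX queue per hardware queue, as multiqueue.txt asks */
                struct net_device *dev = alloc_etherdev_mq(0, MYDRV_NUM_TXQ);

                if (dev)
                        dev->netdev_ops = &mydrv_netdev_ops;
                return dev;
        }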
|
D | scaling.txt | 23 Contemporary NICs support multiple receive and transmit descriptor queues 25 queues to distribute processing among CPUs. The NIC distributes packets by 43 Some advanced NICs allow steering packets to queues based on 51 module parameter for specifying the number of hardware queues to 54 for each CPU if the device supports enough queues, or otherwise at least 60 default mapping is to distribute the queues evenly in the table, but the 63 indirection table could be done to give different queues different 72 of queues to IRQs can be determined from /proc/interrupts. By default, 85 is to allocate as many queues as there are CPUs in the system (or the 87 is likely the one with the smallest number of receive queues where no [all …]
|
D | tuntap.txt | 111 file descriptors (queues) to parallelize packet sending or receiving. The 113 queues, TUNSETIFF with the same device name must be called many times with 116 char *dev should be the name of the device, queues is the number of queues to 117 be created, fds is used to store and return the file descriptors (queues) 124 int tun_alloc_mq(char *dev, int queues, int *fds) 142 for (i = 0; i < queues; i++) {
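    The tun_alloc_mq() example appears above only in fragments; a self-contained reconstruction of that multiqueue setup (user space, hedged, error handling kept minimal) looks roughly like this:

        #include <fcntl.h>
        #include <string.h>
        #include <unistd.h>
        #include <sys/ioctl.h>
        #include <linux/if.h>
        #include <linux/if_tun.h>

        /* Open 'queues' file descriptors on the same TAP device; each fd is an
         * independent queue, per the IFF_MULTI_QUEUE interface described above. */
        int tun_alloc_mq(char *dev, int queues, int *fds)
        {
                struct ifreq ifr;
                int fd, err, i;

                if (!dev)
                        return -1;

                memset(&ifr, 0, sizeof(ifr));
                ifr.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_MULTI_QUEUE;
                strncpy(ifr.ifr_name, dev, IFNAMSIZ - 1);

                for (i = 0; i < queues; i++) {
                        fd = open("/dev/net/tun", O_RDWR);
                        if (fd < 0)
                                goto err;
                        err = ioctl(fd, TUNSETIFF, (void *)&ifr);
                        if (err) {
                                close(fd);
                                goto err;
                        }
                        fds[i] = fd;
                }
                return 0;
        err:
                for (--i; i >= 0; i--)
                        close(fds[i]);
                return -1;
        }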
|
D | vxge.txt | 32 i) Single function mode (up to 17 queues) 63 x) Multiple hardware queues: (Enabled by default)
|
D | s2io.txt | 58 f. Multi-FIFO/Ring. Supports up to 8 transmit queues and receive rings, 63 Number of transmit queues 74 Valid range: Total length of all queues should not exceed 8192
|
D | tc-actions-env-rules.txt | 7 For example if your action queues a packet to be processed later,
|
D | netdevices.txt | 90 Synchronization: netif_tx_lock spinlock; all TX queues frozen.
|
D | netdev-FAQ.txt | 141 A: No, not for networking. Check the stable queues as per above 1st to see
|
D | openvswitch.txt | 21 no match, it queues the packet to userspace for processing (as part of
|
D | ixgbe.txt | 125 different queues. Enables tight control on routing a flow in the platform.
|
D | rds.txt | 240 avoids allocation in the interrupt handling path which queues
|
D | ip-sysctl.txt | 124 begins to remove incomplete fragment queues to free up resources. 146 result in unnecessarily dropping fragment queues when normal 211 packet for the flow is waiting in Qdisc queues or device transmit
|
D | ppp_generic.txt | 156 The start_xmit function always accepts and queues the packet which it
|
D | bonding.txt | 1559 By default the bonding driver is multiqueue aware and 16 queues are created 1561 for details). If more or less queues are desired the module parameter 1595 arguments can be added to BONDING_OPTS to set all needed slave queues. 1619 a pass-through for selecting output queues on the slave device rather than
|
/linux-4.1.27/drivers/gpu/drm/amd/amdkfd/ |
D | kfd_process_queue_manager.c | 37 list_for_each_entry(pqn, &pqm->queues, process_queue_list) { in get_queue_by_qid() 77 INIT_LIST_HEAD(&pqm->queues); in pqm_init() 97 list_for_each_entry_safe(pqn, next, &pqm->queues, process_queue_list) { in pqm_uninit() 179 if (list_empty(&pqm->queues)) { in pqm_create_queue() 236 list_add(&pqn->process_queue_list, &pqm->queues); in pqm_create_queue() 251 if (list_empty(&pqm->queues)) in pqm_create_queue() 310 if (list_empty(&pqm->queues)) in pqm_destroy_queue()
|
D | kfd_process.c | 187 kfree(p->queues); in kfd_process_wq_release() 264 process->queues = kmalloc_array(INITIAL_QUEUE_ARRAY_SIZE, in create_process() 265 sizeof(process->queues[0]), GFP_KERNEL); in create_process() 266 if (!process->queues) in create_process() 312 kfree(process->queues); in create_process()
|
D | kfd_device_queue_manager.c | 414 list_add(&n->list, &dqm->queues); in register_process_nocpsch() 441 list_for_each_entry_safe(cur, next, &dqm->queues, list) { in unregister_process_nocpsch() 548 INIT_LIST_HEAD(&dqm->queues); in initialize_nocpsch() 711 INIT_LIST_HEAD(&dqm->queues); in initialize_cpsch() 754 list_for_each_entry(node, &dqm->queues, list) in start_cpsch() 778 list_for_each_entry(node, &dqm->queues, list) { in stop_cpsch() 1007 retval = pm_send_runlist(&dqm->packets, &dqm->queues); in execute_queues_cpsch()
|
D | kfd_device_queue_manager.h | 143 struct list_head queues; member
|
D | kfd_packet_manager.c | 237 struct list_head *queues, in pm_create_runlist_ib() argument 250 BUG_ON(!pm || !queues || !rl_size_bytes || !rl_gpu_addr); in pm_create_runlist_ib() 266 list_for_each_entry(cur, queues, list) { in pm_create_runlist_ib()
|
D | kfd_priv.h | 377 struct list_head queues; member 474 struct kfd_queue **queues; member
|
/linux-4.1.27/drivers/net/xen-netback/ |
D | interface.c | 160 queue = &vif->queues[index]; in xenvif_start_xmit() 193 if (vif->queues == NULL) in xenvif_get_stats() 198 queue = &vif->queues[index]; in xenvif_get_stats() 221 queue = &vif->queues[queue_index]; in xenvif_up() 237 queue = &vif->queues[queue_index]; in xenvif_down() 347 void *vif_stats = &vif->queues[queue_index].stats; in xenvif_get_ethtool_stats() 420 vif->queues = NULL; in xenvif_alloc() 631 queue = &vif->queues[queue_index]; in xenvif_disconnect() 678 queue = &vif->queues[queue_index]; in xenvif_free() 682 vfree(vif->queues); in xenvif_free() [all …]
|
D | xenbus.c | 206 &vif->queues[i], in xenvif_debugfs_addif() 667 struct xenvif_queue *queue = &vif->queues[queue_index]; in xen_net_rate_changed() 781 be->vif->queues = vzalloc(requested_num_queues * in connect() 787 queue = &be->vif->queues[queue_index]; in connect() 849 vfree(be->vif->queues); in connect() 850 be->vif->queues = NULL; in connect()
|
D | common.h | 237 struct xenvif_queue *queues; member
|
D | netback.c | 675 if (vif->queues) in xenvif_fatal_tx_err() 676 xenvif_kick_thread(&vif->queues[0]); in xenvif_fatal_tx_err()
|
/linux-4.1.27/Documentation/devicetree/bindings/net/ |
D | fsl-fec.txt | 19 - fsl,num-tx-queues : The property is valid for enet-avb IP, which supports 20 hw multi queues. Should specify the tx queue number, otherwise set tx queue 22 - fsl,num-rx-queues : The property is valid for enet-avb IP, which supports 23 hw multi queues. Should specify the rx queue number, otherwise set rx queue
|
D | brcm,systemport.txt | 7 interrupts, and the second cell should be for the transmit queues. An 18 - systemport,num-txq: number of HW transmit queues, an integer 19 - systemport,num-rxq: number of HW receive queues, an integer
|
D | brcm,bcmgenet.txt | 9 RX and TX queues operating in ring mode
|
D | keystone-netcp.txt | 126 present a maximum of 4 queues per Rx flow.
|
/linux-4.1.27/drivers/block/ |
D | null_blk.c | 40 struct nullb_queue *queues; member 310 return &nullb->queues[index]; in nullb_to_queue() 381 struct nullb_queue *nq = &nullb->queues[index]; in null_init_hctx() 461 cleanup_queue(&nullb->queues[i]); in cleanup_queues() 463 kfree(nullb->queues); in cleanup_queues() 468 nullb->queues = kzalloc(submit_queues * sizeof(struct nullb_queue), in setup_queues() 470 if (!nullb->queues) in setup_queues() 485 nq = &nullb->queues[i]; in init_driver_queues()
|
D | nvme-core.c | 183 struct nvme_queue *nvmeq = dev->queues[0]; in nvme_admin_init_hctx() 197 struct nvme_queue *nvmeq = dev->queues[0]; in nvme_admin_init_request() 215 struct nvme_queue *nvmeq = dev->queues[ in nvme_init_hctx() 235 struct nvme_queue *nvmeq = dev->queues[hctx_idx + 1]; in nvme_init_request() 1022 struct nvme_queue *nvmeq = dev->queues[0]; in nvme_submit_async_admin_req() 1047 struct nvme_queue *nvmeq = dev->queues[0]; in nvme_submit_admin_async_cmd() 1252 if (nvme_submit_cmd(dev->queues[0], &cmd) < 0) { in nvme_abort_req() 1322 struct nvme_queue *nvmeq = dev->queues[i]; in nvme_free_queues() 1324 dev->queues[i] = NULL; in nvme_free_queues() 1368 struct nvme_queue *nvmeq = dev->queues[qid]; in nvme_disable_queue() [all …]
|
/linux-4.1.27/fs/autofs4/ |
D | waitq.c | 40 wq = sbi->queues; in autofs4_catatonic_mode() 41 sbi->queues = NULL; /* Erase all wait queues */ in autofs4_catatonic_mode() 230 for (wq = sbi->queues; wq; wq = wq->next) { in autofs4_find_wait() 424 wq->next = sbi->queues; in autofs4_wait() 425 sbi->queues = wq; in autofs4_wait() 544 for (wql = &sbi->queues; (wq = *wql) != NULL; wql = &wq->next) { in autofs4_wait_release()
|
D | autofs_i.h | 125 struct autofs_wait_queue *queues; /* Wait queue pointer */ member
|
D | inode.c | 238 sbi->queues = NULL; in autofs4_fill_super()
|
/linux-4.1.27/drivers/net/ |
D | xen-netfront.c | 154 struct netfront_queue *queues; member 346 queue = &np->queues[i]; in xennet_open() 535 queue = &np->queues[queue_index]; in xennet_start_xmit() 649 queue = &np->queues[i]; in xennet_close() 1206 xennet_interrupt(0, &info->queues[i]); in xennet_poll_controller() 1252 np->queues = NULL; in xennet_create_dev() 1345 struct netfront_queue *queue = &info->queues[i]; in xennet_disconnect_backend() 1698 struct netfront_queue *queue = &info->queues[i]; in xennet_destroy_queues() 1708 kfree(info->queues); in xennet_destroy_queues() 1709 info->queues = NULL; in xennet_destroy_queues() [all …]
|
D | tun.c | 1608 int queues = ifr->ifr_flags & IFF_MULTI_QUEUE ? in tun_set_iff() local 1633 NET_NAME_UNKNOWN, tun_setup, queues, in tun_set_iff() 1634 queues); in tun_set_iff()
|
/linux-4.1.27/net/mac80211/ |
D | util.c | 295 if (local->hw.queues < IEEE80211_NUM_ACS) in ieee80211_propagate_queue_wake() 333 if (WARN_ON(queue >= hw->queues)) in __ieee80211_wake_queue() 389 if (WARN_ON(queue >= hw->queues)) in __ieee80211_stop_queue() 400 if (local->hw.queues < IEEE80211_NUM_ACS) in __ieee80211_stop_queue() 487 for (i = 0; i < hw->queues; i++) in ieee80211_add_pending_skbs() 495 unsigned long queues, in ieee80211_stop_queues_by_reason() argument 505 for_each_set_bit(i, &queues, hw->queues) in ieee80211_stop_queues_by_reason() 525 if (WARN_ON(queue >= hw->queues)) in ieee80211_queue_stopped() 537 unsigned long queues, in ieee80211_wake_queues_by_reason() argument 547 for_each_set_bit(i, &queues, hw->queues) in ieee80211_wake_queues_by_reason() [all …]
|
D | wme.c | 126 if (local->hw.queues < IEEE80211_NUM_ACS) in ieee80211_select_queue_80211() 155 if (local->hw.queues < IEEE80211_NUM_ACS || skb->len < 6) { in ieee80211_select_queue()
|
D | debugfs.c | 166 for (q = 0; q < local->hw.queues; q++) in queues_read() 176 DEBUGFS_READONLY_FILE_OPS(queues); 244 DEBUGFS_ADD(queues); in debugfs_hw_add()
|
D | main.c | 574 local->hw.queues = 1; in ieee80211_alloc_hw_nm() 800 local->hw.offchannel_tx_hw_queue >= local->hw.queues)) in ieee80211_register_hw() 1014 if (hw->queues > IEEE80211_MAX_QUEUES) in ieee80211_register_hw() 1015 hw->queues = IEEE80211_MAX_QUEUES; in ieee80211_register_hw()
|
D | iface.c | 324 int n_queues = sdata->local->hw.queues; in ieee80211_check_queues() 383 else if (local->hw.queues >= IEEE80211_NUM_ACS) in ieee80211_set_default_queues() 726 if (local->hw.queues < IEEE80211_NUM_ACS) in ieee80211_do_open() 1141 if (local->hw.queues < IEEE80211_NUM_ACS) in ieee80211_monitor_select_queue() 1700 if (local->hw.queues >= IEEE80211_NUM_ACS) in ieee80211_if_add()
|
D | trace.h | 1000 u32 queues, bool drop), 1002 TP_ARGS(local, queues, drop), 1007 __field(u32, queues) 1013 __entry->queues = queues; 1018 LOCAL_PR_ARG, __entry->queues, __entry->drop
|
D | driver-ops.h | 791 u32 queues, bool drop) in drv_flush() argument 800 trace_drv_flush(local, queues, drop); in drv_flush() 802 local->ops->flush(&local->hw, vif, queues, drop); in drv_flush()
|
D | tx.c | 1322 if (WARN_ON_ONCE(q >= local->hw.queues)) { in ieee80211_tx_frags() 2480 for (i = 0; i < local->hw.queues; i++) { in ieee80211_clear_tx_pending() 2539 for (i = 0; i < local->hw.queues; i++) { in ieee80211_tx_pending() 3273 u32 queues; in ieee80211_reserve_tid() local 3314 queues = BIT(sdata->vif.hw_queue[ieee802_1d_to_ac[tid]]); in ieee80211_reserve_tid() 3315 __ieee80211_flush_queues(local, sdata, queues, false); in ieee80211_reserve_tid()
|
D | ieee80211_i.h | 1864 unsigned long queues, 1874 unsigned long queues, 1892 unsigned int queues, bool drop);
|
D | mlme.c | 1586 for (q = 0; q < local->hw.queues; q++) { in ieee80211_dynamic_ps_enable_work() 1655 if (local->hw.queues < IEEE80211_NUM_ACS) in __ieee80211_sta_handle_tspec_ac_params() 1751 if (local->hw.queues < IEEE80211_NUM_ACS) in ieee80211_sta_wmm_params() 3006 sta->sta.wme = elems.wmm_param && local->hw.queues >= IEEE80211_NUM_ACS; in ieee80211_assoc_success() 4676 (local->hw.queues >= IEEE80211_NUM_ACS); in ieee80211_mgd_assoc() 4740 local->hw.queues < IEEE80211_NUM_ACS || !bss->wmm_used || in ieee80211_mgd_assoc() 4751 local->hw.queues < IEEE80211_NUM_ACS || !bss->wmm_used || in ieee80211_mgd_assoc()
|
D | tdls.c | 333 if (local->hw.queues >= IEEE80211_NUM_ACS && in ieee80211_tdls_add_setup_start_ies() 508 if (local->hw.queues >= IEEE80211_NUM_ACS && sta->sta.wme) in ieee80211_tdls_add_setup_cfm_ies()
|
D | ibss.c | 204 if (local->hw.queues >= IEEE80211_NUM_ACS) in ieee80211_ibss_build_presp() 1035 if (sta && elems->wmm_info && local->hw.queues >= IEEE80211_NUM_ACS) in ieee80211_update_sta_info()
|
D | cfg.c | 1063 local->hw.queues >= IEEE80211_NUM_ACS) in sta_apply_parameters() 1929 if (local->hw.queues < IEEE80211_NUM_ACS) in ieee80211_set_txq_params()
|
/linux-4.1.27/drivers/scsi/arm/ |
D | fas216.c | 207 info->stats.queues, info->stats.removes, info->stats.fins, in fas216_dumpinfo() 995 queue_add_cmd_tail(&info->queues.disconnected, info->SCpnt); in fas216_reselected_intr() 1001 info->SCpnt = queue_remove_tgtluntag(&info->queues.disconnected, in fas216_reselected_intr() 1926 SCpnt = queue_remove_exclude(&info->queues.issue, in fas216_kick() 1950 queue_add_cmd_tail(&info->queues.disconnected, info->SCpnt); in fas216_kick() 2216 info->stats.queues += 1; in fas216_queue_command_lck() 2225 result = !queue_add_cmd_ordered(&info->queues.issue, SCpnt); in fas216_queue_command_lck() 2353 if (queue_remove_cmd(&info->queues.issue, SCpnt)) { in fas216_find_command() 2363 } else if (queue_remove_cmd(&info->queues.disconnected, SCpnt)) { in fas216_find_command() 2497 queue_remove_all_target(&info->queues.issue, target); in fas216_eh_device_reset() [all …]
|
D | acornscsi.h | 305 unsigned int queues; member 320 } queues; member
|
D | acornscsi.c | 708 SCpnt = queue_remove_exclude(&host->queues.issue, host->busyluns); in acornscsi_kick() 716 queue_add_cmd_tail(&host->queues.disconnected, host->SCpnt); in acornscsi_kick() 1797 if (!ok && queue_probetgtlun(&host->queues.disconnected, target, lun)) in acornscsi_reconnect() 1812 queue_add_cmd_tail(&host->queues.disconnected, host->SCpnt); in acornscsi_reconnect() 1839 queue_add_cmd_tail(&host->queues.disconnected, host->SCpnt); in acornscsi_reconnect_finish() 1849 host->SCpnt = queue_remove_tgtluntag(&host->queues.disconnected, in acornscsi_reconnect_finish() 2501 host->stats.queues += 1; in acornscsi_queuecmd_lck() 2506 if (!queue_add_cmd_ordered(&host->queues.issue, SCpnt)) { in acornscsi_queuecmd_lck() 2558 if (queue_remove_cmd(&host->queues.issue, SCpnt)) { in acornscsi_do_abort() 2569 } else if (queue_remove_cmd(&host->queues.disconnected, SCpnt)) { in acornscsi_do_abort() [all …]
|
D | fas216.h | 258 unsigned int queues; member 286 } queues; member
|
/linux-4.1.27/Documentation/devicetree/bindings/mailbox/ |
D | omap-mailbox.txt | 10 Each mailbox IP block has a certain number of h/w fifo queues and output 21 The number of h/w fifo queues and interrupt lines dictate the usable registers. 23 instance. DRA7xx has multiple instances with different number of h/w fifo queues 51 - ti,mbox-num-fifos: Number of h/w fifo queues within the mailbox IP block
|
/linux-4.1.27/drivers/net/ethernet/cadence/ |
D | macb.c | 510 (unsigned int)(queue - bp->queues), in macb_tx_error_task() 610 u16 queue_index = queue - bp->queues; in macb_tx_interrupt() 1004 (unsigned int)(queue - bp->queues), in macb_interrupt() 1102 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) in macb_poll_controller() 1247 struct macb_queue *queue = &bp->queues[queue_index]; in macb_start_xmit() 1374 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { in macb_free_consistent() 1422 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { in macb_alloc_consistent() 1465 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { in gem_init_rings() 1495 bp->queues[0].tx_ring[i].addr = 0; in macb_init_rings() 1496 bp->queues[0].tx_ring[i].ctrl = MACB_BIT(TX_USED); in macb_init_rings() [all …]
|
D | macb.h | 792 struct macb_queue queues[MACB_MAX_QUEUES]; member
|
/linux-4.1.27/drivers/net/wireless/iwlwifi/mvm/ |
D | time-event.c | 102 u32 queues = 0; in iwl_mvm_roc_done_wk() local 112 queues |= BIT(IWL_MVM_OFFCHANNEL_QUEUE); in iwl_mvm_roc_done_wk() 116 queues |= BIT(mvm->aux_queue); in iwl_mvm_roc_done_wk() 132 iwl_mvm_flush_tx_path(mvm, queues, false); in iwl_mvm_roc_done_wk()
|
D | mac80211.c | 434 hw->queues = mvm->first_agg_queue; in iwl_mvm_mac_setup_register() 3849 struct ieee80211_vif *vif, u32 queues, bool drop) in iwl_mvm_mac_flush() argument
|
/linux-4.1.27/Documentation/cgroups/ |
D | blkio-controller.txt | 226 scheduler queues for service. This can be greater than the total time 253 queues of this cgroup gets a timeslice. 259 its queues. This is different from the io_wait_time which is the 270 spent idling for one of the queues of the cgroup. This is in 279 from other queues/cgroups. This is in nanoseconds. If this is read 371 That means CFQ will not idle between cfq queues of a cfq group and hence be 378 If one disables idling on individual cfq queues and cfq service trees by 392 - Currently only sync IO queues are support. All the buffered writes are
|
/linux-4.1.27/drivers/soc/ti/ |
D | Kconfig | 15 is responsible for accelerating management of the packet queues.
|
/linux-4.1.27/Documentation/devicetree/bindings/misc/ |
D | fsl,qoriq-mc.txt | 7 queues, buffer pools, I/O interfaces. These resources are building
|
/linux-4.1.27/Documentation/device-mapper/ |
D | cache-policies.txt | 33 The multiqueue policy has three sets of 16 queues: one set for entries 37 Cache entries in the queues are aged based on logical time. Entry into
|
/linux-4.1.27/drivers/dma/ |
D | cppi41.c | 835 const struct chan_queues *queues; in cpp41_dma_filter_fn() local 850 queues = cdd->queues_tx; in cpp41_dma_filter_fn() 852 queues = cdd->queues_rx; in cpp41_dma_filter_fn() 858 cchan->q_num = queues[cchan->port_num].submit; in cpp41_dma_filter_fn() 859 cchan->q_comp_num = queues[cchan->port_num].complete; in cpp41_dma_filter_fn()
|
/linux-4.1.27/drivers/net/wireless/ath/carl9170/ |
D | main.c | 229 for (i = 0; i < ar->hw->queues; i++) { in carl9170_flush() 286 for (i = 0; i < ar->hw->queues; i++) { in carl9170_zap_queues() 307 for (i = 0; i < ar->hw->queues; i++) in carl9170_zap_queues() 368 for (i = 0; i < ar->hw->queues; i++) { in carl9170_op_start() 1391 if (queue < ar->hw->queues) { in carl9170_op_conf_tx() 1703 u32 queues, bool drop) in carl9170_op_flush() argument 1813 hw->queues = __AR9170_NUM_TXQ; in carl9170_alloc() 1824 for (i = 0; i < ar->hw->queues; i++) { in carl9170_alloc()
|
D | tx.c | 96 for (i = 0; i < ar->hw->queues; i++) { in carl9170_tx_accounting() 168 for (i = 0; i < ar->hw->queues; i++) { in carl9170_tx_accounting_free() 566 for (i = 0; i < ar->hw->queues; i++) { in carl9170_check_queue_stop_timeout() 1349 for (i = 0; i < ar->hw->queues; i++) { in carl9170_tx()
|
D | debug.c | 401 for (i = 0; i < ar->hw->queues; i++) { in carl9170_debugfs_tx_stuck_read()
|
/linux-4.1.27/include/linux/ |
D | nvme.h | 72 struct nvme_queue **queues; member
|
D | netdevice.h | 717 u16 queues[0]; member
|
/linux-4.1.27/net/core/ |
D | flow_dissector.c | 435 queue_index = map->queues[0]; in get_xps_queue() 437 queue_index = map->queues[reciprocal_scale(skb_get_hash(skb), in get_xps_queue()
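    The flow_dissector.c hits above are the transmit-side XPS lookup: the skb hash is scaled onto the per-CPU map's queue array. A condensed sketch of that selection (simplified; RCU, validation and the real struct layout from netdevice.h omitted, so this is not the literal kernel code):

        #include <linux/kernel.h>       /* reciprocal_scale() */
        #include <linux/types.h>

        struct xps_map_sketch {
                unsigned int len;
                u16 queues[0];          /* flexible array, as in netdevice.h */
        };

        static u16 xps_pick_queue(const struct xps_map_sketch *map, u32 skb_hash)
        {
                if (map->len == 1)
                        return map->queues[0];
                /* reciprocal_scale() maps the 32-bit hash uniformly onto [0, len) */
                return map->queues[reciprocal_scale(skb_hash, map->len)];
        }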
|
D | dev.c | 1914 if (map->queues[pos] == index) { in remove_xps_queue() 1916 map->queues[pos] = map->queues[--map->len]; in remove_xps_queue() 1971 if (map->queues[pos] != index) in expand_xps_map() 1991 new_map->queues[i] = map->queues[i]; in expand_xps_map() 2042 while ((pos < map->len) && (map->queues[pos] != index)) in netif_set_xps_queue() 2046 map->queues[map->len++] = index; in netif_set_xps_queue()
|
D | net-sysfs.c | 1176 if (map->queues[j] == index) { in show_xps_map()
|
/linux-4.1.27/drivers/net/wireless/cw1200/ |
D | sta.h | 44 u32 queues, bool drop);
|
D | main.c | 303 hw->queues = 4; in cw1200_init_common()
|
D | sta.c | 625 if (queue < dev->queues) { in cw1200_conf_tx() 942 u32 queues, bool drop) in cw1200_flush() argument
|
/linux-4.1.27/drivers/net/wireless/p54/ |
D | main.c | 419 if (queue < dev->queues) { in p54_conf_tx() 675 u32 queues, bool drop) in p54_flush() argument 769 dev->queues = 1; in p54_init_common()
|
D | fwio.c | 155 priv->hw->queues = P54_QUEUE_AC_NUM; in p54_parse_firmware()
|
D | txrx.c | 177 for (i = 0; i < priv->hw->queues; i++) { in p54_wake_queues()
|
/linux-4.1.27/Documentation/RCU/ |
D | rcubarrier.txt | 200 queues. His implementation queues an RCU callback on each of the per-CPU 201 callback queues, and then waits until they have all started executing, at 318 callback queues, things will have to change. One simple change
|
D | trace.txt | 160 queues by rcu_do_batch(), but which have not yet been
|
/linux-4.1.27/arch/sh/ |
D | Kconfig.cpu | 67 the store queues integrated in the SH-4 processors.
|
/linux-4.1.27/Documentation/sysctl/ |
D | fs.txt | 253 3. /proc/sys/fs/mqueue - POSIX message queues filesystem 257 creation of a user space library that implements the POSIX message queues 265 maximum number of message queues allowed on the system.
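    The mqueue limits above apply to queues created through the POSIX mq_* API. A small user-space sketch (illustrative; the queue name "/demo_q" is made up; link with -lrt) that creates one queue, which counts against /proc/sys/fs/mqueue/queues_max:

        #include <fcntl.h>
        #include <mqueue.h>
        #include <stdio.h>
        #include <string.h>

        int main(void)
        {
                mqd_t q = mq_open("/demo_q", O_CREAT | O_WRONLY, 0600, NULL);

                if (q == (mqd_t)-1) {
                        perror("mq_open");      /* e.g. system-wide queue limit hit */
                        return 1;
                }
                mq_send(q, "hello", strlen("hello"), 0);        /* priority 0 */
                mq_close(q);
                mq_unlink("/demo_q");
                return 0;
        }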
|
/linux-4.1.27/Documentation/sound/alsa/ |
D | Procfile.txt | 195 seq/queues 196 Lists the currently allocated/running sequencer queues.
|
/linux-4.1.27/include/uapi/sound/ |
D | asequencer.h | 312 int queues; /* maximum queues count */ member
|
/linux-4.1.27/Documentation/scsi/ |
D | lpfc.txt | 48 queuing. Removing the queues from the LLDD makes a more predictable
|
D | scsi_eh.txt | 190 4. Kicks queues in all devices on the host in the asses
|
D | ChangeLog.ncr53c8xx | 438 - Resources management using doubly linked queues.
|
D | ChangeLog.lpfc | 28 find command in both TX and TX completion queues. Return ERROR
|
/linux-4.1.27/drivers/net/wireless/ath/ath9k/ |
D | init.c | 807 hw->queues = ATH9K_NUM_TX_QUEUES; in ath9k_set_mcc_capab() 808 hw->offchannel_tx_hw_queue = hw->queues - 1; in ath9k_set_mcc_capab() 878 hw->queues = 4; in ath9k_set_hw_capab()
|
D | main.c | 1222 vif->cab_queue = hw->queues - 2; in ath9k_assign_hw_queues() 2008 u32 queues, bool drop) in ath9k_flush() argument 2027 __ath9k_flush(hw, queues, drop, true, true); in ath9k_flush() 2032 __ath9k_flush(hw, queues, drop, true, false); in ath9k_flush() 2036 void __ath9k_flush(struct ieee80211_hw *hw, u32 queues, bool drop, in __ath9k_flush() argument
|
D | channel.c | 1367 ieee80211_stop_queue(sc->hw, sc->hw->queues - 2); in ath9k_chanctx_stop_queues() 1386 ieee80211_wake_queue(sc->hw, sc->hw->queues - 2); in ath9k_chanctx_wake_queues()
|
D | ath9k.h | 724 void __ath9k_flush(struct ieee80211_hw *hw, u32 queues, bool drop,
|
D | htc_drv_init.c | 751 hw->queues = 4; in ath9k_set_hw_capab()
|
/linux-4.1.27/drivers/usb/gadget/udc/ |
D | pxa27x_udc.c | 247 struct dentry *root, *state, *queues, *eps; in pxa_init_debugfs() local 257 queues = debugfs_create_file("queues", 0400, root, udc, in pxa_init_debugfs() 259 if (!queues) in pxa_init_debugfs() 268 udc->debugfs_queues = queues; in pxa_init_debugfs() 274 debugfs_remove(queues); in pxa_init_debugfs()
|
D | net2280.c | 1757 static DEVICE_ATTR_RO(queues);
|
/linux-4.1.27/drivers/net/wireless/rtl818x/rtl8180/ |
D | dev.c | 1123 for (i = 0; i < (dev->queues + 1); i++) in rtl8180_start() 1233 for (i = 0; i < (dev->queues + 1); i++) in rtl8180_start() 1263 for (i = 0; i < (dev->queues + 1); i++) in rtl8180_stop() 1861 dev->queues = RTL8187SE_NR_TX_QUEUES - 1; in rtl8180_probe() 1863 dev->queues = RTL8180_NR_TX_QUEUES - 1; in rtl8180_probe()
|
/linux-4.1.27/drivers/net/ethernet/sfc/ |
D | farch.c | 1537 u32 queues; in efx_farch_legacy_interrupt() local 1542 queues = EFX_EXTRACT_DWORD(reg, 0, 31); in efx_farch_legacy_interrupt() 1555 if (queues & (1U << efx->irq_level) && soft_enabled) { in efx_farch_legacy_interrupt() 1562 if (queues != 0) { in efx_farch_legacy_interrupt() 1568 if (queues & 1) in efx_farch_legacy_interrupt() 1570 queues >>= 1; in efx_farch_legacy_interrupt()
|
D | ef10.c | 1170 u32 queues; in efx_ef10_legacy_interrupt() local 1174 queues = EFX_DWORD_FIELD(reg, ERF_DZ_ISR_REG); in efx_ef10_legacy_interrupt() 1176 if (queues == 0) in efx_ef10_legacy_interrupt() 1181 if (queues & (1U << efx->irq_level)) in efx_ef10_legacy_interrupt() 1185 if (queues & 1) in efx_ef10_legacy_interrupt() 1187 queues >>= 1; in efx_ef10_legacy_interrupt()
|
D | falcon.c | 434 int queues; in falcon_legacy_interrupt_a1() local 462 queues = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_INT_Q); in falcon_legacy_interrupt_a1() 467 if (queues & 1) in falcon_legacy_interrupt_a1() 469 if (queues & 2) in falcon_legacy_interrupt_a1()
|
/linux-4.1.27/Documentation/locking/ |
D | rt-mutex.txt | 34 priority waiters list. This list too queues in priority order. Whenever
|
/linux-4.1.27/Documentation/filesystems/ |
D | inotify.txt | 47 - We'd have to maintain n fd's and n internal queues with state,
|
D | coda.txt | 287 determine the position of the message on queues and pointers to 1604 1. message queues 1611 can easily be manipulated. The message queues will generally have 1633 1. The message queues should have open and close routines. On Unix 1643 +o Close will free all memory allocated by the message queues. 1648 3. Before the message queues are open, all VFS operations will fail. 1652 4. After closing of the queues, no VFS operations can succeed. Here
|
D | spufs.txt | 15 message queues. Users that have write permissions on the file system
|
D | xfs-delayed-logging-design.txt | 678 serialisation queues. They use the same lock as the CIL, too. If we see too
|
/linux-4.1.27/drivers/net/wireless/ti/wlcore/ |
D | conf.h | 1267 u8 queues; member
|
D | acx.c | 1506 conf_queues = wl->conf.rx_streaming.queues; in wl1271_acx_ps_rx_streaming()
|
D | main.c | 5515 u32 queues, bool drop) in wlcore_op_flush() argument 6153 wl->hw->queues = (NUM_TX_QUEUES + 1) * WLCORE_NUM_MAC_ADDRESSES + 1; in wl1271_init_ieee80211() 6156 wl->hw->offchannel_tx_hw_queue = wl->hw->queues - 1; in wl1271_init_ieee80211()
|
/linux-4.1.27/drivers/net/ethernet/intel/ixgbe/ |
D | ixgbe_main.c | 2543 static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues, in ixgbe_irq_enable() argument 2587 if (queues) in ixgbe_irq_enable() 4581 unsigned int rxbase, txbase, queues; in ixgbe_fwd_ring_up() local 4611 queues = min_t(unsigned int, in ixgbe_fwd_ring_up() 4613 err = netif_set_real_num_tx_queues(vdev, queues); in ixgbe_fwd_ring_up() 4617 err = netif_set_real_num_rx_queues(vdev, queues); in ixgbe_fwd_ring_up() 5675 int err, queues; in ixgbe_open() local 5701 queues = adapter->num_rx_queues_per_pool; in ixgbe_open() 5703 queues = adapter->num_tx_queues; in ixgbe_open() 5705 err = netif_set_real_num_tx_queues(netdev, queues); in ixgbe_open() [all …]
|
/linux-4.1.27/Documentation/usb/ |
D | hotplug.txt | 15 queues may need to be enabled, networks brought up, disk
|
D | ehci.txt | 118 usb-storage doing disk I/O; watch the request queues!)
|
/linux-4.1.27/Documentation/ |
D | stable_kernel_rules.txt | 121 - The queues of patches, for both completed versions and in progress
|
D | vme_api.txt | 295 The following function queues a list for execution. The function will return
|
D | kernel-docs.txt | 163 Keywords: interrupts, irqs, DMA, bottom halves, task queues. 292 event queues.
|
D | HOWTO | 307 in use, or patch queues being published as quilt series. Addresses of
|
/linux-4.1.27/init/ |
D | Kconfig | 246 POSIX variant of message queues is a part of IPC. In POSIX message 247 queues every message has a priority which decides about succession 250 queues (functions mq_*) say Y here. 252 POSIX message queues are visible as a filesystem called 'mqueue' 254 operations on message queues. 1694 per cpu and per node queues. 1700 instead of managing queues of cached objects (SLAB approach). 1702 of queues of objects. SLUB can use memory efficiently
|
/linux-4.1.27/Documentation/misc-devices/mei/ |
D | mei.txt | 62 handles this internally by maintaining request queues for the applications.
|
/linux-4.1.27/drivers/net/wireless/rt2x00/ |
D | rt2x00mac.c | 756 u32 queues, bool drop) in rt2x00mac_flush() argument
|
D | rt2x00.h | 1459 u32 queues, bool drop);
|
D | rt2x00dev.c | 1047 rt2x00dev->hw->queues = rt2x00dev->ops->tx_queues; in rt2x00lib_probe_hw()
|
/linux-4.1.27/drivers/net/wireless/iwlwifi/dvm/ |
D | mac80211.c | 197 hw->queues = IWLAGN_FIRST_AMPDU_QUEUE; in iwlagn_mac_setup_register() 1096 u32 queues, bool drop) in iwlagn_mac_flush() argument
|
/linux-4.1.27/drivers/net/wireless/libertas_tf/ |
D | main.c | 639 hw->queues = 1; in lbtf_add_card()
|
/linux-4.1.27/include/net/ |
D | mac80211.h | 1976 u16 queues; member 3286 u32 queues, bool drop);
|
/linux-4.1.27/drivers/net/wireless/brcm80211/brcmsmac/ |
D | mac80211_if.c | 904 u32 queues, bool drop) in brcms_ops_flush() argument 1072 hw->queues = N_TX_QUEUES; in ieee_hw_init()
|
/linux-4.1.27/Documentation/timers/ |
D | hrtimers.txt | 101 queues while keeping the time-order intact.)
|
D | highres.txt | 195 The softirq for running the hrtimer queues and executing the callbacks has been
|
/linux-4.1.27/drivers/scsi/qla4xxx/ |
D | ql4_def.h | 691 void *queues; member
|
D | ql4_os.c | 4130 if (ha->queues) in qla4xxx_mem_free() 4131 dma_free_coherent(&ha->pdev->dev, ha->queues_len, ha->queues, in qla4xxx_mem_free() 4138 ha->queues = NULL; in qla4xxx_mem_free() 4201 ha->queues = dma_alloc_coherent(&ha->pdev->dev, ha->queues_len, in qla4xxx_mem_alloc() 4203 if (ha->queues == NULL) { in qla4xxx_mem_alloc() 4209 memset(ha->queues, 0, ha->queues_len); in qla4xxx_mem_alloc() 4222 ha->request_ring = (struct queue_entry *) (ha->queues + align); in qla4xxx_mem_alloc() 4225 ha->response_ring = (struct queue_entry *) (ha->queues + align + in qla4xxx_mem_alloc() 4231 ha->shadow_regs = (struct shadow_regs *) (ha->queues + align + in qla4xxx_mem_alloc()
|
/linux-4.1.27/drivers/net/wireless/ath/ar5523/ |
D | ar5523.c | 1095 u32 queues, bool drop) in ar5523_flush() argument 1692 hw->queues = 1; in ar5523_probe()
|
/linux-4.1.27/drivers/net/wireless/rtl818x/rtl8187/ |
D | dev.c | 1621 dev->queues = 1; in rtl8187_probe() 1623 dev->queues = 4; in rtl8187_probe()
|
/linux-4.1.27/arch/arm/boot/dts/ |
D | imx6sx.dtsi | 793 fsl,num-tx-queues=<3>; 794 fsl,num-rx-queues=<3>;
|
/linux-4.1.27/drivers/net/wireless/ath/wcn36xx/ |
D | main.c | 973 wcn->hw->queues = 4; in wcn36xx_init_ieee80211()
|
/linux-4.1.27/drivers/net/wireless/rsi/ |
D | rsi_91x_mac80211.c | 1070 hw->queues = MAX_HW_QUEUES; in rsi_mac80211_attach()
|
/linux-4.1.27/drivers/net/wireless/rtlwifi/ |
D | core.c | 1716 u32 queues, in rtl_op_flush() argument 1722 rtlpriv->intf_ops->flush(hw, queues, drop); in rtl_op_flush()
|
D | pci.c | 1754 static void rtl_pci_flush(struct ieee80211_hw *hw, u32 queues, bool drop) in rtl_pci_flush() argument 1770 if (((queues >> queue_id) & 0x1) == 0) { in rtl_pci_flush()
|
D | base.c | 424 hw->queues = AC_MAX; in _rtl_init_mac80211()
|
D | wifi.h | 2200 void (*flush)(struct ieee80211_hw *hw, u32 queues, bool drop);
|
/linux-4.1.27/Documentation/powerpc/ |
D | cxl.txt | 241 queues the WED may describe.
|
/linux-4.1.27/drivers/net/ethernet/chelsio/cxgb4/ |
D | cxgb4.h | 1075 int cxgb4_write_rss(const struct port_info *pi, const u16 *queues);
|
D | cxgb4_main.c | 860 int cxgb4_write_rss(const struct port_info *pi, const u16 *queues) in cxgb4_write_rss() argument 871 for (i = 0; i < pi->rss_size; i++, queues++) in cxgb4_write_rss() 872 rss[i] = q[*queues].rspq.abs_id; in cxgb4_write_rss()
|
/linux-4.1.27/Documentation/nfc/ |
D | nfc-hci.txt | 190 Only when llc_shdlc is used: handles shdlc rx & tx queues.
|
/linux-4.1.27/drivers/net/wireless/ath/ath5k/ |
D | base.c | 3062 hw->queues = 4; in ath5k_init() 3071 hw->queues = 1; in ath5k_init()
|
/linux-4.1.27/drivers/usb/gadget/legacy/ |
D | Kconfig | 114 driver, so that deep I/O queues can be supported. On 2.4 kernels,
|
/linux-4.1.27/drivers/net/wireless/ti/wl12xx/ |
D | main.c | 330 .queues = 0x1,
|
/linux-4.1.27/Documentation/filesystems/caching/ |
D | fscache.txt | 282 Ops pend=N Number of times async ops added to pending queues
|
/linux-4.1.27/drivers/net/wireless/zd1211rw/ |
D | zd_mac.c | 1412 hw->queues = 1; in zd_mac_alloc_hw()
|
/linux-4.1.27/drivers/net/wireless/ti/wl1251/ |
D | main.c | 1491 wl->hw->queues = 4; in wl1251_init_ieee80211()
|
/linux-4.1.27/drivers/net/wireless/ |
D | mac80211_hwsim.c | 1849 u32 queues, bool drop) in mac80211_hwsim_flush() argument 2384 hw->queues = 5; in mac80211_hwsim_new_radio()
|
D | adm8211.c | 1871 dev->queues = 1; /* ADM8211C supports more, maybe ADM8211B too */ in adm8211_probe()
|
D | mwl8k.c | 6076 hw->queues = MWL8K_TX_WMM_QUEUES; in mwl8k_firmware_load_success()
|
/linux-4.1.27/drivers/net/wireless/ti/wl18xx/ |
D | main.c | 455 .queues = 0x1,
|
/linux-4.1.27/drivers/net/usb/ |
D | Kconfig | 115 that supports deep queues for efficient transfers. (This gives
|
/linux-4.1.27/drivers/net/wireless/b43/ |
D | main.c | 2597 wl->hw->queues = B43_QOS_QUEUE_NUM; in b43_request_firmware() 2599 wl->hw->queues = 1; in b43_request_firmware() 2710 dev->qos_enabled = dev->wl->hw->queues > 1; in b43_upload_microcode()
|
/linux-4.1.27/Documentation/security/ |
D | credentials.txt | 43 - Message queues
|
D | Smack.txt | 463 IPC objects, message queues, semaphore sets, and memory segments exist in flat
|
/linux-4.1.27/drivers/net/wireless/iwlegacy/ |
D | common.h | 1727 u32 queues, bool drop);
|
D | 3945-mac.c | 3581 hw->queues = 4; in il3945_setup_mac()
|
D | common.c | 4761 u32 queues, bool drop) in il_mac_flush() argument
|
D | 4965-mac.c | 5784 hw->queues = 4; in il4965_mac_setup_register()
|
/linux-4.1.27/Documentation/cdrom/ |
D | cdrom-standard.tex | 320 the information from $media_changed()$ to two separate queues. Other 394 queues for the VFS and a new $ioctl()$ function that can report device
|
/linux-4.1.27/Documentation/spi/ |
D | spi-summary | 134 SPI requests always go into I/O queues. Requests for a given SPI device
|
/linux-4.1.27/sound/core/seq/ |
D | seq_clientmgr.c | 1137 info.queues = SNDRV_SEQ_MAX_QUEUES; in snd_seq_ioctl_system_info()
|
/linux-4.1.27/drivers/net/wireless/ath/ath10k/ |
D | mac.c | 4486 u32 queues, bool drop) in ath10k_flush() argument 5566 ar->hw->queues = 4; in ath10k_mac_register()
|
/linux-4.1.27/Documentation/power/ |
D | devices.txt | 243 before reactivating its class I/O queues.
|
D | pci.txt | 1003 device the PM core automatically queues a request to check if the device is
|
/linux-4.1.27/drivers/net/wireless/b43legacy/ |
D | main.c | 3846 hw->queues = 1; /* FIXME: hardware has more queues */ in b43legacy_wireless_init()
|
/linux-4.1.27/net/netfilter/ |
D | Kconfig | 818 As opposed to QUEUE, it supports 65535 different queues,
|
/linux-4.1.27/ |
D | CREDITS | 313 D: POSIX message queues fs (with M. Wronski) 3957 D: POSIX message queues fs (with K. Benedyczak)
|
/linux-4.1.27/drivers/scsi/aic7xxx/ |
D | aic79xx.reg | 3722 * Per "other-id" execution queues. We use an array of
|