/linux-4.4.14/fs/ocfs2/cluster/ |
D | quorum.c |
    109  struct o2quo_state *qs = &o2quo_state;    in o2quo_make_decision() local
    111  spin_lock(&qs->qs_lock);    in o2quo_make_decision()
    113  lowest_hb = find_first_bit(qs->qs_hb_bm, O2NM_MAX_NODES);    in o2quo_make_decision()
    115  lowest_reachable = test_bit(lowest_hb, qs->qs_conn_bm);    in o2quo_make_decision()
    118  "lowest: %d (%sreachable)\n", qs->qs_heartbeating,    in o2quo_make_decision()
    119  qs->qs_connected, lowest_hb, lowest_reachable ? "" : "un");    in o2quo_make_decision()
    121  if (!test_bit(o2nm_this_node(), qs->qs_hb_bm) ||    in o2quo_make_decision()
    122  qs->qs_heartbeating == 1)    in o2quo_make_decision()
    125  if (qs->qs_heartbeating & 1) {    in o2quo_make_decision()
    128  quorum = (qs->qs_heartbeating + 1)/2;    in o2quo_make_decision()
    [all …]
|
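The quorum.c lines above are the heart of the o2quo majority rule: line 128 computes the threshold for an odd-sized cluster as (heartbeating + 1)/2, and the lowest-numbered heartbeating node (lines 113-115) serves as the tie-breaker hint. A minimal userspace sketch of that decision follows; the helper name and the even-split handling are assumptions layered on the snippet, not the kernel's o2quo API.

    #include <stdbool.h>
    #include <stdio.h>

    /* Simplified model: "heartbeating" is the cluster size seen via heartbeat,
     * "connected" is how many of those nodes this side can reach (itself
     * included), and "lowest_reachable" says whether the lowest-numbered
     * heartbeating node is on this side.  Hypothetical helper, not o2quo. */
    static bool side_has_quorum(int heartbeating, int connected, bool lowest_reachable)
    {
        if (heartbeating & 1) {
            /* Odd cluster size: a strict majority decides (line 128 above). */
            int quorum = (heartbeating + 1) / 2;
            return connected >= quorum;
        }
        /* Even cluster size (assumed): exactly half survives only if it
         * also holds the lowest-numbered node. */
        int half = heartbeating / 2;
        return connected > half || (connected == half && lowest_reachable);
    }

    int main(void)
    {
        printf("%d\n", side_has_quorum(5, 3, false)); /* 1: 3 of 5 is a majority */
        printf("%d\n", side_has_quorum(4, 2, true));  /* 1: even split, lowest node is ours */
        printf("%d\n", side_has_quorum(4, 2, false)); /* 0: even split, lowest node is not */
        return 0;
    }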
/linux-4.4.14/drivers/net/ethernet/cavium/thunder/ |
D | nicvf_queues.c |
    234  struct queue_set *qs = nic->qs;    in nicvf_refill_rbdr() local
    235  int rbdr_idx = qs->rbdr_cnt;    in nicvf_refill_rbdr()
    247  rbdr = &qs->rbdr[rbdr_idx];    in nicvf_refill_rbdr()
    256  if (qcount >= (qs->rbdr_len - 1))    in nicvf_refill_rbdr()
    259  refill_rb_cnt = qs->rbdr_len - qcount - 1;    in nicvf_refill_rbdr()
    396  struct queue_set *qs, int qidx)    in nicvf_reclaim_snd_queue() argument
    408  struct queue_set *qs, int qidx)    in nicvf_reclaim_rcv_queue() argument
    418  struct queue_set *qs, int qidx)    in nicvf_reclaim_cmp_queue() argument
    500  static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,    in nicvf_rcv_queue_config() argument
    507  rq = &qs->rq[qidx];    in nicvf_rcv_queue_config()
    [all …]
|
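The nicvf_refill_rbdr() hits show the usual receive-buffer-ring arithmetic: given the configured ring length and the count of descriptors currently queued, the driver tops the ring up to all-but-one slot (lines 256 and 259). A small sketch of that free-slot calculation; the function and parameter names are made up for illustration, not the driver's API.

    #include <stdio.h>

    /* How many buffers to post: fill the ring, but always leave one
     * descriptor slot empty (the rbdr_len - qcount - 1 on line 259). */
    static int rbdr_slots_to_refill(int rbdr_len, int qcount)
    {
        if (qcount >= rbdr_len - 1)
            return 0;                      /* ring already full */
        return rbdr_len - qcount - 1;
    }

    int main(void)
    {
        printf("%d\n", rbdr_slots_to_refill(1024, 100));  /* 923 buffers to post */
        printf("%d\n", rbdr_slots_to_refill(1024, 1023)); /* 0 */
        return 0;
    }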
D | nicvf_main.c |
    289  mbx.cpi_cfg.rq_cnt = nic->qs->rq_cnt;    in nicvf_config_cpi()
    415  nic->snicvf[sqs]->qs->rq_cnt = MAX_RCV_QUEUES_PER_QS;    in nicvf_request_sqs()
    418  nic->snicvf[sqs]->qs->rq_cnt = rx_queues;    in nicvf_request_sqs()
    423  nic->snicvf[sqs]->qs->sq_cnt = MAX_SND_QUEUES_PER_QS;    in nicvf_request_sqs()
    426  nic->snicvf[sqs]->qs->sq_cnt = tx_queues;    in nicvf_request_sqs()
    430  nic->snicvf[sqs]->qs->cq_cnt =    in nicvf_request_sqs()
    431  max(nic->snicvf[sqs]->qs->rq_cnt, nic->snicvf[sqs]->qs->sq_cnt);    in nicvf_request_sqs()
    517  sq = &nic->qs->sq[cqe_tx->sq_idx];    in nicvf_snd_pkt_handler()
    640  struct queue_set *qs = nic->qs;    in nicvf_cq_intr_handler() local
    641  struct cmp_queue *cq = &qs->cq[cq_idx];    in nicvf_cq_intr_handler()
    [all …]
|
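nicvf_request_sqs() (lines 415-431 above) spreads the leftover RX/TX queues across secondary queue sets: each secondary set takes at most MAX_RCV_QUEUES_PER_QS / MAX_SND_QUEUES_PER_QS queues, the last one takes the remainder, and the completion-queue count is the larger of the two. A rough userspace sketch of that distribution; the per-set limit of 8 and every name below are assumptions for illustration, not the driver's types.

    #include <stdio.h>

    #define QUEUES_PER_QS 8   /* assumed per-queue-set limit; the driver uses
                               * MAX_RCV_QUEUES_PER_QS / MAX_SND_QUEUES_PER_QS */

    struct qs_cfg { int rq_cnt, sq_cnt, cq_cnt; };

    /* Distribute 'rx' and 'tx' leftover queues across 'nr_sqs' secondary
     * queue sets, mirroring the loop structure seen in nicvf_request_sqs(). */
    static void distribute_queues(struct qs_cfg *sqs, int nr_sqs, int rx, int tx)
    {
        for (int i = 0; i < nr_sqs; i++) {
            sqs[i].rq_cnt = rx > QUEUES_PER_QS ? QUEUES_PER_QS : rx;
            rx -= sqs[i].rq_cnt;
            sqs[i].sq_cnt = tx > QUEUES_PER_QS ? QUEUES_PER_QS : tx;
            tx -= sqs[i].sq_cnt;
            /* One completion queue per send or receive queue, whichever is more. */
            sqs[i].cq_cnt = sqs[i].rq_cnt > sqs[i].sq_cnt ? sqs[i].rq_cnt : sqs[i].sq_cnt;
        }
    }

    int main(void)
    {
        struct qs_cfg sqs[2];
        distribute_queues(sqs, 2, 12, 10);   /* 12 extra RX, 10 extra TX queues */
        for (int i = 0; i < 2; i++)
            printf("sqs%d: rq=%d sq=%d cq=%d\n", i, sqs[i].rq_cnt, sqs[i].sq_cnt, sqs[i].cq_cnt);
        return 0;
    }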
D | nicvf_ethtool.c |
    171  for (qidx = 0; qidx < nic->qs->rq_cnt; qidx++) {    in nicvf_get_qset_strings()
    179  for (qidx = 0; qidx < nic->qs->sq_cnt; qidx++) {    in nicvf_get_qset_strings()
    236  (nic->qs->rq_cnt + nic->qs->sq_cnt);    in nicvf_get_sset_count()
    244  (snic->qs->rq_cnt + snic->qs->sq_cnt);    in nicvf_get_sset_count()
    260  for (qidx = 0; qidx < nic->qs->rq_cnt; qidx++) {    in nicvf_get_qset_stats()
    263  *((*data)++) = ((u64 *)&nic->qs->rq[qidx].stats)    in nicvf_get_qset_stats()
    267  for (qidx = 0; qidx < nic->qs->sq_cnt; qidx++) {    in nicvf_get_qset_stats()
    270  *((*data)++) = ((u64 *)&nic->qs->sq[qidx].stats)    in nicvf_get_qset_stats()
    418  struct queue_set *qs = nic->qs;    in nicvf_get_ringparam() local
    421  ring->rx_pending = qs->rbdr_len;    in nicvf_get_ringparam()
    [all …]
|
D | nic.h |
    269  struct queue_set *qs;    member
    468  struct qs_cfg_msg qs;    member
|
D | nicvf_queues.h | 315 void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs,
|
D | nic_main.c |
    670  (mbx.qs.num << NIC_QS_ID_SHIFT);    in nic_handle_mbx_intr()
    671  cfg = mbx.qs.cfg;    in nic_handle_mbx_intr()
    706  nic_tx_channel_cfg(nic, mbx.qs.num, &mbx.sq);    in nic_handle_mbx_intr()
|
/linux-4.4.14/drivers/net/ethernet/chelsio/cxgb3/ |
D | sge.c |
    726  static void init_qset_cntxt(struct sge_qset *qs, unsigned int id)    in init_qset_cntxt() argument
    728  qs->rspq.cntxt_id = id;    in init_qset_cntxt()
    729  qs->fl[0].cntxt_id = 2 * id;    in init_qset_cntxt()
    730  qs->fl[1].cntxt_id = 2 * id + 1;    in init_qset_cntxt()
    731  qs->txq[TXQ_ETH].cntxt_id = FW_TUNNEL_SGEEC_START + id;    in init_qset_cntxt()
    732  qs->txq[TXQ_ETH].token = FW_TUNNEL_TID_START + id;    in init_qset_cntxt()
    733  qs->txq[TXQ_OFLD].cntxt_id = FW_OFLD_SGEEC_START + id;    in init_qset_cntxt()
    734  qs->txq[TXQ_CTRL].cntxt_id = FW_CTRL_SGEEC_START + id;    in init_qset_cntxt()
    735  qs->txq[TXQ_CTRL].token = FW_CTRL_TID_START + id;    in init_qset_cntxt()
    1207  struct sge_qset *qs, struct sge_txq *q)    in t3_stop_tx_queue() argument
    [all …]
|
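init_qset_cntxt() (lines 726-735 above) shows how a single queue-set id fans out into hardware context ids: the response queue keeps the id itself, the two free lists get 2*id and 2*id+1, and the Ethernet/offload/control TX queues are offset from firmware base constants. The sketch below mirrors that fan-out with placeholder base values; the real FW_*_START constants come from the cxgb3 firmware headers, and the struct here is invented for illustration.

    #include <stdio.h>

    /* Placeholder firmware base ids; the driver takes these from its
     * firmware exports (FW_TUNNEL_SGEEC_START and friends). */
    enum { ETH_EGR_BASE = 0x100, ETH_TID_BASE = 0x200,
           OFLD_EGR_BASE = 0x300, CTRL_EGR_BASE = 0x400, CTRL_TID_BASE = 0x500 };

    struct qset_ids {
        unsigned int rspq, fl0, fl1;
        unsigned int eth_egr, eth_tid, ofld_egr, ctrl_egr, ctrl_tid;
    };

    /* One qset id derives every per-queue context id, as in the snippet. */
    static struct qset_ids qset_cntxt_ids(unsigned int id)
    {
        struct qset_ids q = {
            .rspq     = id,
            .fl0      = 2 * id,          /* free list 0 */
            .fl1      = 2 * id + 1,      /* free list 1 */
            .eth_egr  = ETH_EGR_BASE + id,
            .eth_tid  = ETH_TID_BASE + id,
            .ofld_egr = OFLD_EGR_BASE + id,
            .ctrl_egr = CTRL_EGR_BASE + id,
            .ctrl_tid = CTRL_TID_BASE + id,
        };
        return q;
    }

    int main(void)
    {
        struct qset_ids q = qset_cntxt_ids(3);
        printf("rspq=%u fl0=%u fl1=%u eth_egr=%#x\n", q.rspq, q.fl0, q.fl1, q.eth_egr);
        return 0;
    }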
D | cxgb3_main.c |
    411  adap->sge.qs[qidx].    in request_msix_data_irqs()
    414  &adap->sge.qs[qidx]);    in request_msix_data_irqs()
    418  &adap->sge.qs[qidx]);    in request_msix_data_irqs()
    438  &adapter->sge.qs[i]);    in free_irq_resources()
    448  while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) {    in await_mgmt_replies()
    461  unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts;    in init_tp_parity()
    600  struct sge_qset *qs = &adap->sge.qs[i];    in ring_dbs() local
    602  if (qs->adap)    in ring_dbs()
    604  t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX | V_EGRCNTX(qs->txq[j].cntxt_id));    in ring_dbs()
    613  struct sge_qset *qs = &adap->sge.qs[i];    in init_napi() local
    [all …]
|
D | adapter.h |
    68  struct sge_qset *qs;    member
    217  struct sge_qset qs[SGE_QSETS];    member
    325  void t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p);
|
/linux-4.4.14/drivers/net/ethernet/chelsio/cxgb4vf/ |
D | cxgb4vf_main.c |
    308  int qs, msi;    in name_msix_vecs() local
    310  for (qs = 0, msi = MSIX_IQFLINT; qs < pi->nqsets; qs++, msi++) {    in name_msix_vecs()
    312  "%s-%d", dev->name, qs);    in name_msix_vecs()
    575  int qs;    in setup_sge_queues() local
    577  for (qs = 0; qs < pi->nqsets; qs++, rxq++, txq++) {    in setup_sge_queues()
    585  netdev_get_tx_queue(dev, qs),    in setup_sge_queues()
    590  rxq->rspq.idx = qs;    in setup_sge_queues()
    606  int qs;    in setup_sge_queues() local
    608  for (qs = 0; qs < pi->nqsets; qs++, rxq++, txq++) {    in setup_sge_queues()
    648  int qs, err;    in setup_rss() local
    [all …]
|
D | sge.c |
    2546  int qs;    in t4vf_free_sge_resources() local
    2548  for (qs = 0; qs < adapter->sge.ethqsets; qs++, rxq++, txq++) {    in t4vf_free_sge_resources()
|
/linux-4.4.14/fs/qnx4/ |
D | inode.c |
    45  struct qnx4_sb_info *qs;    in qnx4_remount() local
    48  qs = qnx4_sb(sb);    in qnx4_remount()
    49  qs->Version = QNX4_VERSION;    in qnx4_remount()
    191  struct qnx4_sb_info *qs;    in qnx4_fill_super() local
    193  qs = kzalloc(sizeof(struct qnx4_sb_info), GFP_KERNEL);    in qnx4_fill_super()
    194  if (!qs)    in qnx4_fill_super()
    196  s->s_fs_info = qs;    in qnx4_fill_super()
    238  struct qnx4_sb_info *qs = qnx4_sb(sb);    in qnx4_kill_sb() local
    240  if (qs) {    in qnx4_kill_sb()
    241  kfree(qs->BitMap);    in qnx4_kill_sb()
    [all …]
|
/linux-4.4.14/drivers/net/ethernet/hisilicon/hns/ |
D | hns_ae_adapt.c |
    155  ae_handle->qs = (struct hnae_queue **)(&ae_handle->qs + 1);    in hns_ae_get_handle()
    157  ae_handle->qs[i] = &ring_pair_cb->q;    in hns_ae_get_handle()
    158  ae_handle->qs[i]->rx_ring.q = ae_handle->qs[i];    in hns_ae_get_handle()
    159  ae_handle->qs[i]->tx_ring.q = ae_handle->qs[i];    in hns_ae_get_handle()
    196  hns_ae_get_ring_pair(handle->qs[i])->used_by_vf = 0;    in hns_ae_put_handle()
    205  hns_rcb_ring_enable_hw(handle->qs[i], val);    in hns_ae_ring_enable_all()
    302  hns_rcb_wait_fbd_clean(handle->qs, handle->q_num, RCB_INT_FLAG_TX);    in hns_ae_stop()
    504  queue = handle->qs[idx];    in hns_ae_update_stats()
    583  hns_rcb_get_stats(handle->qs[idx], p);    in hns_ae_get_stats()
    706  hns_rcb_get_ring_regs(handle->qs[i], p);    in hns_ae_get_regs()
|
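Line 155 of hns_ae_adapt.c is the single-allocation idiom: the handle is allocated with room for the per-queue pointer array right behind it, and qs is pointed at that tail region (&ae_handle->qs + 1 is the first byte after the qs field, which is the struct's last member). A userspace sketch of the same trick with a cut-down handle struct invented for illustration:

    #include <stdio.h>
    #include <stdlib.h>

    struct queue { int id; };

    /* Cut-down stand-in for the hnae handle: qs is the last member, so
     * &h->qs + 1 coincides with the extra space appended below. */
    struct handle {
        int q_num;
        struct queue **qs;   /* will point into the same allocation */
    };

    static struct handle *handle_alloc(int q_num)
    {
        /* One allocation: the struct itself plus q_num queue pointers. */
        struct handle *h = calloc(1, sizeof(*h) + q_num * sizeof(struct queue *));
        if (!h)
            return NULL;
        h->q_num = q_num;
        h->qs = (struct queue **)(&h->qs + 1);   /* tail region after the struct */
        return h;
    }

    int main(void)
    {
        struct handle *h = handle_alloc(4);
        for (int i = 0; i < h->q_num; i++)
            h->qs[i] = NULL;          /* slots ready to hold per-queue pointers */
        printf("qs array at %p, struct ends at %p\n",
               (void *)h->qs, (void *)(h + 1));
        free(h);
        return 0;
    }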
D | hnae.c |
    292  hnae_fini_queue(handle->qs[i]);    in hnae_reinit_handle()
    298  ret = hnae_init_queue(handle, handle->qs[i], handle->dev);    in hnae_reinit_handle()
    305  hnae_fini_queue(handle->qs[j]);    in hnae_reinit_handle()
    341  ret = hnae_init_queue(handle, handle->qs[i], dev);    in hnae_get_handle()
    354  hnae_fini_queue(handle->qs[j]);    in hnae_get_handle()
    366  hnae_fini_queue(h->qs[i]);    in hnae_put_handle()
|
D | hns_enet.c |
    177  hnae_queue_xmit(priv->ae_handle->qs[skb->queue_mapping], buf_num);    in hns_nic_net_xmit_hw()
    914  h->dev->ops->toggle_queue_status(h->qs[k], 1);    in hns_nic_net_up()
    936  h->dev->ops->toggle_queue_status(h->qs[k], 0);    in hns_nic_net_up()
    1191  tx_bytes += h->qs[idx]->tx_ring.stats.tx_bytes;    in hns_nic_get_stats64()
    1192  tx_pkts += h->qs[idx]->tx_ring.stats.tx_pkts;    in hns_nic_get_stats64()
    1193  rx_bytes += h->qs[idx]->rx_ring.stats.rx_bytes;    in hns_nic_get_stats64()
    1194  rx_pkts += h->qs[idx]->rx_ring.stats.rx_pkts;    in hns_nic_get_stats64()
    1292  i, h->qs[i]->tx_ring.next_to_clean);    in hns_nic_dump()
    1294  i, h->qs[i]->tx_ring.next_to_use);    in hns_nic_dump()
    1296  i, h->qs[i]->rx_ring.next_to_clean);    in hns_nic_dump()
    [all …]
|
D | hns_dsaf_rcb.h | 118 void hns_rcb_wait_fbd_clean(struct hnae_queue **qs, int q_num, u32 flag);
|
D | hns_dsaf_rcb.c |
    41  void hns_rcb_wait_fbd_clean(struct hnae_queue **qs, int q_num, u32 flag)    in hns_rcb_wait_fbd_clean() argument
    50  fbd_num += dsaf_read_dev(qs[i],    in hns_rcb_wait_fbd_clean()
    53  fbd_num += dsaf_read_dev(qs[i],    in hns_rcb_wait_fbd_clean()
    62  dev_err(qs[i]->handle->owner_dev,    in hns_rcb_wait_fbd_clean()
|
D | hnae.h | 475 struct hnae_queue **qs; /* array base of all queues */ member
|
D | hns_ethtool.c | 685 queue = priv->ae_handle->qs[0]; in hns_get_ringparam()
|
/linux-4.4.14/fs/qnx6/ |
D | inode.c |
    304  struct qnx6_sb_info *qs;    in qnx6_fill_super() local
    309  qs = kzalloc(sizeof(struct qnx6_sb_info), GFP_KERNEL);    in qnx6_fill_super()
    310  if (!qs)    in qnx6_fill_super()
    312  s->s_fs_info = qs;    in qnx6_fill_super()
    477  kfree(qs);    in qnx6_fill_super()
    484  struct qnx6_sb_info *qs = QNX6_SB(sb);    in qnx6_put_super() local
    485  brelse(qs->sb_buf);    in qnx6_put_super()
    486  iput(qs->longfile);    in qnx6_put_super()
    487  iput(qs->inodes);    in qnx6_put_super()
    488  kfree(qs);    in qnx6_put_super()
|
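The qnx4 and qnx6 hits both follow the private-superblock-info pattern: fill_super kzalloc's the *_sb_info, hangs it off s->s_fs_info, and both the error path and put_super/kill_sb release it together with whatever it pinned (the bitmap in qnx4; the superblock buffer and internal inodes in qnx6). A minimal sketch of that ownership pattern with stand-in types; none of this is the real VFS API, just the shape of it.

    #include <stdlib.h>

    /* Stand-ins for the VFS objects, only to show the ownership pattern. */
    struct super_block { void *s_fs_info; };
    struct sb_info     { char *bitmap; };     /* plays the role of qnx6_sb_info */

    static int fill_super(struct super_block *sb)
    {
        struct sb_info *qs = calloc(1, sizeof(*qs));   /* kzalloc(..., GFP_KERNEL) */
        if (!qs)
            return -1;                                  /* -ENOMEM in the kernel */
        sb->s_fs_info = qs;

        qs->bitmap = malloc(4096);                      /* resources read from disk */
        if (!qs->bitmap) {
            free(qs);                                   /* error path frees qs again */
            sb->s_fs_info = NULL;
            return -1;
        }
        return 0;
    }

    static void put_super(struct super_block *sb)
    {
        struct sb_info *qs = sb->s_fs_info;
        if (qs) {                                       /* qnx4_kill_sb checks for NULL too */
            free(qs->bitmap);                           /* kfree(qs->BitMap) in qnx4 */
            free(qs);
            sb->s_fs_info = NULL;
        }
    }

    int main(void)
    {
        struct super_block sb = { 0 };
        if (fill_super(&sb) == 0)
            put_super(&sb);
        return 0;
    }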
/linux-4.4.14/drivers/md/ |
D | dm-cache-policy-mq.c |
    136  struct list_head qs[NR_QUEUE_LEVELS];    member
    148  INIT_LIST_HEAD(q->qs + i);    in queue_init()
    171  list_add_tail(elt, q->qs + level);    in queue_push()
    195  list_for_each(h, q->qs + level)    in queue_peek()
    223  list_for_each(h, q->qs + level) {    in queue_pop_old()
    262  list_add_tail(h, q->qs + i);    in queue_update_writeback_sentinels()
    281  list_add_tail(q->sentinels + i, q->qs + i);    in queue_tick()
    292  list_for_each_prev(h, q->qs + i) {    in queue_iterate_tick()
    649  head = mq->cache_clean.qs + level;    in check_generation()
    658  head = mq->cache_dirty.qs + level;    in check_generation()
    [all …]
|
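dm-cache-policy-mq keeps its structure as an array of list heads, one per level (struct list_head qs[NR_QUEUE_LEVELS]); queue_push() appends to the tail of the chosen level and queue_peek() scans levels from the lowest up. A compact sketch of such a multi-level queue over a hand-rolled circular doubly linked list; the names and level count are illustrative, not the policy's internals.

    #include <stddef.h>
    #include <stdio.h>

    #define NR_LEVELS 4

    struct node { struct node *prev, *next; int value; };

    /* One circular list head per level, like struct list_head qs[NR_QUEUE_LEVELS]. */
    struct mlqueue { struct node qs[NR_LEVELS]; };

    static void q_init(struct mlqueue *q)
    {
        for (int i = 0; i < NR_LEVELS; i++)
            q->qs[i].prev = q->qs[i].next = &q->qs[i];
    }

    /* Like queue_push(): append at the tail of the requested level. */
    static void q_push(struct mlqueue *q, int level, struct node *n)
    {
        struct node *head = &q->qs[level];
        n->prev = head->prev;
        n->next = head;
        head->prev->next = n;
        head->prev = n;
    }

    /* Like queue_peek(): first entry of the lowest non-empty level. */
    static struct node *q_peek(struct mlqueue *q)
    {
        for (int level = 0; level < NR_LEVELS; level++)
            if (q->qs[level].next != &q->qs[level])
                return q->qs[level].next;
        return NULL;
    }

    int main(void)
    {
        struct mlqueue q;
        struct node a = { .value = 1 }, b = { .value = 2 };
        q_init(&q);
        q_push(&q, 2, &a);        /* hotter entries live at higher levels */
        q_push(&q, 0, &b);
        printf("%d\n", q_peek(&q)->value);   /* 2: level 0 is scanned first */
        return 0;
    }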
D | dm-cache-policy-smq.c |
    242  struct ilist qs[MAX_LEVELS];    member
    263  l_init(q->qs + i);    in q_init()
    285  l_add_tail(q->es, q->qs + e->level, e);    in q_push()
    293  l_add_before(q->es, q->qs + e->level, old, e);    in q_push_before()
    298  l_del(q->es, q->qs + e->level, e);    in q_del()
    314  for (e = l_head(q->es, q->qs + level); e; e = l_next(q->es, e)) {    in q_peek()
    361  for (e = l_head(q->es, q->qs + level); e; e = l_next(q->es, e))    in __redist_pop_from()
    363  l_del(q->es, q->qs + e->level, e);    in __redist_pop_from()
    420  l = q->qs + level;    in q_redistribute()
    440  l_above = q->qs + level + 1u;    in q_redistribute()
    [all …]
|
/linux-4.4.14/Documentation/RCU/ |
D | trace.txt |
    59  …0!c=30455 g=30456 cnq=1/0:1 dt=126535/140000000000000/0 df=2002 of=4 ql=0/0 qs=N... b=10 ci=74572 …
    60  …1!c=30719 g=30720 cnq=1/0:0 dt=132007/140000000000000/0 df=1874 of=10 ql=0/0 qs=N... b=10 ci=12320…
    61  …2!c=30150 g=30151 cnq=1/1:1 dt=138537/140000000000000/0 df=1707 of=8 ql=0/0 qs=N... b=10 ci=80132 …
    62  …3 c=31249 g=31250 cnq=1/1:0 dt=107255/140000000000000/0 df=1749 of=6 ql=0/450 qs=NRW. b=10 ci=1517…
    63  …4!c=29502 g=29503 cnq=1/0:1 dt=83647/140000000000000/0 df=965 of=5 ql=0/0 qs=N... b=10 ci=65643 nc…
    64  …5 c=31201 g=31202 cnq=1/0:1 dt=70422/0/0 df=535 of=7 ql=0/0 qs=.... b=10 ci=58500 nci=0 co=764 ca=…
    65  …6!c=30253 g=30254 cnq=1/0:1 dt=95363/140000000000000/0 df=780 of=5 ql=0/0 qs=N... b=10 ci=100607 n…
    66  …7 c=31178 g=31178 cnq=1/0:0 dt=91536/0/0 df=547 of=4 ql=0/0 qs=.... b=10 ci=109819 nci=0 co=1115 c…
    140  o "qs" gives an indication of the state of the callback queue
    191  …0!c=12865 g=12866 cnq=1/0:1 dt=83113/140000000000000/0 df=288 of=11 ql=0/0 qs=N... kt=0/O ktl=944 …
    [all …]
|
/linux-4.4.14/net/sched/ |
D | sch_fq_codel.c |
    561  struct gnet_stats_queue qs = { 0 };    in fq_codel_dump_class_stats() local
    585  qs.qlen++;    in fq_codel_dump_class_stats()
    588  qs.backlog = q->backlogs[idx];    in fq_codel_dump_class_stats()
    589  qs.drops = flow->dropped;    in fq_codel_dump_class_stats()
    591  if (gnet_stats_copy_queue(d, NULL, &qs, 0) < 0)    in fq_codel_dump_class_stats()
|
D | sch_sfq.c |
    843  struct gnet_stats_queue qs = { 0 };    in sfq_dump_class_stats() local
    850  qs.qlen = slot->qlen;    in sfq_dump_class_stats()
    851  qs.backlog = slot->backlog;    in sfq_dump_class_stats()
    853  if (gnet_stats_copy_queue(d, NULL, &qs, qs.qlen) < 0)    in sfq_dump_class_stats()
|
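Both qdiscs above fill a gnet_stats_queue per class before calling gnet_stats_copy_queue(): fq_codel walks the flow's packet list to count qlen and copies the per-flow backlog and drop counters (lines 585-589), while sfq simply copies the slot's cached qlen/backlog. A toy sketch of the fq_codel-style bookkeeping; the skb and stats types below are invented stand-ins, not the kernel structures.

    #include <stdio.h>

    struct toy_skb { unsigned int len; struct toy_skb *next; };

    /* Rough analogue of struct gnet_stats_queue: queue length in packets,
     * backlog in bytes, drop count. */
    struct queue_stats { unsigned int qlen, backlog, drops; };

    /* Walk a flow's packet list to count qlen, and take backlog/drops from
     * per-flow counters, as fq_codel_dump_class_stats() does. */
    static struct queue_stats flow_stats(const struct toy_skb *head,
                                         unsigned int backlog, unsigned int dropped)
    {
        struct queue_stats qs = { 0 };
        for (const struct toy_skb *skb = head; skb; skb = skb->next)
            qs.qlen++;
        qs.backlog = backlog;
        qs.drops = dropped;
        return qs;
    }

    int main(void)
    {
        struct toy_skb c = { 300, NULL }, b = { 200, &c }, a = { 100, &b };
        struct queue_stats qs = flow_stats(&a, 600, 2);
        printf("qlen=%u backlog=%u drops=%u\n", qs.qlen, qs.backlog, qs.drops);
        return 0;
    }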
/linux-4.4.14/include/uapi/sound/ |
D | hdspm.h | 39 qs enumerator
|
/linux-4.4.14/drivers/net/ethernet/brocade/bna/ |
D | bfi_enet.h | 502 struct bfi_enet_rxq qs; /* small/header buffers */ member
|
D | bna_tx_rx.c |
    1643  bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].qs.q,    in bna_bfi_rx_enet_start()
    1645  cfg_req->q_cfg[i].qs.rx_buffer_size =    in bna_bfi_rx_enet_start()
|
/linux-4.4.14/sound/pci/rme9652/ |
D | hdspm.c | 6210 levels->speed = qs; in snd_hdspm_hwdep_ioctl()
|