sq 59 arch/sh/kernel/cpu/sh4/sq.c unsigned long *sq = (unsigned long *)start;
sq 62 arch/sh/kernel/cpu/sh4/sq.c for (len >>= 5; len--; sq += 8)
sq 63 arch/sh/kernel/cpu/sh4/sq.c prefetchw(sq);
sq 249 block/blk-throttle.c static struct throtl_grp *sq_to_tg(struct throtl_service_queue *sq)
sq 251 block/blk-throttle.c if (sq && sq->parent_sq)
sq 252 block/blk-throttle.c return container_of(sq, struct throtl_grp, service_queue);
sq 264 block/blk-throttle.c static struct throtl_data *sq_to_td(struct throtl_service_queue *sq)
sq 266 block/blk-throttle.c struct throtl_grp *tg = sq_to_tg(sq);
sq 271 block/blk-throttle.c return container_of(sq, struct throtl_data, service_queue);
sq 366 block/blk-throttle.c #define throtl_log(sq, fmt, args...) do { \
sq 367 block/blk-throttle.c struct throtl_grp *__tg = sq_to_tg((sq)); \
sq 368 block/blk-throttle.c struct throtl_data *__td = sq_to_td((sq)); \
sq 473 block/blk-throttle.c static void throtl_service_queue_init(struct throtl_service_queue *sq)
sq 475 block/blk-throttle.c INIT_LIST_HEAD(&sq->queued[0]);
sq 476 block/blk-throttle.c INIT_LIST_HEAD(&sq->queued[1]);
sq 477 block/blk-throttle.c sq->pending_tree = RB_ROOT_CACHED;
sq 478 block/blk-throttle.c timer_setup(&sq->pending_timer, throtl_pending_timer_fn, 0);
sq 523 block/blk-throttle.c struct throtl_service_queue *sq = &tg->service_queue;
sq 538 block/blk-throttle.c sq->parent_sq = &td->service_queue;
sq 540 block/blk-throttle.c sq->parent_sq = &blkg_to_tg(blkg->parent)->service_queue;
sq 703 block/blk-throttle.c static void throtl_schedule_pending_timer(struct throtl_service_queue *sq,
sq 706 block/blk-throttle.c unsigned long max_expire = jiffies + 8 * sq_to_td(sq)->throtl_slice;
sq 717 block/blk-throttle.c mod_timer(&sq->pending_timer, expires);
sq 718 block/blk-throttle.c throtl_log(sq, "schedule timer. delay=%lu jiffies=%lu",
delay=%lu jiffies=%lu", sq 740 block/blk-throttle.c static bool throtl_schedule_next_dispatch(struct throtl_service_queue *sq, sq 744 block/blk-throttle.c if (!sq->nr_pending) sq 747 block/blk-throttle.c update_min_dispatch_time(sq); sq 750 block/blk-throttle.c if (force || time_after(sq->first_pending_disptime, jiffies)) { sq 751 block/blk-throttle.c throtl_schedule_pending_timer(sq, sq->first_pending_disptime); sq 1057 block/blk-throttle.c struct throtl_service_queue *sq = &tg->service_queue; sq 1069 block/blk-throttle.c if (!sq->nr_queued[rw]) sq 1072 block/blk-throttle.c throtl_qnode_add_bio(bio, qn, &sq->queued[rw]); sq 1074 block/blk-throttle.c sq->nr_queued[rw]++; sq 1080 block/blk-throttle.c struct throtl_service_queue *sq = &tg->service_queue; sq 1084 block/blk-throttle.c bio = throtl_peek_queued(&sq->queued[READ]); sq 1088 block/blk-throttle.c bio = throtl_peek_queued(&sq->queued[WRITE]); sq 1116 block/blk-throttle.c struct throtl_service_queue *sq = &tg->service_queue; sq 1117 block/blk-throttle.c struct throtl_service_queue *parent_sq = sq->parent_sq; sq 1128 block/blk-throttle.c bio = throtl_pop_queued(&sq->queued[rw], &tg_to_put); sq 1129 block/blk-throttle.c sq->nr_queued[rw]--; sq 1158 block/blk-throttle.c struct throtl_service_queue *sq = &tg->service_queue; sq 1166 block/blk-throttle.c while ((bio = throtl_peek_queued(&sq->queued[READ])) && sq 1176 block/blk-throttle.c while ((bio = throtl_peek_queued(&sq->queued[WRITE])) && sq 1195 block/blk-throttle.c struct throtl_service_queue *sq; sq 1207 block/blk-throttle.c sq = &tg->service_queue; sq 1208 block/blk-throttle.c if (sq->nr_queued[0] || sq->nr_queued[1]) sq 1237 block/blk-throttle.c struct throtl_service_queue *sq = from_timer(sq, t, pending_timer); sq 1238 block/blk-throttle.c struct throtl_grp *tg = sq_to_tg(sq); sq 1239 block/blk-throttle.c struct throtl_data *td = sq_to_td(sq); sq 1250 block/blk-throttle.c parent_sq = sq->parent_sq; sq 1254 block/blk-throttle.c throtl_log(sq, "dispatch nr_queued=%u read=%u write=%u", sq 1255 block/blk-throttle.c sq->nr_queued[READ] + sq->nr_queued[WRITE], sq 1256 block/blk-throttle.c sq->nr_queued[READ], sq->nr_queued[WRITE]); sq 1258 block/blk-throttle.c ret = throtl_select_dispatch(sq); sq 1260 block/blk-throttle.c throtl_log(sq, "bios disp=%u", ret); sq 1264 block/blk-throttle.c if (throtl_schedule_next_dispatch(sq, false)) sq 1282 block/blk-throttle.c sq = parent_sq; sq 1283 block/blk-throttle.c tg = sq_to_tg(sq); sq 1368 block/blk-throttle.c struct throtl_service_queue *sq = &tg->service_queue; sq 1418 block/blk-throttle.c throtl_schedule_next_dispatch(sq->parent_sq, true); sq 1806 block/blk-throttle.c struct throtl_service_queue *sq = &tg->service_queue; sq 1817 block/blk-throttle.c if (read_limit && sq->nr_queued[READ] && sq 1818 block/blk-throttle.c (!write_limit || sq->nr_queued[WRITE])) sq 1820 block/blk-throttle.c if (write_limit && sq->nr_queued[WRITE] && sq 1821 block/blk-throttle.c (!read_limit || sq->nr_queued[READ])) sq 1904 block/blk-throttle.c struct throtl_service_queue *sq = &tg->service_queue; sq 1907 block/blk-throttle.c throtl_select_dispatch(sq); sq 1908 block/blk-throttle.c throtl_schedule_next_dispatch(sq, true); sq 2122 block/blk-throttle.c struct throtl_service_queue *sq; sq 2139 block/blk-throttle.c sq = &tg->service_queue; sq 2148 block/blk-throttle.c if (sq->nr_queued[rw]) sq 2183 block/blk-throttle.c sq = sq->parent_sq; sq 2184 block/blk-throttle.c tg = sq_to_tg(sq); sq 2190 block/blk-throttle.c throtl_log(sq, "[%c] bio. 
sq 2195 block/blk-throttle.c sq->nr_queued[READ], sq->nr_queued[WRITE]);
sq 2318 block/blk-throttle.c struct throtl_service_queue *sq = &tg->service_queue;
sq 2323 block/blk-throttle.c while ((bio = throtl_peek_queued(&sq->queued[READ])))
sq 2325 block/blk-throttle.c while ((bio = throtl_peek_queued(&sq->queued[WRITE])))
sq 849 drivers/infiniband/hw/bnxt_re/ib_verbs.c bytes = (qplib_qp->sq.max_wqe * BNXT_QPLIB_MAX_SQE_ENTRY_SIZE);
sq 855 drivers/infiniband/hw/bnxt_re/ib_verbs.c bytes += (qplib_qp->sq.max_wqe * psn_sz);
sq 863 drivers/infiniband/hw/bnxt_re/ib_verbs.c qplib_qp->sq.sg_info.sglist = umem->sg_head.sgl;
sq 864 drivers/infiniband/hw/bnxt_re/ib_verbs.c qplib_qp->sq.sg_info.npages = ib_umem_num_pages(umem);
sq 865 drivers/infiniband/hw/bnxt_re/ib_verbs.c qplib_qp->sq.sg_info.nmap = umem->nmap;
sq 886 drivers/infiniband/hw/bnxt_re/ib_verbs.c memset(&qplib_qp->sq.sg_info, 0, sizeof(qplib_qp->sq.sg_info));
sq 964 drivers/infiniband/hw/bnxt_re/ib_verbs.c qp->qplib_qp.sq.max_wqe = qp1_qp->rq.max_wqe;
sq 965 drivers/infiniband/hw/bnxt_re/ib_verbs.c qp->qplib_qp.sq.max_sge = 2;
sq 967 drivers/infiniband/hw/bnxt_re/ib_verbs.c qp->qplib_qp.sq.q_full_delta = 1;
sq 1044 drivers/infiniband/hw/bnxt_re/ib_verbs.c qp->qplib_qp.sq.max_sge = qp_init_attr->cap.max_send_sge;
sq 1045 drivers/infiniband/hw/bnxt_re/ib_verbs.c if (qp->qplib_qp.sq.max_sge > dev_attr->max_qp_sges)
sq 1046 drivers/infiniband/hw/bnxt_re/ib_verbs.c qp->qplib_qp.sq.max_sge = dev_attr->max_qp_sges;
sq 1104 drivers/infiniband/hw/bnxt_re/ib_verbs.c qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
sq 1106 drivers/infiniband/hw/bnxt_re/ib_verbs.c qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe -
sq 1111 drivers/infiniband/hw/bnxt_re/ib_verbs.c qp->qplib_qp.sq.max_sge++;
sq 1112 drivers/infiniband/hw/bnxt_re/ib_verbs.c if (qp->qplib_qp.sq.max_sge > dev_attr->max_qp_sges)
sq 1113 drivers/infiniband/hw/bnxt_re/ib_verbs.c qp->qplib_qp.sq.max_sge = dev_attr->max_qp_sges;
sq 1150 drivers/infiniband/hw/bnxt_re/ib_verbs.c qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
sq 1153 drivers/infiniband/hw/bnxt_re/ib_verbs.c qp->qplib_qp.sq.q_full_delta = BNXT_QPLIB_RESERVED_QP_WRS + 1;
sq 1161 drivers/infiniband/hw/bnxt_re/ib_verbs.c qp->qplib_qp.sq.q_full_delta -= 1;
sq 1525 drivers/infiniband/hw/bnxt_re/ib_verbs.c qp->qplib_qp.sq.psn = qp1_qp->qplib_qp.sq.psn;
sq 1704 drivers/infiniband/hw/bnxt_re/ib_verbs.c qp->qplib_qp.sq.psn = qp_attr->sq_psn;
sq 1738 drivers/infiniband/hw/bnxt_re/ib_verbs.c qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
sq 1740 drivers/infiniband/hw/bnxt_re/ib_verbs.c qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe -
sq 1747 drivers/infiniband/hw/bnxt_re/ib_verbs.c qp->qplib_qp.sq.q_full_delta -= 1;
sq 1748 drivers/infiniband/hw/bnxt_re/ib_verbs.c qp->qplib_qp.sq.max_sge = qp_attr->cap.max_send_sge;
sq 1815 drivers/infiniband/hw/bnxt_re/ib_verbs.c qp_attr->sq_psn = qplib_qp->sq.psn;
sq 1821 drivers/infiniband/hw/bnxt_re/ib_verbs.c qp_attr->cap.max_send_wr = qp->qplib_qp.sq.max_wqe;
sq 1822 drivers/infiniband/hw/bnxt_re/ib_verbs.c qp_attr->cap.max_send_sge = qp->qplib_qp.sq.max_sge;
sq 2290 drivers/infiniband/hw/bnxt_re/ib_verbs.c if (wr->num_sge > qp->qplib_qp.sq.max_sge) {
sq 2339 drivers/infiniband/hw/bnxt_re/ib_verbs.c if (wr->num_sge > qp->qplib_qp.sq.max_sge) {
sq 3102 drivers/infiniband/hw/bnxt_re/ib_verbs.c lib_qp->sq.phantom_wqe_cnt++;
sq 3103 drivers/infiniband/hw/bnxt_re/ib_verbs.c dev_dbg(&lib_qp->sq.hwq.pdev->dev,
sq 3105 drivers/infiniband/hw/bnxt_re/ib_verbs.c lib_qp->id, lib_qp->sq.hwq.prod,
sq 3106 drivers/infiniband/hw/bnxt_re/ib_verbs.c HWQ_CMP(lib_qp->sq.hwq.prod, &lib_qp->sq.hwq),
sq 3107 drivers/infiniband/hw/bnxt_re/ib_verbs.c lib_qp->sq.phantom_wqe_cnt);
sq 3120 drivers/infiniband/hw/bnxt_re/ib_verbs.c struct bnxt_qplib_q *sq;
sq 3138 drivers/infiniband/hw/bnxt_re/ib_verbs.c sq = &lib_qp->sq;
sq 3139 drivers/infiniband/hw/bnxt_re/ib_verbs.c if (sq->send_phantom) {
sq 3146 drivers/infiniband/hw/bnxt_re/ib_verbs.c sq->send_phantom = false;
sq 62 drivers/infiniband/hw/bnxt_re/qplib_fp.c qp->sq.condition = false;
sq 63 drivers/infiniband/hw/bnxt_re/qplib_fp.c qp->sq.send_phantom = false;
sq 64 drivers/infiniband/hw/bnxt_re/qplib_fp.c qp->sq.single = false;
sq 75 drivers/infiniband/hw/bnxt_re/qplib_fp.c if (!qp->sq.flushed) {
sq 80 drivers/infiniband/hw/bnxt_re/qplib_fp.c qp->sq.flushed = true;
sq 125 drivers/infiniband/hw/bnxt_re/qplib_fp.c if (qp->sq.flushed) {
sq 126 drivers/infiniband/hw/bnxt_re/qplib_fp.c qp->sq.flushed = false;
sq 143 drivers/infiniband/hw/bnxt_re/qplib_fp.c qp->sq.hwq.prod = 0;
sq 144 drivers/infiniband/hw/bnxt_re/qplib_fp.c qp->sq.hwq.cons = 0;
sq 178 drivers/infiniband/hw/bnxt_re/qplib_fp.c struct bnxt_qplib_q *sq = &qp->sq;
sq 186 drivers/infiniband/hw/bnxt_re/qplib_fp.c sq->hwq.max_elements * qp->sq_hdr_buf_size,
sq 200 drivers/infiniband/hw/bnxt_re/qplib_fp.c struct bnxt_qplib_q *sq = &qp->sq;
sq 203 drivers/infiniband/hw/bnxt_re/qplib_fp.c if (qp->sq_hdr_buf_size && sq->hwq.max_elements) {
sq 205 drivers/infiniband/hw/bnxt_re/qplib_fp.c sq->hwq.max_elements *
sq 728 drivers/infiniband/hw/bnxt_re/qplib_fp.c struct bnxt_qplib_q *sq = &qp->sq;
sq 742 drivers/infiniband/hw/bnxt_re/qplib_fp.c sq->hwq.max_elements = sq->max_wqe;
sq 743 drivers/infiniband/hw/bnxt_re/qplib_fp.c rc = bnxt_qplib_alloc_init_hwq(res->pdev, &sq->hwq, NULL,
sq 744 drivers/infiniband/hw/bnxt_re/qplib_fp.c &sq->hwq.max_elements,
sq 750 drivers/infiniband/hw/bnxt_re/qplib_fp.c sq->swq = kcalloc(sq->hwq.max_elements, sizeof(*sq->swq), GFP_KERNEL);
sq 751 drivers/infiniband/hw/bnxt_re/qplib_fp.c if (!sq->swq) {
sq 755 drivers/infiniband/hw/bnxt_re/qplib_fp.c pbl = &sq->hwq.pbl[PBL_LVL_0];
sq 758 drivers/infiniband/hw/bnxt_re/qplib_fp.c ((sq->hwq.level & CMDQ_CREATE_QP1_SQ_LVL_MASK)
sq 824 drivers/infiniband/hw/bnxt_re/qplib_fp.c req.sq_size = cpu_to_le32(sq->hwq.max_elements);
sq 828 drivers/infiniband/hw/bnxt_re/qplib_fp.c cpu_to_le16((sq->max_sge & CMDQ_CREATE_QP1_SQ_SGE_MASK) <<
sq 854 drivers/infiniband/hw/bnxt_re/qplib_fp.c bnxt_qplib_free_hwq(res->pdev, &sq->hwq);
sq 855 drivers/infiniband/hw/bnxt_re/qplib_fp.c kfree(sq->swq);
sq 865 drivers/infiniband/hw/bnxt_re/qplib_fp.c struct bnxt_qplib_q *sq = &qp->sq;
sq 890 drivers/infiniband/hw/bnxt_re/qplib_fp.c sq->hwq.max_elements = sq->max_wqe;
sq 891 drivers/infiniband/hw/bnxt_re/qplib_fp.c rc = bnxt_qplib_alloc_init_hwq(res->pdev, &sq->hwq, &sq->sg_info,
sq 892 drivers/infiniband/hw/bnxt_re/qplib_fp.c &sq->hwq.max_elements,
sq 899 drivers/infiniband/hw/bnxt_re/qplib_fp.c sq->swq = kcalloc(sq->hwq.max_elements, sizeof(*sq->swq), GFP_KERNEL);
sq 900 drivers/infiniband/hw/bnxt_re/qplib_fp.c if (!sq->swq) {
sq 904 drivers/infiniband/hw/bnxt_re/qplib_fp.c hw_sq_send_ptr = (struct sq_send **)sq->hwq.pbl_ptr;
sq 908 drivers/infiniband/hw/bnxt_re/qplib_fp.c (sq->hwq.max_elements)];
sq 910 drivers/infiniband/hw/bnxt_re/qplib_fp.c &hw_sq_send_ptr[get_sqe_pg(sq->hwq.max_elements)]
sq 911 drivers/infiniband/hw/bnxt_re/qplib_fp.c [get_sqe_idx(sq->hwq.max_elements)];
sq 919 drivers/infiniband/hw/bnxt_re/qplib_fp.c for (i = 0; i < sq->hwq.max_elements; i++) {
sq 920 drivers/infiniband/hw/bnxt_re/qplib_fp.c sq->swq[i].psn_search =
sq 924 drivers/infiniband/hw/bnxt_re/qplib_fp.c sq->swq[i].psn_ext =
sq 930 drivers/infiniband/hw/bnxt_re/qplib_fp.c pbl = &sq->hwq.pbl[PBL_LVL_0];
sq 933 drivers/infiniband/hw/bnxt_re/qplib_fp.c ((sq->hwq.level & CMDQ_CREATE_QP_SQ_LVL_MASK)
sq 1003 drivers/infiniband/hw/bnxt_re/qplib_fp.c req.sq_size = cpu_to_le32(sq->hwq.max_elements);
sq 1016 drivers/infiniband/hw/bnxt_re/qplib_fp.c max_ssge = qp->max_inline_data ? 6 : sq->max_sge;
sq 1087 drivers/infiniband/hw/bnxt_re/qplib_fp.c bnxt_qplib_free_hwq(res->pdev, &sq->hwq);
sq 1088 drivers/infiniband/hw/bnxt_re/qplib_fp.c kfree(sq->swq);
sq 1260 drivers/infiniband/hw/bnxt_re/qplib_fp.c req.sq_psn = cpu_to_le32(qp->sq.psn);
sq 1270 drivers/infiniband/hw/bnxt_re/qplib_fp.c req.sq_size = cpu_to_le32(qp->sq.hwq.max_elements);
sq 1272 drivers/infiniband/hw/bnxt_re/qplib_fp.c req.sq_sge = cpu_to_le16(qp->sq.max_sge);
sq 1355 drivers/infiniband/hw/bnxt_re/qplib_fp.c qp->sq.psn = le32_to_cpu(sb->sq_psn);
sq 1358 drivers/infiniband/hw/bnxt_re/qplib_fp.c qp->sq.max_wqe = qp->sq.hwq.max_elements;
sq 1360 drivers/infiniband/hw/bnxt_re/qplib_fp.c qp->sq.max_sge = le16_to_cpu(sb->sq_sge);
sq 1443 drivers/infiniband/hw/bnxt_re/qplib_fp.c bnxt_qplib_free_hwq(res->pdev, &qp->sq.hwq);
sq 1444 drivers/infiniband/hw/bnxt_re/qplib_fp.c kfree(qp->sq.swq);
sq 1459 drivers/infiniband/hw/bnxt_re/qplib_fp.c struct bnxt_qplib_q *sq = &qp->sq;
sq 1465 drivers/infiniband/hw/bnxt_re/qplib_fp.c sw_prod = HWQ_CMP(sq->hwq.prod, &sq->hwq);
sq 1508 drivers/infiniband/hw/bnxt_re/qplib_fp.c struct bnxt_qplib_q *sq = &qp->sq;
sq 1515 drivers/infiniband/hw/bnxt_re/qplib_fp.c sw_prod = HWQ_CMP(sq->hwq.prod, &sq->hwq);
sq 1524 drivers/infiniband/hw/bnxt_re/qplib_fp.c struct bnxt_qplib_q *sq = &qp->sq;
sq 1538 drivers/infiniband/hw/bnxt_re/qplib_fp.c dev_dbg(&sq->hwq.pdev->dev,
sq 1545 drivers/infiniband/hw/bnxt_re/qplib_fp.c if (bnxt_qplib_queue_full(sq)) {
sq 1546 drivers/infiniband/hw/bnxt_re/qplib_fp.c dev_err(&sq->hwq.pdev->dev,
sq 1548 drivers/infiniband/hw/bnxt_re/qplib_fp.c sq->hwq.prod, sq->hwq.cons, sq->hwq.max_elements,
sq 1549 drivers/infiniband/hw/bnxt_re/qplib_fp.c sq->q_full_delta);
sq 1553 drivers/infiniband/hw/bnxt_re/qplib_fp.c sw_prod = HWQ_CMP(sq->hwq.prod, &sq->hwq);
sq 1554 drivers/infiniband/hw/bnxt_re/qplib_fp.c swq = &sq->swq[sw_prod];
sq 1560 drivers/infiniband/hw/bnxt_re/qplib_fp.c swq->start_psn = sq->psn & BTH_PSN_MASK;
sq 1562 drivers/infiniband/hw/bnxt_re/qplib_fp.c hw_sq_send_ptr = (struct sq_send **)sq->hwq.pbl_ptr;
sq 1571 drivers/infiniband/hw/bnxt_re/qplib_fp.c dev_warn(&sq->hwq.pdev->dev,
sq 1637 drivers/infiniband/hw/bnxt_re/qplib_fp.c sq->psn = (sq->psn + 1) & BTH_PSN_MASK;
sq 1646 drivers/infiniband/hw/bnxt_re/qplib_fp.c sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
sq 1668 drivers/infiniband/hw/bnxt_re/qplib_fp.c sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
sq 1686 drivers/infiniband/hw/bnxt_re/qplib_fp.c sq->psn = (sq->psn + pkt_num) & BTH_PSN_MASK;
sq 1752 drivers/infiniband/hw/bnxt_re/qplib_fp.c swq->next_psn = sq->psn & BTH_PSN_MASK;
sq 1778 drivers/infiniband/hw/bnxt_re/qplib_fp.c sw_prod = HWQ_CMP(sq->hwq.prod, &sq->hwq);
sq 1779 drivers/infiniband/hw/bnxt_re/qplib_fp.c swq = &sq->swq[sw_prod];
sq 1785 drivers/infiniband/hw/bnxt_re/qplib_fp.c swq->start_psn = sq->psn & BTH_PSN_MASK;
sq 1787 drivers/infiniband/hw/bnxt_re/qplib_fp.c sq->hwq.prod++;
sq 1799 drivers/infiniband/hw/bnxt_re/qplib_fp.c dev_err(&sq->hwq.pdev->dev,
sq 2015 drivers/infiniband/hw/bnxt_re/qplib_fp.c static int __flush_sq(struct bnxt_qplib_q *sq, struct bnxt_qplib_qp *qp,
sq 2023 drivers/infiniband/hw/bnxt_re/qplib_fp.c sw_prod = HWQ_CMP(sq->hwq.prod, &sq->hwq);
sq 2026 drivers/infiniband/hw/bnxt_re/qplib_fp.c sw_cons = HWQ_CMP(sq->hwq.cons, &sq->hwq);
sq 2031 drivers/infiniband/hw/bnxt_re/qplib_fp.c if (sq->swq[sw_cons].wr_id == BNXT_QPLIB_FENCE_WRID) {
sq 2039 drivers/infiniband/hw/bnxt_re/qplib_fp.c cqe->wr_id = sq->swq[sw_cons].wr_id;
sq 2041 drivers/infiniband/hw/bnxt_re/qplib_fp.c cqe->type = sq->swq[sw_cons].type;
sq 2045 drivers/infiniband/hw/bnxt_re/qplib_fp.c sq->hwq.cons++;
sq 2048 drivers/infiniband/hw/bnxt_re/qplib_fp.c if (!(*budget) && HWQ_CMP(sq->hwq.cons, &sq->hwq) != sw_prod)
sq 2119 drivers/infiniband/hw/bnxt_re/qplib_fp.c struct bnxt_qplib_q *sq = &qp->sq;
sq 2130 drivers/infiniband/hw/bnxt_re/qplib_fp.c swq = &sq->swq[sw_sq_cons];
sq 2140 drivers/infiniband/hw/bnxt_re/qplib_fp.c sq->condition = true;
sq 2141 drivers/infiniband/hw/bnxt_re/qplib_fp.c sq->send_phantom = true;
sq 2149 drivers/infiniband/hw/bnxt_re/qplib_fp.c if (sq->condition) {
sq 2177 drivers/infiniband/hw/bnxt_re/qplib_fp.c peek_sq = &peek_qp->sq;
sq 2180 drivers/infiniband/hw/bnxt_re/qplib_fp.c , &sq->hwq);
sq 2182 drivers/infiniband/hw/bnxt_re/qplib_fp.c if (peek_sq == sq &&
sq 2183 drivers/infiniband/hw/bnxt_re/qplib_fp.c sq->swq[peek_sq_cons_idx].wr_id ==
sq 2191 drivers/infiniband/hw/bnxt_re/qplib_fp.c sq->condition = false;
sq 2192 drivers/infiniband/hw/bnxt_re/qplib_fp.c sq->single = true;
sq 2221 drivers/infiniband/hw/bnxt_re/qplib_fp.c struct bnxt_qplib_q *sq;
sq 2234 drivers/infiniband/hw/bnxt_re/qplib_fp.c sq = &qp->sq;
sq 2236 drivers/infiniband/hw/bnxt_re/qplib_fp.c cqe_sq_cons = HWQ_CMP(le16_to_cpu(hwcqe->sq_cons_idx), &sq->hwq);
sq 2237 drivers/infiniband/hw/bnxt_re/qplib_fp.c if (cqe_sq_cons > sq->hwq.max_elements) {
sq 2240 drivers/infiniband/hw/bnxt_re/qplib_fp.c cqe_sq_cons, sq->hwq.max_elements);
sq 2244 drivers/infiniband/hw/bnxt_re/qplib_fp.c if (qp->sq.flushed) {
sq 2255 drivers/infiniband/hw/bnxt_re/qplib_fp.c sw_sq_cons = HWQ_CMP(sq->hwq.cons, &sq->hwq);
sq 2260 drivers/infiniband/hw/bnxt_re/qplib_fp.c swq = &sq->swq[sw_sq_cons];
sq 2274 drivers/infiniband/hw/bnxt_re/qplib_fp.c if (HWQ_CMP((sw_sq_cons + 1), &sq->hwq) == cqe_sq_cons &&
sq 2299 drivers/infiniband/hw/bnxt_re/qplib_fp.c sq->hwq.cons++;
sq 2300 drivers/infiniband/hw/bnxt_re/qplib_fp.c if (sq->single)
sq 2305 drivers/infiniband/hw/bnxt_re/qplib_fp.c if (HWQ_CMP(sq->hwq.cons, &sq->hwq) != cqe_sq_cons) {
sq 2314 drivers/infiniband/hw/bnxt_re/qplib_fp.c sq->single = false;
sq 2595 drivers/infiniband/hw/bnxt_re/qplib_fp.c struct bnxt_qplib_q *sq, *rq;
sq 2617 drivers/infiniband/hw/bnxt_re/qplib_fp.c sq = &qp->sq;
sq 2624 drivers/infiniband/hw/bnxt_re/qplib_fp.c if (cqe_cons > sq->hwq.max_elements) {
sq 2627 drivers/infiniband/hw/bnxt_re/qplib_fp.c cqe_cons, sq->hwq.max_elements);
sq 2631 drivers/infiniband/hw/bnxt_re/qplib_fp.c if (qp->sq.flushed) {
sq 2643 drivers/infiniband/hw/bnxt_re/qplib_fp.c sw_cons = HWQ_CMP(sq->hwq.cons, &sq->hwq);
sq 2646 drivers/infiniband/hw/bnxt_re/qplib_fp.c if (sq->swq[sw_cons].flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
sq 2652 drivers/infiniband/hw/bnxt_re/qplib_fp.c cqe->wr_id = sq->swq[sw_cons].wr_id;
sq 2653 drivers/infiniband/hw/bnxt_re/qplib_fp.c cqe->type = sq->swq[sw_cons].type;
sq 2657 drivers/infiniband/hw/bnxt_re/qplib_fp.c sq->hwq.cons++;
sq 2724 drivers/infiniband/hw/bnxt_re/qplib_fp.c __flush_sq(&qp->sq, qp, &cqe, &budget);
sq 288 drivers/infiniband/hw/bnxt_re/qplib_fp.h struct bnxt_qplib_q sq;
sq 275 drivers/infiniband/hw/cxgb3/cxio_hal.c wq->sq = kcalloc(depth, sizeof(struct t3_swsq), GFP_KERNEL);
sq 276 drivers/infiniband/hw/cxgb3/cxio_hal.c if (!wq->sq)
sq 295 drivers/infiniband/hw/cxgb3/cxio_hal.c kfree(wq->sq);
sq 323 drivers/infiniband/hw/cxgb3/cxio_hal.c kfree(wq->sq);
sq 391 drivers/infiniband/hw/cxgb3/cxio_hal.c struct t3_swsq *sqp = wq->sq + Q_PTR2IDX(ptr, wq->sq_size_log2);
sq 397 drivers/infiniband/hw/cxgb3/cxio_hal.c sqp = wq->sq + Q_PTR2IDX(ptr, wq->sq_size_log2);
sq 1048 drivers/infiniband/hw/cxgb3/cxio_hal.c sqp = wq->sq + Q_PTR2IDX(ptr, wq->sq_size_log2);
sq 1052 drivers/infiniband/hw/cxgb3/cxio_hal.c sqp = wq->sq + Q_PTR2IDX(ptr, wq->sq_size_log2);
sq 1088 drivers/infiniband/hw/cxgb3/cxio_hal.c u32 rptr = wq->oldest_read - wq->sq + 1;
sq 1092 drivers/infiniband/hw/cxgb3/cxio_hal.c wq->oldest_read = wq->sq + Q_PTR2IDX(rptr, wq->sq_size_log2);
sq 1253 drivers/infiniband/hw/cxgb3/cxio_hal.c sqp = wq->sq +
sq 1272 drivers/infiniband/hw/cxgb3/cxio_hal.c *cookie = wq->sq[Q_PTR2IDX(wq->sq_rptr, wq->sq_size_log2)].wr_id;
sq 699 drivers/infiniband/hw/cxgb3/cxio_wr.h struct t3_swsq *sq; /* SW SQ */
sq 392 drivers/infiniband/hw/cxgb3/iwch_qp.c sqp = qhp->wq.sq +
sq 1879 drivers/infiniband/hw/cxgb4/cm.c __func__, ep->com.qp->wq.sq.qid, ep,
sq 3032 drivers/infiniband/hw/cxgb4/cm.c ep->com.qp->wq.sq.qid);
sq 195 drivers/infiniband/hw/cxgb4/cq.c CQE_QPID_V(wq->sq.qid));
sq 229 drivers/infiniband/hw/cxgb4/cq.c CQE_QPID_V(wq->sq.qid));
sq 247 drivers/infiniband/hw/cxgb4/cq.c if (wq->sq.flush_cidx == -1)
sq 248 drivers/infiniband/hw/cxgb4/cq.c wq->sq.flush_cidx = wq->sq.cidx;
sq 249 drivers/infiniband/hw/cxgb4/cq.c idx = wq->sq.flush_cidx;
sq 250 drivers/infiniband/hw/cxgb4/cq.c while (idx != wq->sq.pidx) {
sq 251 drivers/infiniband/hw/cxgb4/cq.c swsqe = &wq->sq.sw_sq[idx];
sq 254 drivers/infiniband/hw/cxgb4/cq.c if (wq->sq.oldest_read == swsqe) {
sq 258 drivers/infiniband/hw/cxgb4/cq.c if (++idx == wq->sq.size)
sq 261 drivers/infiniband/hw/cxgb4/cq.c wq->sq.flush_cidx += flushed;
sq 262 drivers/infiniband/hw/cxgb4/cq.c if (wq->sq.flush_cidx >= wq->sq.size)
sq 263 drivers/infiniband/hw/cxgb4/cq.c wq->sq.flush_cidx -= wq->sq.size;
sq 272 drivers/infiniband/hw/cxgb4/cq.c if (wq->sq.flush_cidx == -1)
sq 273 drivers/infiniband/hw/cxgb4/cq.c wq->sq.flush_cidx = wq->sq.cidx;
sq 274 drivers/infiniband/hw/cxgb4/cq.c cidx = wq->sq.flush_cidx;
sq 276 drivers/infiniband/hw/cxgb4/cq.c while (cidx != wq->sq.pidx) {
sq 277 drivers/infiniband/hw/cxgb4/cq.c swsqe = &wq->sq.sw_sq[cidx];
sq 279 drivers/infiniband/hw/cxgb4/cq.c if (++cidx == wq->sq.size)
sq 292 drivers/infiniband/hw/cxgb4/cq.c if (++cidx == wq->sq.size)
sq 294 drivers/infiniband/hw/cxgb4/cq.c wq->sq.flush_cidx = cidx;
sq 303 drivers/infiniband/hw/cxgb4/cq.c read_cqe->u.scqe.cidx = wq->sq.oldest_read->idx;
sq 304 drivers/infiniband/hw/cxgb4/cq.c read_cqe->len = htonl(wq->sq.oldest_read->read_len);
sq 315 drivers/infiniband/hw/cxgb4/cq.c u32 rptr = wq->sq.oldest_read - wq->sq.sw_sq + 1;
sq 317 drivers/infiniband/hw/cxgb4/cq.c if (rptr == wq->sq.size)
sq 319 drivers/infiniband/hw/cxgb4/cq.c while (rptr != wq->sq.pidx) {
sq 320 drivers/infiniband/hw/cxgb4/cq.c wq->sq.oldest_read = &wq->sq.sw_sq[rptr];
sq 322 drivers/infiniband/hw/cxgb4/cq.c if (wq->sq.oldest_read->opcode == FW_RI_READ_REQ)
sq 324 drivers/infiniband/hw/cxgb4/cq.c if (++rptr == wq->sq.size)
sq 327 drivers/infiniband/hw/cxgb4/cq.c wq->sq.oldest_read = NULL;
sq 386 drivers/infiniband/hw/cxgb4/cq.c if (!qhp->wq.sq.oldest_read->signaled) {
sq 404 drivers/infiniband/hw/cxgb4/cq.c swsqe = &qhp->wq.sq.sw_sq[CQE_WRID_SQ_IDX(hw_cqe)];
sq 425 drivers/infiniband/hw/cxgb4/cq.c WARN_ONCE(1, "Unexpected DRAIN CQE qp id %u!\n", wq->sq.qid);
sq 454 drivers/infiniband/hw/cxgb4/cq.c (CQE_QPID(cqe) == wq->sq.qid) && cqe_completes_wr(cqe, wq))
sq 630 drivers/infiniband/hw/cxgb4/cq.c if (!wq->sq.oldest_read->signaled) {
sq 680 drivers/infiniband/hw/cxgb4/cq.c if (!SW_CQE(hw_cqe) && (CQE_WRID_SQ_IDX(hw_cqe) != wq->sq.cidx)) {
sq 685 drivers/infiniband/hw/cxgb4/cq.c swsqe = &wq->sq.sw_sq[CQE_WRID_SQ_IDX(hw_cqe)];
sq 710 drivers/infiniband/hw/cxgb4/cq.c if (idx < wq->sq.cidx)
sq 711 drivers/infiniband/hw/cxgb4/cq.c wq->sq.in_use -= wq->sq.size + idx - wq->sq.cidx;
sq 713 drivers/infiniband/hw/cxgb4/cq.c wq->sq.in_use -= idx - wq->sq.cidx;
sq 715 drivers/infiniband/hw/cxgb4/cq.c wq->sq.cidx = (uint16_t)idx;
sq 716 drivers/infiniband/hw/cxgb4/cq.c pr_debug("completing sq idx %u\n", wq->sq.cidx);
sq 717 drivers/infiniband/hw/cxgb4/cq.c *cookie = wq->sq.sw_sq[wq->sq.cidx].wr_id;
sq 107 drivers/infiniband/hw/cxgb4/device.c le.qid = wq->sq.qid;
sq 109 drivers/infiniband/hw/cxgb4/device.c le.post_host_time = wq->sq.sw_sq[wq->sq.cidx].host_time;
sq 110 drivers/infiniband/hw/cxgb4/device.c le.post_sge_ts = wq->sq.sw_sq[wq->sq.cidx].sge_ts;
sq 250 drivers/infiniband/hw/cxgb4/device.c if (id != qp->wq.sq.qid)
sq 271 drivers/infiniband/hw/cxgb4/device.c qp->wq.sq.qid, qp->srq ? "srq" : "rq",
sq 274 drivers/infiniband/hw/cxgb4/device.c qp->wq.sq.flags & T4_SQ_ONCHIP,
sq 292 drivers/infiniband/hw/cxgb4/device.c qp->wq.sq.qid, qp->wq.rq.qid,
sq 294 drivers/infiniband/hw/cxgb4/device.c qp->wq.sq.flags & T4_SQ_ONCHIP,
sq 306 drivers/infiniband/hw/cxgb4/device.c qp->wq.sq.qid, qp->wq.rq.qid,
sq 308 drivers/infiniband/hw/cxgb4/device.c qp->wq.sq.flags & T4_SQ_ONCHIP);
sq 1287 drivers/infiniband/hw/cxgb4/device.c t4_ring_sq_db(&qp->wq, qp->wq.sq.wq_pidx_inc, NULL);
sq 1288 drivers/infiniband/hw/cxgb4/device.c qp->wq.sq.wq_pidx_inc = 0;
sq 1378 drivers/infiniband/hw/cxgb4/device.c qp->wq.sq.qid,
sq 1383 drivers/infiniband/hw/cxgb4/device.c pci_name(ctx->lldi.pdev), qp->wq.sq.qid);
sq 1388 drivers/infiniband/hw/cxgb4/device.c qp->wq.sq.wq_pidx_inc = 0;
sq 211 drivers/infiniband/hw/cxgb4/ev.c CQE_STATUS(err_cqe), qhp->wq.sq.qid);
sq 95 drivers/infiniband/hw/cxgb4/qp.c static void dealloc_oc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
sq 97 drivers/infiniband/hw/cxgb4/qp.c c4iw_ocqp_pool_free(rdev, sq->dma_addr, sq->memsize);
sq 100 drivers/infiniband/hw/cxgb4/qp.c static void dealloc_host_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
sq 102 drivers/infiniband/hw/cxgb4/qp.c dma_free_coherent(&(rdev->lldi.pdev->dev), sq->memsize, sq->queue,
sq 103 drivers/infiniband/hw/cxgb4/qp.c dma_unmap_addr(sq, mapping));
sq 106 drivers/infiniband/hw/cxgb4/qp.c static void dealloc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
sq 108 drivers/infiniband/hw/cxgb4/qp.c if (t4_sq_onchip(sq))
sq 109 drivers/infiniband/hw/cxgb4/qp.c dealloc_oc_sq(rdev, sq);
sq 111 drivers/infiniband/hw/cxgb4/qp.c dealloc_host_sq(rdev, sq);
sq 114 drivers/infiniband/hw/cxgb4/qp.c static int alloc_oc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
sq 118 drivers/infiniband/hw/cxgb4/qp.c sq->dma_addr = c4iw_ocqp_pool_alloc(rdev, sq->memsize);
sq 119 drivers/infiniband/hw/cxgb4/qp.c if (!sq->dma_addr)
sq 121 drivers/infiniband/hw/cxgb4/qp.c sq->phys_addr = rdev->oc_mw_pa + sq->dma_addr -
sq 123 drivers/infiniband/hw/cxgb4/qp.c sq->queue = (__force union t4_wr *)(rdev->oc_mw_kva + sq->dma_addr -
sq 125 drivers/infiniband/hw/cxgb4/qp.c sq->flags |= T4_SQ_ONCHIP;
sq 129 drivers/infiniband/hw/cxgb4/qp.c static int alloc_host_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
sq 131 drivers/infiniband/hw/cxgb4/qp.c sq->queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev), sq->memsize,
sq 132 drivers/infiniband/hw/cxgb4/qp.c &(sq->dma_addr), GFP_KERNEL);
sq 133 drivers/infiniband/hw/cxgb4/qp.c if (!sq->queue)
sq 135 drivers/infiniband/hw/cxgb4/qp.c sq->phys_addr = virt_to_phys(sq->queue);
sq 136 drivers/infiniband/hw/cxgb4/qp.c dma_unmap_addr_set(sq, mapping, sq->dma_addr);
sq 140 drivers/infiniband/hw/cxgb4/qp.c static int alloc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq, int user)
sq 144 drivers/infiniband/hw/cxgb4/qp.c ret = alloc_oc_sq(rdev, sq);
sq 146 drivers/infiniband/hw/cxgb4/qp.c ret = alloc_host_sq(rdev, sq);
sq 157 drivers/infiniband/hw/cxgb4/qp.c dealloc_sq(rdev, &wq->sq);
sq 158 drivers/infiniband/hw/cxgb4/qp.c kfree(wq->sq.sw_sq);
sq 159 drivers/infiniband/hw/cxgb4/qp.c c4iw_put_qpid(rdev, wq->sq.qid, uctx);
sq 213 drivers/infiniband/hw/cxgb4/qp.c wq->sq.qid = c4iw_get_qpid(rdev, uctx);
sq 214 drivers/infiniband/hw/cxgb4/qp.c if (!wq->sq.qid)
sq 226 drivers/infiniband/hw/cxgb4/qp.c wq->sq.sw_sq = kcalloc(wq->sq.size, sizeof(*wq->sq.sw_sq),
sq 228 drivers/infiniband/hw/cxgb4/qp.c if (!wq->sq.sw_sq) {
sq 257 drivers/infiniband/hw/cxgb4/qp.c ret = alloc_sq(rdev, &wq->sq, user);
sq 260 drivers/infiniband/hw/cxgb4/qp.c memset(wq->sq.queue, 0, wq->sq.memsize);
sq 261 drivers/infiniband/hw/cxgb4/qp.c dma_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr);
sq 273 drivers/infiniband/hw/cxgb4/qp.c wq->sq.queue,
sq 274 drivers/infiniband/hw/cxgb4/qp.c (unsigned long long)virt_to_phys(wq->sq.queue),
sq 282 drivers/infiniband/hw/cxgb4/qp.c wq->sq.bar2_va = c4iw_bar2_addrs(rdev, wq->sq.qid,
sq 284 drivers/infiniband/hw/cxgb4/qp.c &wq->sq.bar2_qid,
sq 285 drivers/infiniband/hw/cxgb4/qp.c user ? &wq->sq.bar2_pa : NULL);
sq 295 drivers/infiniband/hw/cxgb4/qp.c if (user && (!wq->sq.bar2_pa || (need_rq && !wq->rq.bar2_pa))) {
sq 297 drivers/infiniband/hw/cxgb4/qp.c pci_name(rdev->lldi.pdev), wq->sq.qid, wq->rq.qid);
sq 329 drivers/infiniband/hw/cxgb4/qp.c eqsize = wq->sq.size * T4_SQ_NUM_SLOTS +
sq 336 drivers/infiniband/hw/cxgb4/qp.c (t4_sq_onchip(&wq->sq) ? FW_RI_RES_WR_ONCHIP_F : 0) |
sq 342 drivers/infiniband/hw/cxgb4/qp.c (t4_sq_onchip(&wq->sq) ? FW_RI_RES_WR_FBMAX_V(2) :
sq 347 drivers/infiniband/hw/cxgb4/qp.c res->u.sqrq.eqid = cpu_to_be32(wq->sq.qid);
sq 348 drivers/infiniband/hw/cxgb4/qp.c res->u.sqrq.eqaddr = cpu_to_be64(wq->sq.dma_addr);
sq 381 drivers/infiniband/hw/cxgb4/qp.c ret = c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, wq->sq.qid, __func__);
sq 386 drivers/infiniband/hw/cxgb4/qp.c wq->sq.qid, wq->rq.qid, wq->db,
sq 387 drivers/infiniband/hw/cxgb4/qp.c wq->sq.bar2_va, wq->rq.bar2_va);
sq 396 drivers/infiniband/hw/cxgb4/qp.c dealloc_sq(rdev, &wq->sq);
sq 404 drivers/infiniband/hw/cxgb4/qp.c kfree(wq->sq.sw_sq);
sq 409 drivers/infiniband/hw/cxgb4/qp.c c4iw_put_qpid(rdev, wq->sq.qid, uctx);
sq 413 drivers/infiniband/hw/cxgb4/qp.c static int build_immd(struct t4_sq *sq, struct fw_ri_immd *immdp,
sq 429 drivers/infiniband/hw/cxgb4/qp.c if (dstp == (u8 *)&sq->queue[sq->size])
sq 430 drivers/infiniband/hw/cxgb4/qp.c dstp = (u8 *)sq->queue;
sq 431 drivers/infiniband/hw/cxgb4/qp.c if (rem <= (u8 *)&sq->queue[sq->size] - dstp)
sq 434 drivers/infiniband/hw/cxgb4/qp.c len = (u8 *)&sq->queue[sq->size] - dstp;
sq 488 drivers/infiniband/hw/cxgb4/qp.c static int build_rdma_send(struct t4_sq *sq, union t4_wr *wqe,
sq 526 drivers/infiniband/hw/cxgb4/qp.c ret = build_immd(sq, wqe->send.u.immd_src, wr,
sq 533 drivers/infiniband/hw/cxgb4/qp.c ret = build_isgl((__be64 *)sq->queue,
sq 534 drivers/infiniband/hw/cxgb4/qp.c (__be64 *)&sq->queue[sq->size],
sq 555 drivers/infiniband/hw/cxgb4/qp.c static int build_rdma_write(struct t4_sq *sq, union t4_wr *wqe,
sq 577 drivers/infiniband/hw/cxgb4/qp.c ret = build_immd(sq, wqe->write.u.immd_src, wr,
sq 584 drivers/infiniband/hw/cxgb4/qp.c ret = build_isgl((__be64 *)sq->queue,
sq 585 drivers/infiniband/hw/cxgb4/qp.c (__be64 *)&sq->queue[sq->size],
sq 606 drivers/infiniband/hw/cxgb4/qp.c static void build_immd_cmpl(struct t4_sq *sq, struct fw_ri_immd_cmpl *immdp,
sq 615 drivers/infiniband/hw/cxgb4/qp.c static void build_rdma_write_cmpl(struct t4_sq *sq,
sq 644 drivers/infiniband/hw/cxgb4/qp.c build_immd_cmpl(sq, &wcwr->u_cmpl.immd_src, wr->next);
sq 646 drivers/infiniband/hw/cxgb4/qp.c build_isgl((__be64 *)sq->queue, (__be64 *)&sq->queue[sq->size],
sq 650 drivers/infiniband/hw/cxgb4/qp.c build_isgl((__be64 *)sq->queue, (__be64 *)&sq->queue[sq->size],
sq 705 drivers/infiniband/hw/cxgb4/qp.c wqe = (union t4_wr *)((u8 *)qhp->wq.sq.queue +
sq 706 drivers/infiniband/hw/cxgb4/qp.c qhp->wq.sq.wq_pidx * T4_EQ_ENTRY_SIZE);
sq 707 drivers/infiniband/hw/cxgb4/qp.c build_rdma_write_cmpl(&qhp->wq.sq, &wqe->write_cmpl, wr, &len16);
sq 710 drivers/infiniband/hw/cxgb4/qp.c swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx];
sq 712 drivers/infiniband/hw/cxgb4/qp.c swsqe->idx = qhp->wq.sq.pidx;
sq 723 drivers/infiniband/hw/cxgb4/qp.c write_wrid = qhp->wq.sq.pidx;
sq 726 drivers/infiniband/hw/cxgb4/qp.c qhp->wq.sq.in_use++;
sq 727 drivers/infiniband/hw/cxgb4/qp.c if (++qhp->wq.sq.pidx == qhp->wq.sq.size)
sq 728 drivers/infiniband/hw/cxgb4/qp.c qhp->wq.sq.pidx = 0;
sq 731 drivers/infiniband/hw/cxgb4/qp.c swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx];
sq 736 drivers/infiniband/hw/cxgb4/qp.c swsqe->idx = qhp->wq.sq.pidx;
sq 748 drivers/infiniband/hw/cxgb4/qp.c wqe->write_cmpl.wrid_send = qhp->wq.sq.pidx;
sq 819 drivers/infiniband/hw/cxgb4/qp.c static int build_memreg(struct t4_sq *sq, union t4_wr *wqe,
sq 868 drivers/infiniband/hw/cxgb4/qp.c if (++p == (__be64 *)&sq->queue[sq->size])
sq 869 drivers/infiniband/hw/cxgb4/qp.c p = (__be64 *)sq->queue;
sq 874 drivers/infiniband/hw/cxgb4/qp.c if (++p == (__be64 *)&sq->queue[sq->size])
sq 875 drivers/infiniband/hw/cxgb4/qp.c p = (__be64 *)sq->queue;
sq 921 drivers/infiniband/hw/cxgb4/qp.c qhp->wq.sq.wq_pidx_inc += inc;
sq 1000 drivers/infiniband/hw/cxgb4/qp.c CQE_QPID_V(qhp->wq.sq.qid));
sq 1051 drivers/infiniband/hw/cxgb4/qp.c CQE_QPID_V(qhp->wq.sq.qid));
sq 1141 drivers/infiniband/hw/cxgb4/qp.c wqe = (union t4_wr *)((u8 *)qhp->wq.sq.queue +
sq 1142 drivers/infiniband/hw/cxgb4/qp.c qhp->wq.sq.wq_pidx * T4_EQ_ENTRY_SIZE);
sq 1149 drivers/infiniband/hw/cxgb4/qp.c swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx];
sq 1160 drivers/infiniband/hw/cxgb4/qp.c err = build_rdma_send(&qhp->wq.sq, wqe, wr, &len16);
sq 1172 drivers/infiniband/hw/cxgb4/qp.c err = build_rdma_write(&qhp->wq.sq, wqe, wr, &len16);
sq 1188 drivers/infiniband/hw/cxgb4/qp.c if (!qhp->wq.sq.oldest_read)
sq 1189 drivers/infiniband/hw/cxgb4/qp.c qhp->wq.sq.oldest_read = swsqe;
sq 1202 drivers/infiniband/hw/cxgb4/qp.c err = build_memreg(&qhp->wq.sq, wqe, reg_wr(wr),
sq 1228 drivers/infiniband/hw/cxgb4/qp.c swsqe->idx = qhp->wq.sq.pidx;
sq 1240 drivers/infiniband/hw/cxgb4/qp.c init_wr_hdr(wqe, qhp->wq.sq.pidx, fw_opcode, fw_flags, len16);
sq 1243 drivers/infiniband/hw/cxgb4/qp.c (unsigned long long)wr->wr_id, qhp->wq.sq.pidx,
sq 1566 drivers/infiniband/hw/cxgb4/qp.c pr_debug("qhp %p qid 0x%x tid %u\n", qhp, qhp->wq.sq.qid,
sq 1698 drivers/infiniband/hw/cxgb4/qp.c pr_debug("qhp %p qid 0x%x tid %u\n", qhp, qhp->wq.sq.qid, ep->hwtid);
sq 1718 drivers/infiniband/hw/cxgb4/qp.c qhp->ep->hwtid, qhp->wq.sq.qid, __func__);
sq 1755 drivers/infiniband/hw/cxgb4/qp.c qhp->wq.sq.qid, qhp->ep->hwtid, qhp->ep->ird, qhp->ep->ord);
sq 1800 drivers/infiniband/hw/cxgb4/qp.c wqe->u.init.qpid = cpu_to_be32(qhp->wq.sq.qid);
sq 1801 drivers/infiniband/hw/cxgb4/qp.c wqe->u.init.sq_eqid = cpu_to_be32(qhp->wq.sq.qid);
sq 1821 drivers/infiniband/hw/cxgb4/qp.c qhp->ep->hwtid, qhp->wq.sq.qid, __func__);
sq 1845 drivers/infiniband/hw/cxgb4/qp.c qhp, qhp->wq.sq.qid, qhp->wq.rq.qid, qhp->ep, qhp->attr.state,
sq 2031 drivers/infiniband/hw/cxgb4/qp.c qhp->wq.sq.qid);
sq 2089 drivers/infiniband/hw/cxgb4/qp.c __xa_erase(&rhp->qps, qhp->wq.sq.qid);
sq 2099 drivers/infiniband/hw/cxgb4/qp.c pr_debug("ib_qp %p qpid 0x%0x\n", ib_qp, qhp->wq.sq.qid);
sq 2166 drivers/infiniband/hw/cxgb4/qp.c qhp->wq.sq.size = sqsize;
sq 2167 drivers/infiniband/hw/cxgb4/qp.c qhp->wq.sq.memsize =
sq 2169 drivers/infiniband/hw/cxgb4/qp.c sizeof(*qhp->wq.sq.queue) + 16 * sizeof(__be64);
sq 2170 drivers/infiniband/hw/cxgb4/qp.c qhp->wq.sq.flush_cidx = -1;
sq 2179 drivers/infiniband/hw/cxgb4/qp.c qhp->wq.sq.memsize = roundup(qhp->wq.sq.memsize, PAGE_SIZE);
sq 2220 drivers/infiniband/hw/cxgb4/qp.c ret = xa_insert_irq(&rhp->qps, qhp->wq.sq.qid, qhp, GFP_KERNEL);
sq 2251 drivers/infiniband/hw/cxgb4/qp.c if (t4_sq_onchip(&qhp->wq.sq)) {
sq 2263 drivers/infiniband/hw/cxgb4/qp.c uresp.sqid = qhp->wq.sq.qid;
sq 2264 drivers/infiniband/hw/cxgb4/qp.c uresp.sq_size = qhp->wq.sq.size;
sq 2265 drivers/infiniband/hw/cxgb4/qp.c uresp.sq_memsize = qhp->wq.sq.memsize;
sq 2293 drivers/infiniband/hw/cxgb4/qp.c sq_key_mm->addr = qhp->wq.sq.phys_addr;
sq 2294 drivers/infiniband/hw/cxgb4/qp.c sq_key_mm->len = PAGE_ALIGN(qhp->wq.sq.memsize);
sq 2303 drivers/infiniband/hw/cxgb4/qp.c sq_db_key_mm->addr = (u64)(unsigned long)qhp->wq.sq.bar2_pa;
sq 2329 drivers/infiniband/hw/cxgb4/qp.c &qhp->wq.sq.queue[qhp->wq.sq.size].status.qp_err;
sq 2331 drivers/infiniband/hw/cxgb4/qp.c &qhp->wq.sq.queue[qhp->wq.sq.size].status.srqidx;
sq 2334 drivers/infiniband/hw/cxgb4/qp.c qhp->ibqp.qp_num = qhp->wq.sq.qid;
sq 2339 drivers/infiniband/hw/cxgb4/qp.c qhp->wq.sq.qid, qhp->wq.sq.size, qhp->wq.sq.memsize,
sq 2356 drivers/infiniband/hw/cxgb4/qp.c xa_erase_irq(&rhp->qps, qhp->wq.sq.qid);
sq 42 drivers/infiniband/hw/cxgb4/restrack.c if (rdma_nl_put_driver_u32(msg, "sqid", wq->sq.qid))
sq 46 drivers/infiniband/hw/cxgb4/restrack.c if (rdma_nl_put_driver_u32(msg, "memsize", wq->sq.memsize))
sq 48 drivers/infiniband/hw/cxgb4/restrack.c if (rdma_nl_put_driver_u32(msg, "cidx", wq->sq.cidx))
sq 50 drivers/infiniband/hw/cxgb4/restrack.c if (rdma_nl_put_driver_u32(msg, "pidx", wq->sq.pidx))
sq 52 drivers/infiniband/hw/cxgb4/restrack.c if (rdma_nl_put_driver_u32(msg, "wq_pidx", wq->sq.wq_pidx))
sq 54 drivers/infiniband/hw/cxgb4/restrack.c if (rdma_nl_put_driver_u32(msg, "flush_cidx", wq->sq.flush_cidx))
sq 56 drivers/infiniband/hw/cxgb4/restrack.c if (rdma_nl_put_driver_u32(msg, "in_use", wq->sq.in_use))
sq 58 drivers/infiniband/hw/cxgb4/restrack.c if (rdma_nl_put_driver_u32(msg, "size", wq->sq.size))
sq 60 drivers/infiniband/hw/cxgb4/restrack.c if (rdma_nl_put_driver_u32_hex(msg, "flags", wq->sq.flags))
sq 95 drivers/infiniband/hw/cxgb4/restrack.c static int fill_swsqe(struct sk_buff *msg, struct t4_sq *sq, u16 idx,
sq 119 drivers/infiniband/hw/cxgb4/restrack.c static int fill_swsqes(struct sk_buff *msg, struct t4_sq *sq,
sq 125 drivers/infiniband/hw/cxgb4/restrack.c if (fill_swsqe(msg, sq, first_idx, first_sqe))
sq 129 drivers/infiniband/hw/cxgb4/restrack.c if (fill_swsqe(msg, sq, last_idx, last_sqe))
sq 161 drivers/infiniband/hw/cxgb4/restrack.c if (wq.sq.cidx != wq.sq.pidx) {
sq 162 drivers/infiniband/hw/cxgb4/restrack.c first_sq_idx = wq.sq.cidx;
sq 163 drivers/infiniband/hw/cxgb4/restrack.c first_sqe = qhp->wq.sq.sw_sq[first_sq_idx];
sq 165 drivers/infiniband/hw/cxgb4/restrack.c last_sq_idx = wq.sq.pidx;
sq 167 drivers/infiniband/hw/cxgb4/restrack.c last_sq_idx = wq.sq.size - 1;
sq 169 drivers/infiniband/hw/cxgb4/restrack.c last_sqe = qhp->wq.sq.sw_sq[last_sq_idx];
sq 178 drivers/infiniband/hw/cxgb4/restrack.c if (fill_swsqes(msg, &wq.sq, first_sq_idx, fsp, last_sq_idx, lsp))
sq 383 drivers/infiniband/hw/cxgb4/t4.h struct t4_sq sq;
sq 527 drivers/infiniband/hw/cxgb4/t4.h static inline int t4_sq_onchip(struct t4_sq *sq)
sq 529 drivers/infiniband/hw/cxgb4/t4.h return sq->flags & T4_SQ_ONCHIP;
sq 534 drivers/infiniband/hw/cxgb4/t4.h return wq->sq.in_use == 0;
sq 539 drivers/infiniband/hw/cxgb4/t4.h return wq->sq.in_use == (wq->sq.size - 1);
sq 544 drivers/infiniband/hw/cxgb4/t4.h return wq->sq.size - 1 - wq->sq.in_use;
sq 549 drivers/infiniband/hw/cxgb4/t4.h wq->sq.in_use++;
sq 550 drivers/infiniband/hw/cxgb4/t4.h if (++wq->sq.pidx == wq->sq.size)
sq 551 drivers/infiniband/hw/cxgb4/t4.h wq->sq.pidx = 0;
sq 552 drivers/infiniband/hw/cxgb4/t4.h wq->sq.wq_pidx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
sq 553 drivers/infiniband/hw/cxgb4/t4.h if (wq->sq.wq_pidx >= wq->sq.size * T4_SQ_NUM_SLOTS)
sq 554 drivers/infiniband/hw/cxgb4/t4.h wq->sq.wq_pidx %= wq->sq.size * T4_SQ_NUM_SLOTS;
sq 559 drivers/infiniband/hw/cxgb4/t4.h if (wq->sq.cidx == wq->sq.flush_cidx)
sq 560 drivers/infiniband/hw/cxgb4/t4.h wq->sq.flush_cidx = -1;
sq 561 drivers/infiniband/hw/cxgb4/t4.h wq->sq.in_use--;
sq 562 drivers/infiniband/hw/cxgb4/t4.h if (++wq->sq.cidx == wq->sq.size)
sq 563 drivers/infiniband/hw/cxgb4/t4.h wq->sq.cidx = 0;
sq 568 drivers/infiniband/hw/cxgb4/t4.h return wq->sq.queue[wq->sq.size].status.host_wq_pidx;
sq 573 drivers/infiniband/hw/cxgb4/t4.h return wq->sq.size * T4_SQ_NUM_SLOTS;
sq 616 drivers/infiniband/hw/cxgb4/t4.h if (wq->sq.bar2_va) {
sq 617 drivers/infiniband/hw/cxgb4/t4.h if (inc == 1 && wq->sq.bar2_qid == 0 && wqe) {
sq 618 drivers/infiniband/hw/cxgb4/t4.h pr_debug("WC wq->sq.pidx = %d\n", wq->sq.pidx);
sq 620 drivers/infiniband/hw/cxgb4/t4.h (wq->sq.bar2_va + SGE_UDB_WCDOORBELL),
sq 623 drivers/infiniband/hw/cxgb4/t4.h pr_debug("DB wq->sq.pidx = %d\n", wq->sq.pidx);
sq 624 drivers/infiniband/hw/cxgb4/t4.h writel(PIDX_T5_V(inc) | QID_V(wq->sq.bar2_qid),
sq 625 drivers/infiniband/hw/cxgb4/t4.h wq->sq.bar2_va + SGE_UDB_KDOORBELL);
sq 632 drivers/infiniband/hw/cxgb4/t4.h writel(QID_V(wq->sq.qid) | PIDX_V(inc), wq->db);
sq 138 drivers/infiniband/hw/efa/efa_com.c struct efa_com_admin_sq *sq = &aq->sq;
sq 139 drivers/infiniband/hw/efa/efa_com.c u16 size = aq->depth * sizeof(*sq->entries);
sq 144 drivers/infiniband/hw/efa/efa_com.c sq->entries =
sq 145 drivers/infiniband/hw/efa/efa_com.c dma_alloc_coherent(aq->dmadev, size, &sq->dma_addr, GFP_KERNEL);
sq 146 drivers/infiniband/hw/efa/efa_com.c if (!sq->entries)
sq 149 drivers/infiniband/hw/efa/efa_com.c spin_lock_init(&sq->lock);
sq 151 drivers/infiniband/hw/efa/efa_com.c sq->cc = 0;
sq 152 drivers/infiniband/hw/efa/efa_com.c sq->pc = 0;
sq 153 drivers/infiniband/hw/efa/efa_com.c sq->phase = 1;
sq 155 drivers/infiniband/hw/efa/efa_com.c sq->db_addr = (u32 __iomem *)(edev->reg_bar + EFA_REGS_AQ_PROD_DB_OFF);
sq 157 drivers/infiniband/hw/efa/efa_com.c addr_high = EFA_DMA_ADDR_TO_UINT32_HIGH(sq->dma_addr);
sq 158 drivers/infiniband/hw/efa/efa_com.c addr_low = EFA_DMA_ADDR_TO_UINT32_LOW(sq->dma_addr);
sq 328 drivers/infiniband/hw/efa/efa_com.c pi = aq->sq.pc & queue_size_mask;
sq 334 drivers/infiniband/hw/efa/efa_com.c cmd_id |= aq->sq.pc & ~queue_size_mask;
sq 338 drivers/infiniband/hw/efa/efa_com.c cmd->aq_common_descriptor.flags |= aq->sq.phase &
sq 354 drivers/infiniband/hw/efa/efa_com.c aqe = &aq->sq.entries[pi];
sq 358 drivers/infiniband/hw/efa/efa_com.c aq->sq.pc++;
sq 361 drivers/infiniband/hw/efa/efa_com.c if ((aq->sq.pc & queue_size_mask) == 0)
sq 362 drivers/infiniband/hw/efa/efa_com.c aq->sq.phase = !aq->sq.phase;
sq 365 drivers/infiniband/hw/efa/efa_com.c writel(aq->sq.pc, aq->sq.db_addr);
sq 408 drivers/infiniband/hw/efa/efa_com.c spin_lock(&aq->sq.lock);
sq 411 drivers/infiniband/hw/efa/efa_com.c spin_unlock(&aq->sq.lock);
sq 417 drivers/infiniband/hw/efa/efa_com.c spin_unlock(&aq->sq.lock);
sq 487 drivers/infiniband/hw/efa/efa_com.c aq->sq.cc += comp_num;
sq 576 drivers/infiniband/hw/efa/efa_com.c comp_ctx, aq->sq.pc, aq->sq.cc, aq->cq.cc);
sq 583 drivers/infiniband/hw/efa/efa_com.c comp_ctx, aq->sq.pc, aq->sq.cc, aq->cq.cc);
sq 678 drivers/infiniband/hw/efa/efa_com.c struct efa_com_admin_sq *sq = &aq->sq;
sq 686 drivers/infiniband/hw/efa/efa_com.c size = aq->depth * sizeof(*sq->entries);
sq 687 drivers/infiniband/hw/efa/efa_com.c dma_free_coherent(edev->dmadev, size, sq->entries, sq->dma_addr);
sq 799 drivers/infiniband/hw/efa/efa_com.c dma_free_coherent(edev->dmadev, aq->depth * sizeof(*aq->sq.entries),
sq 800 drivers/infiniband/hw/efa/efa_com.c aq->sq.entries, aq->sq.dma_addr);
sq 66 drivers/infiniband/hw/efa/efa_com.h struct efa_com_admin_sq sq;
sq 661 drivers/infiniband/hw/hns/hns_roce_device.h struct hns_roce_wq sq;
sq 90 drivers/infiniband/hw/hns/hns_roce_hw_v1.c spin_lock_irqsave(&qp->sq.lock, flags);
sq 93 drivers/infiniband/hw/hns/hns_roce_hw_v1.c if (hns_roce_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
sq 99 drivers/infiniband/hw/hns/hns_roce_hw_v1.c wqe_idx = (qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1);
sq 101 drivers/infiniband/hw/hns/hns_roce_hw_v1.c if (unlikely(wr->num_sge > qp->sq.max_gs)) {
sq 103 drivers/infiniband/hw/hns/hns_roce_hw_v1.c wr->num_sge, qp->sq.max_gs);
sq 110 drivers/infiniband/hw/hns/hns_roce_hw_v1.c qp->sq.wrid[wqe_idx] = wr->wr_id;
sq 317 drivers/infiniband/hw/hns/hns_roce_hw_v1.c qp->sq.head += nreq;
sq 325 drivers/infiniband/hw/hns/hns_roce_hw_v1.c (qp->sq.head & ((qp->sq.wqe_cnt << 1) - 1)));
sq 337 drivers/infiniband/hw/hns/hns_roce_hw_v1.c hns_roce_write64_k(doorbell, qp->sq.db_reg_l);
sq 340 drivers/infiniband/hw/hns/hns_roce_hw_v1.c spin_unlock_irqrestore(&qp->sq.lock, flags);
sq 2296 drivers/infiniband/hw/hns/hns_roce_hw_v1.c ((*cur_qp)->sq.wqe_cnt-1));
sq 2321 drivers/infiniband/hw/hns/hns_roce_hw_v1.c wq = &(*cur_qp)->sq;
sq 2599 drivers/infiniband/hw/hns/hns_roce_hw_v1.c ilog2((unsigned int)hr_qp->sq.wqe_cnt));
sq 2628 drivers/infiniband/hw/hns/hns_roce_hw_v1.c QP1C_BYTES_20_SQ_HEAD_S, hr_qp->sq.head);
sq 2700 drivers/infiniband/hw/hns/hns_roce_hw_v1.c hr_qp->sq.head = 0;
sq 2701 drivers/infiniband/hw/hns/hns_roce_hw_v1.c hr_qp->sq.tail = 0;
sq 2783 drivers/infiniband/hw/hns/hns_roce_hw_v1.c ilog2((unsigned int)hr_qp->sq.wqe_cnt));
sq 2849 drivers/infiniband/hw/hns/hns_roce_hw_v1.c ilog2((unsigned int)hr_qp->sq.wqe_cnt));
sq 3313 drivers/infiniband/hw/hns/hns_roce_hw_v1.c hr_qp->sq.head = 0;
sq 3314 drivers/infiniband/hw/hns/hns_roce_hw_v1.c hr_qp->sq.tail = 0;
sq 3438 drivers/infiniband/hw/hns/hns_roce_hw_v1.c qp_attr->cap.max_send_wr = hr_qp->sq.wqe_cnt;
sq 3439 drivers/infiniband/hw/hns/hns_roce_hw_v1.c qp_attr->cap.max_send_sge = hr_qp->sq.max_gs;
sq 3576 drivers/infiniband/hw/hns/hns_roce_hw_v1.c qp_attr->cap.max_send_wr = hr_qp->sq.wqe_cnt;
sq 3577 drivers/infiniband/hw/hns/hns_roce_hw_v1.c qp_attr->cap.max_send_sge = hr_qp->sq.max_gs;
sq 3636 drivers/infiniband/hw/hns/hns_roce_hw_v1.c kfree(hr_qp->sq.wrid);
sq 276 drivers/infiniband/hw/hns/hns_roce_hw_v2.c spin_lock_irqsave(&qp->sq.lock, flags);
sq 280 drivers/infiniband/hw/hns/hns_roce_hw_v2.c if (hns_roce_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
sq 286 drivers/infiniband/hw/hns/hns_roce_hw_v2.c wqe_idx = (qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1);
sq 288 drivers/infiniband/hw/hns/hns_roce_hw_v2.c if (unlikely(wr->num_sge > qp->sq.max_gs)) {
sq 290 drivers/infiniband/hw/hns/hns_roce_hw_v2.c wr->num_sge, qp->sq.max_gs);
sq 297 drivers/infiniband/hw/hns/hns_roce_hw_v2.c qp->sq.wrid[wqe_idx] = wr->wr_id;
sq 299 drivers/infiniband/hw/hns/hns_roce_hw_v2.c ~(((qp->sq.head + nreq) >> ilog2(qp->sq.wqe_cnt)) & 0x1);
sq 569 drivers/infiniband/hw/hns/hns_roce_hw_v2.c spin_unlock_irqrestore(&qp->sq.lock, flags);
sq 577 drivers/infiniband/hw/hns/hns_roce_hw_v2.c qp->sq.head += nreq;
sq 590 drivers/infiniband/hw/hns/hns_roce_hw_v2.c qp->sq.head & ((qp->sq.wqe_cnt << 1) - 1));
sq 594 drivers/infiniband/hw/hns/hns_roce_hw_v2.c hns_roce_write64(hr_dev, (__le32 *)&sq_db, qp->sq.db_reg_l);
sq 605 drivers/infiniband/hw/hns/hns_roce_hw_v2.c spin_unlock_irqrestore(&qp->sq.lock, flags);
sq 612 drivers/infiniband/hw/hns/hns_roce_hw_v2.c spin_unlock_irqrestore(&qp->sq.lock, flags);
sq 2741 drivers/infiniband/hw/hns/hns_roce_hw_v2.c wq = &(*cur_qp)->sq;
sq 3232 drivers/infiniband/hw/hns/hns_roce_hw_v2.c hr_qp->sq.max_gs >
sq 3241 drivers/infiniband/hw/hns/hns_roce_hw_v2.c ilog2((unsigned int)hr_qp->sq.wqe_cnt));
sq 3744 drivers/infiniband/hw/hns/hns_roce_hw_v2.c hr_qp->sq.max_gs > HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE) ?
sq 3968 drivers/infiniband/hw/hns/hns_roce_hw_v2.c hr_qp->sq.max_gs > HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE) ?
sq 3974 drivers/infiniband/hw/hns/hns_roce_hw_v2.c ((ibqp->qp_type == IB_QPT_GSI) || hr_qp->sq.max_gs >
sq 4416 drivers/infiniband/hw/hns/hns_roce_hw_v2.c hr_qp->sq.head);
sq 4469 drivers/infiniband/hw/hns/hns_roce_hw_v2.c hr_qp->sq.head = 0;
sq 4470 drivers/infiniband/hw/hns/hns_roce_hw_v2.c hr_qp->sq.tail = 0;
sq 4635 drivers/infiniband/hw/hns/hns_roce_hw_v2.c qp_attr->cap.max_send_wr = hr_qp->sq.wqe_cnt;
sq 4636 drivers/infiniband/hw/hns/hns_roce_hw_v2.c qp_attr->cap.max_send_sge = hr_qp->sq.max_gs;
sq 4698 drivers/infiniband/hw/hns/hns_roce_hw_v2.c if (hr_qp->sq.wqe_cnt && (hr_qp->sdb_en == 1))
sq 4704 drivers/infiniband/hw/hns/hns_roce_hw_v2.c kfree(hr_qp->sq.wrid);
sq 4849 drivers/infiniband/hw/hns/hns_roce_hw_v2.c hr_qp->sq.head = *(int *)(hr_qp->sdb.virt_addr);
sq 360 drivers/infiniband/hw/hns/hns_roce_qp.c if (check_shl_overflow(1, ucmd->log_sq_bb_count, &hr_qp->sq.wqe_cnt) ||
sq 361 drivers/infiniband/hw/hns/hns_roce_qp.c hr_qp->sq.wqe_cnt > hr_dev->caps.max_wqes)
sq 370 drivers/infiniband/hw/hns/hns_roce_qp.c hr_qp->sq.wqe_shift = ucmd->log_sq_stride;
sq 374 drivers/infiniband/hw/hns/hns_roce_qp.c hr_qp->sq.max_gs = roundup_pow_of_two(max_cnt);
sq 376 drivers/infiniband/hw/hns/hns_roce_qp.c hr_qp->sq.max_gs = max_cnt;
sq 378 drivers/infiniband/hw/hns/hns_roce_qp.c if (hr_qp->sq.max_gs > 2)
sq 379 drivers/infiniband/hw/hns/hns_roce_qp.c hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt *
sq 380 drivers/infiniband/hw/hns/hns_roce_qp.c (hr_qp->sq.max_gs - 2));
sq 382 drivers/infiniband/hw/hns/hns_roce_qp.c if ((hr_qp->sq.max_gs > 2) && (hr_dev->pci_dev->revision == 0x20)) {
sq 398 drivers/infiniband/hw/hns/hns_roce_qp.c HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
sq 399 drivers/infiniband/hw/hns/hns_roce_qp.c hr_qp->sq.wqe_shift), PAGE_SIZE);
sq 401 drivers/infiniband/hw/hns/hns_roce_qp.c hr_qp->sq.offset = 0;
sq 402 drivers/infiniband/hw/hns/hns_roce_qp.c hr_qp->rq.offset = HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
sq 403 drivers/infiniband/hw/hns/hns_roce_qp.c hr_qp->sq.wqe_shift), PAGE_SIZE);
sq 412 drivers/infiniband/hw/hns/hns_roce_qp.c HNS_ROCE_ALOGN_UP((hr_qp->sq.wqe_cnt <<
sq 413 drivers/infiniband/hw/hns/hns_roce_qp.c hr_qp->sq.wqe_shift), page_size);
sq 415 drivers/infiniband/hw/hns/hns_roce_qp.c hr_qp->sq.offset = 0;
sq 418 drivers/infiniband/hw/hns/hns_roce_qp.c (hr_qp->sq.wqe_cnt <<
sq 419 drivers/infiniband/hw/hns/hns_roce_qp.c hr_qp->sq.wqe_shift),
sq 427 drivers/infiniband/hw/hns/hns_roce_qp.c (hr_qp->sq.wqe_cnt <<
sq 428 drivers/infiniband/hw/hns/hns_roce_qp.c hr_qp->sq.wqe_shift),
sq 457 drivers/infiniband/hw/hns/hns_roce_qp.c buf_size = hr_qp->sge.offset - hr_qp->sq.offset;
sq 459 drivers/infiniband/hw/hns/hns_roce_qp.c buf_size = hr_qp->rq.offset - hr_qp->sq.offset;
sq 465 drivers/infiniband/hw/hns/hns_roce_qp.c hr_qp->sq.offset / page_size,
sq 526 drivers/infiniband/hw/hns/hns_roce_qp.c if (hr_qp->sq.max_gs > 2) {
sq 527 drivers/infiniband/hw/hns/hns_roce_qp.c hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt *
sq 528 drivers/infiniband/hw/hns/hns_roce_qp.c (hr_qp->sq.max_gs - 2));
sq 534 drivers/infiniband/hw/hns/hns_roce_qp.c hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt *
sq 535 drivers/infiniband/hw/hns/hns_roce_qp.c hr_qp->sq.max_gs);
sq 539 drivers/infiniband/hw/hns/hns_roce_qp.c if ((hr_qp->sq.max_gs > 2) && hr_dev->pci_dev->revision == 0x20) {
sq 567 drivers/infiniband/hw/hns/hns_roce_qp.c hr_qp->sq.wqe_shift = ilog2(hr_dev->caps.max_sq_desc_sz);
sq 574 drivers/infiniband/hw/hns/hns_roce_qp.c hr_qp->sq.wqe_cnt = roundup_pow_of_two(max_cnt);
sq 575 drivers/infiniband/hw/hns/hns_roce_qp.c if ((u32)hr_qp->sq.wqe_cnt > hr_dev->caps.max_wqes) {
sq 583 drivers/infiniband/hw/hns/hns_roce_qp.c hr_qp->sq.max_gs = roundup_pow_of_two(max_cnt);
sq 585 drivers/infiniband/hw/hns/hns_roce_qp.c hr_qp->sq.max_gs = max_cnt;
sq 595 drivers/infiniband/hw/hns/hns_roce_qp.c hr_qp->sq.offset = 0;
sq 596 drivers/infiniband/hw/hns/hns_roce_qp.c size = HNS_ROCE_ALOGN_UP(hr_qp->sq.wqe_cnt << hr_qp->sq.wqe_shift,
sq 613 drivers/infiniband/hw/hns/hns_roce_qp.c cap->max_send_wr = hr_qp->sq.max_post = hr_qp->sq.wqe_cnt;
sq 614 drivers/infiniband/hw/hns/hns_roce_qp.c cap->max_send_sge = hr_qp->sq.max_gs;
sq 704 drivers/infiniband/hw/hns/hns_roce_qp.c spin_lock_init(&hr_qp->sq.lock);
sq 831 drivers/infiniband/hw/hns/hns_roce_qp.c hr_qp->sq.db_reg_l = hr_dev->reg_base + hr_dev->sdb_offset +
sq 879 drivers/infiniband/hw/hns/hns_roce_qp.c hr_qp->sq.wrid = kcalloc(hr_qp->sq.wqe_cnt, sizeof(u64),
sq 881 drivers/infiniband/hw/hns/hns_roce_qp.c if (ZERO_OR_NULL_PTR(hr_qp->sq.wrid)) {
sq 992 drivers/infiniband/hw/hns/hns_roce_qp.c kfree(hr_qp->sq.wrid);
sq 1191 drivers/infiniband/hw/hns/hns_roce_qp.c hr_qp->sq.head = *(int *)(hr_qp->sdb.virt_addr);
sq 1278 drivers/infiniband/hw/hns/hns_roce_qp.c return get_wqe(hr_qp, hr_qp->sq.offset + (n << hr_qp->sq.wqe_shift));
sq 153 drivers/infiniband/hw/i40iw/i40iw.h struct i40iw_dma_mem sq;
sq 469 drivers/infiniband/hw/i40iw/i40iw_ctrl.c cqp->sq_base = info->sq;
sq 1861 drivers/infiniband/hw/i40iw/i40iw_ctrl.c info->sq = true;
sq 2612 drivers/infiniband/hw/i40iw/i40iw_ctrl.c if (info->sq && !qp->flush_sq)
sq 418 drivers/infiniband/hw/i40iw/i40iw_hw.c if (!info->sq && ctx_info->err_rq_idx_valid) {
sq 843 drivers/infiniband/hw/i40iw/i40iw_hw.c info.sq = true;
sq 249 drivers/infiniband/hw/i40iw/i40iw_main.c i40iw_free_dma_mem(dev->hw, &cqp->sq);
sq 579 drivers/infiniband/hw/i40iw/i40iw_main.c status = i40iw_allocate_dma_mem(dev->hw, &cqp->sq,
sq 593 drivers/infiniband/hw/i40iw/i40iw_main.c cqp_init_info.sq = cqp->sq.va;
sq 594 drivers/infiniband/hw/i40iw/i40iw_main.c cqp_init_info.sq_pa = cqp->sq.pa;
sq 611 drivers/infiniband/hw/i40iw/i40iw_type.h struct i40iw_cqp_quanta *sq;
sq 764 drivers/infiniband/hw/i40iw/i40iw_type.h bool sq;
sq 946 drivers/infiniband/hw/i40iw/i40iw_type.h bool sq;
sq 1004 drivers/infiniband/hw/i40iw/i40iw_uk.c qp->sq_base = info->sq;
sq 378 drivers/infiniband/hw/i40iw/i40iw_user.h struct i40iw_qp_quanta *sq;
sq 496 drivers/infiniband/hw/i40iw/i40iw_verbs.c ukinfo->sq = mem->va;
sq 499 drivers/infiniband/hw/i40iw/i40iw_verbs.c ukinfo->rq = &ukinfo->sq[sqdepth];
sq 619 drivers/infiniband/hw/mlx4/cq.c wq = is_send ? &qp->sq : &qp->rq;
sq 737 drivers/infiniband/hw/mlx4/cq.c wq = &(*cur_qp)->sq;
sq 3120 drivers/infiniband/hw/mlx4/main.c spin_lock_irqsave(&mqp->sq.lock, flags_qp);
sq 3121 drivers/infiniband/hw/mlx4/main.c if (mqp->sq.tail != mqp->sq.head) {
sq 3134 drivers/infiniband/hw/mlx4/main.c spin_unlock_irqrestore(&mqp->sq.lock, flags_qp);
sq 321 drivers/infiniband/hw/mlx4/mlx4_ib.h struct mlx4_ib_wq sq;
sq 204 drivers/infiniband/hw/mlx4/qp.c return get_wqe(qp, qp->sq.offset + (n << qp->sq.wqe_shift));
sq 220 drivers/infiniband/hw/mlx4/qp.c buf = get_send_wqe(qp, n & (qp->sq.wqe_cnt - 1));
sq 400 drivers/infiniband/hw/mlx4/qp.c qp->sq.wqe_shift = ilog2(roundup_pow_of_two(s));
sq 406 drivers/infiniband/hw/mlx4/qp.c qp->sq_spare_wqes = MLX4_IB_SQ_HEADROOM(qp->sq.wqe_shift);
sq 407 drivers/infiniband/hw/mlx4/qp.c qp->sq.wqe_cnt = roundup_pow_of_two(cap->max_send_wr +
sq 410 drivers/infiniband/hw/mlx4/qp.c qp->sq.max_gs =
sq 412 drivers/infiniband/hw/mlx4/qp.c (1 << qp->sq.wqe_shift)) -
sq 417 drivers/infiniband/hw/mlx4/qp.c (qp->sq.wqe_cnt << qp->sq.wqe_shift);
sq 418 drivers/infiniband/hw/mlx4/qp.c if (qp->rq.wqe_shift > qp->sq.wqe_shift) {
sq 420 drivers/infiniband/hw/mlx4/qp.c qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;
sq 422 drivers/infiniband/hw/mlx4/qp.c qp->rq.offset = qp->sq.wqe_cnt << qp->sq.wqe_shift;
sq 423 drivers/infiniband/hw/mlx4/qp.c qp->sq.offset = 0;
sq 426 drivers/infiniband/hw/mlx4/qp.c cap->max_send_wr = qp->sq.max_post =
sq 427 drivers/infiniband/hw/mlx4/qp.c qp->sq.wqe_cnt - qp->sq_spare_wqes;
sq 428 drivers/infiniband/hw/mlx4/qp.c cap->max_send_sge = min(qp->sq.max_gs,
sq 448 drivers/infiniband/hw/mlx4/qp.c qp->sq.wqe_cnt = 1 << ucmd->log_sq_bb_count;
sq 449 drivers/infiniband/hw/mlx4/qp.c qp->sq.wqe_shift = ucmd->log_sq_stride;
sq 452 drivers/infiniband/hw/mlx4/qp.c (qp->sq.wqe_cnt << qp->sq.wqe_shift);
sq 669 drivers/infiniband/hw/mlx4/qp.c qp->sq.wqe_cnt = 1;
sq 670 drivers/infiniband/hw/mlx4/qp.c qp->sq.wqe_shift = MLX4_IB_MIN_SQ_STRIDE;
sq 671 drivers/infiniband/hw/mlx4/qp.c qp->buf_size = qp->sq.wqe_cnt << MLX4_IB_MIN_SQ_STRIDE;
sq 877 drivers/infiniband/hw/mlx4/qp.c spin_lock_init(&qp->sq.lock);
sq 914 drivers/infiniband/hw/mlx4/qp.c qp->sq.wqe_cnt = 1;
sq 915 drivers/infiniband/hw/mlx4/qp.c qp->sq.wqe_shift = MLX4_IB_MIN_SQ_STRIDE;
sq 917 drivers/infiniband/hw/mlx4/qp.c (qp->sq.wqe_cnt << qp->sq.wqe_shift);
sq 1067 drivers/infiniband/hw/mlx4/qp.c spin_lock_init(&qp->sq.lock);
sq 1183 drivers/infiniband/hw/mlx4/qp.c qp->sq.wrid = kvmalloc_array(qp->sq.wqe_cnt,
sq 1187 drivers/infiniband/hw/mlx4/qp.c if (!qp->sq.wrid || !qp->rq.wrid) {
sq 1279 drivers/infiniband/hw/mlx4/qp.c kvfree(qp->sq.wrid);
sq 1501 drivers/infiniband/hw/mlx4/qp.c kvfree(qp->sq.wrid);
sq 2238 drivers/infiniband/hw/mlx4/qp.c if (qp->sq.wqe_cnt)
sq 2239 drivers/infiniband/hw/mlx4/qp.c context->sq_size_stride = ilog2(qp->sq.wqe_cnt) << 3;
sq 2240 drivers/infiniband/hw/mlx4/qp.c context->sq_size_stride |= qp->sq.wqe_shift - 4;
sq 2542 drivers/infiniband/hw/mlx4/qp.c for (i = 0; i < qp->sq.wqe_cnt; ++i) {
sq 2546 drivers/infiniband/hw/mlx4/qp.c 1 << (qp->sq.wqe_shift - 4);
sq 2608 drivers/infiniband/hw/mlx4/qp.c qp->sq.head = 0;
sq 2609 drivers/infiniband/hw/mlx4/qp.c qp->sq.tail = 0;
sq 3501 drivers/infiniband/hw/mlx4/qp.c wr->wr.num_sge > qp->sq.max_gs - (halign >> 4)))
sq 3572 drivers/infiniband/hw/mlx4/qp.c spin_lock_irqsave(&qp->sq.lock, flags);
sq 3587 drivers/infiniband/hw/mlx4/qp.c if (mlx4_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
sq 3593 drivers/infiniband/hw/mlx4/qp.c if (unlikely(wr->num_sge > qp->sq.max_gs)) {
sq 3599 drivers/infiniband/hw/mlx4/qp.c ctrl = wqe = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1));
sq 3600 drivers/infiniband/hw/mlx4/qp.c qp->sq.wrid[(qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1)] = wr->wr_id;
sq 3814 drivers/infiniband/hw/mlx4/qp.c (ind & qp->sq.wqe_cnt ? cpu_to_be32(1 << 31) : 0) | blh;
sq 3828 drivers/infiniband/hw/mlx4/qp.c qp->sq.head += nreq;
sq 3844 drivers/infiniband/hw/mlx4/qp.c spin_unlock_irqrestore(&qp->sq.lock, flags);
sq 4095 drivers/infiniband/hw/mlx4/qp.c qp_attr->cap.max_send_wr = qp->sq.wqe_cnt;
sq 4096 drivers/infiniband/hw/mlx4/qp.c qp_attr->cap.max_send_sge = qp->sq.max_gs;
sq 339 drivers/infiniband/hw/mlx5/cq.c idx = tail & (qp->sq.wqe_cnt - 1);
sq 343 drivers/infiniband/hw/mlx5/cq.c tail = qp->sq.w_list[idx].next;
sq 345 drivers/infiniband/hw/mlx5/cq.c tail = qp->sq.w_list[idx].next;
sq 346 drivers/infiniband/hw/mlx5/cq.c qp->sq.last_poll = tail;
sq 394 drivers/infiniband/hw/mlx5/cq.c wq = (is_send) ? &qp->sq : &qp->rq;
sq 497 drivers/infiniband/hw/mlx5/cq.c wq = &(*cur_qp)->sq;
sq 525 drivers/infiniband/hw/mlx5/cq.c wq = &(*cur_qp)->sq;
sq 616 drivers/infiniband/hw/mlx5/devx.c struct mlx5_ib_sq *sq = &raw_packet_qp->sq;
sq 621 drivers/infiniband/hw/mlx5/devx.c sq->base.mqp.qpn) == obj_id ||
sq 625 drivers/infiniband/hw/mlx5/devx.c sq->tisn) == obj_id);
sq 140 drivers/infiniband/hw/mlx5/ib_rep.c struct mlx5_ib_sq *sq,
sq 155 drivers/infiniband/hw/mlx5/ib_rep.c sq->base.mqp.qpn);
sq 24 drivers/infiniband/hw/mlx5/ib_rep.h struct mlx5_ib_sq *sq,
sq 58 drivers/infiniband/hw/mlx5/ib_rep.h struct mlx5_ib_sq *sq,
sq 4482 drivers/infiniband/hw/mlx5/main.c spin_lock_irqsave(&mqp->sq.lock, flags_qp);
sq 4483 drivers/infiniband/hw/mlx5/main.c if (mqp->sq.tail != mqp->sq.head) {
sq 4496 drivers/infiniband/hw/mlx5/main.c spin_unlock_irqrestore(&mqp->sq.lock, flags_qp);
sq 376 drivers/infiniband/hw/mlx5/mlx5_ib.h struct mlx5_ib_wq *sq;
sq 385 drivers/infiniband/hw/mlx5/mlx5_ib.h struct mlx5_ib_sq sq;
sq 415 drivers/infiniband/hw/mlx5/mlx5_ib.h struct mlx5_ib_wq sq;
sq 1168 drivers/infiniband/hw/mlx5/odp.c bool sq = pfault->type & MLX5_PFAULT_REQUESTOR;
sq 1199 drivers/infiniband/hw/mlx5/odp.c if (qp && sq) {
sq 1206 drivers/infiniband/hw/mlx5/odp.c } else if (qp && !sq) {
sq 1228 drivers/infiniband/hw/mlx5/odp.c &total_wqe_bytes, !sq);
sq 171 drivers/infiniband/hw/mlx5/qp.c struct mlx5_ib_wq *wq = &qp->sq;
sq 501 drivers/infiniband/hw/mlx5/qp.c qp->sq.wqe_cnt = wq_size / MLX5_SEND_WQE_BB;
sq 502 drivers/infiniband/hw/mlx5/qp.c if (qp->sq.wqe_cnt > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) {
sq 505 drivers/infiniband/hw/mlx5/qp.c qp->sq.wqe_cnt,
sq 509 drivers/infiniband/hw/mlx5/qp.c qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
sq 510 drivers/infiniband/hw/mlx5/qp.c qp->sq.max_gs = get_send_sge(attr, wqe_size);
sq 511 drivers/infiniband/hw/mlx5/qp.c if (qp->sq.max_gs < attr->cap.max_send_sge)
sq 514 drivers/infiniband/hw/mlx5/qp.c attr->cap.max_send_sge = qp->sq.max_gs;
sq 515 drivers/infiniband/hw/mlx5/qp.c qp->sq.max_post = wq_size / wqe_size;
sq 516 drivers/infiniband/hw/mlx5/qp.c attr->cap.max_send_wr = qp->sq.max_post;
sq 527 drivers/infiniband/hw/mlx5/qp.c int desc_sz = 1 << qp->sq.wqe_shift;
sq 541 drivers/infiniband/hw/mlx5/qp.c qp->sq.wqe_cnt = ucmd->sq_wqe_count;
sq 543 drivers/infiniband/hw/mlx5/qp.c if (qp->sq.wqe_cnt > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) {
sq 545 drivers/infiniband/hw/mlx5/qp.c qp->sq.wqe_cnt,
sq 553 drivers/infiniband/hw/mlx5/qp.c qp->raw_packet_qp.sq.ubuffer.buf_size = qp->sq.wqe_cnt << 6;
qp->sq.wqe_cnt << 6; sq 556 drivers/infiniband/hw/mlx5/qp.c (qp->sq.wqe_cnt << 6); sq 910 drivers/infiniband/hw/mlx5/qp.c qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB); sq 911 drivers/infiniband/hw/mlx5/qp.c qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift; sq 1019 drivers/infiniband/hw/mlx5/qp.c static void *get_sq_edge(struct mlx5_ib_wq *sq, u32 idx) sq 1024 drivers/infiniband/hw/mlx5/qp.c (&sq->fbc, sq 1025 drivers/infiniband/hw/mlx5/qp.c mlx5_frag_buf_get_idx_last_contig_stride(&sq->fbc, idx)); sq 1065 drivers/infiniband/hw/mlx5/qp.c qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift; sq 1079 drivers/infiniband/hw/mlx5/qp.c if (qp->sq.wqe_cnt) { sq 1080 drivers/infiniband/hw/mlx5/qp.c int sq_strides_offset = (qp->sq.offset & (PAGE_SIZE - 1)) / sq 1083 drivers/infiniband/hw/mlx5/qp.c (qp->sq.offset / PAGE_SIZE), sq 1085 drivers/infiniband/hw/mlx5/qp.c ilog2(qp->sq.wqe_cnt), sq 1086 drivers/infiniband/hw/mlx5/qp.c sq_strides_offset, &qp->sq.fbc); sq 1088 drivers/infiniband/hw/mlx5/qp.c qp->sq.cur_edge = get_sq_edge(&qp->sq, 0); sq 1122 drivers/infiniband/hw/mlx5/qp.c qp->sq.wrid = kvmalloc_array(qp->sq.wqe_cnt, sq 1123 drivers/infiniband/hw/mlx5/qp.c sizeof(*qp->sq.wrid), GFP_KERNEL); sq 1124 drivers/infiniband/hw/mlx5/qp.c qp->sq.wr_data = kvmalloc_array(qp->sq.wqe_cnt, sq 1125 drivers/infiniband/hw/mlx5/qp.c sizeof(*qp->sq.wr_data), GFP_KERNEL); sq 1128 drivers/infiniband/hw/mlx5/qp.c qp->sq.w_list = kvmalloc_array(qp->sq.wqe_cnt, sq 1129 drivers/infiniband/hw/mlx5/qp.c sizeof(*qp->sq.w_list), GFP_KERNEL); sq 1130 drivers/infiniband/hw/mlx5/qp.c qp->sq.wqe_head = kvmalloc_array(qp->sq.wqe_cnt, sq 1131 drivers/infiniband/hw/mlx5/qp.c sizeof(*qp->sq.wqe_head), GFP_KERNEL); sq 1133 drivers/infiniband/hw/mlx5/qp.c if (!qp->sq.wrid || !qp->sq.wr_data || !qp->rq.wrid || sq 1134 drivers/infiniband/hw/mlx5/qp.c !qp->sq.w_list || !qp->sq.wqe_head) { sq 1143 drivers/infiniband/hw/mlx5/qp.c kvfree(qp->sq.wqe_head); sq 1144 drivers/infiniband/hw/mlx5/qp.c kvfree(qp->sq.w_list); sq 1145 drivers/infiniband/hw/mlx5/qp.c kvfree(qp->sq.wrid); sq 1146 drivers/infiniband/hw/mlx5/qp.c kvfree(qp->sq.wr_data); sq 1160 drivers/infiniband/hw/mlx5/qp.c kvfree(qp->sq.wqe_head); sq 1161 drivers/infiniband/hw/mlx5/qp.c kvfree(qp->sq.w_list); sq 1162 drivers/infiniband/hw/mlx5/qp.c kvfree(qp->sq.wrid); sq 1163 drivers/infiniband/hw/mlx5/qp.c kvfree(qp->sq.wr_data); sq 1192 drivers/infiniband/hw/mlx5/qp.c struct mlx5_ib_sq *sq, u32 tdn, sq 1203 drivers/infiniband/hw/mlx5/qp.c return mlx5_core_create_tis(dev->mdev, in, sizeof(in), &sq->tisn); sq 1207 drivers/infiniband/hw/mlx5/qp.c struct mlx5_ib_sq *sq, struct ib_pd *pd) sq 1209 drivers/infiniband/hw/mlx5/qp.c mlx5_cmd_destroy_tis(dev->mdev, sq->tisn, to_mpd(pd)->uid); sq 1212 drivers/infiniband/hw/mlx5/qp.c static void destroy_flow_rule_vport_sq(struct mlx5_ib_sq *sq) sq 1214 drivers/infiniband/hw/mlx5/qp.c if (sq->flow_rule) sq 1215 drivers/infiniband/hw/mlx5/qp.c mlx5_del_flow_rules(sq->flow_rule); sq 1216 drivers/infiniband/hw/mlx5/qp.c sq->flow_rule = NULL; sq 1221 drivers/infiniband/hw/mlx5/qp.c struct mlx5_ib_sq *sq, void *qpin, sq 1224 drivers/infiniband/hw/mlx5/qp.c struct mlx5_ib_ubuffer *ubuffer = &sq->ubuffer; sq 1238 drivers/infiniband/hw/mlx5/qp.c &sq->ubuffer.umem, &npages, &page_shift, &ncont, sq 1259 drivers/infiniband/hw/mlx5/qp.c MLX5_SET(sqc, sqc, tis_num_0, sq->tisn); sq 1275 drivers/infiniband/hw/mlx5/qp.c mlx5_ib_populate_pas(dev, sq->ubuffer.umem, page_shift, pas, 0); sq 1277 drivers/infiniband/hw/mlx5/qp.c err = 
mlx5_core_create_sq_tracked(dev->mdev, in, inlen, &sq->base.mqp); sq 1287 drivers/infiniband/hw/mlx5/qp.c ib_umem_release(sq->ubuffer.umem); sq 1288 drivers/infiniband/hw/mlx5/qp.c sq->ubuffer.umem = NULL; sq 1294 drivers/infiniband/hw/mlx5/qp.c struct mlx5_ib_sq *sq) sq 1296 drivers/infiniband/hw/mlx5/qp.c destroy_flow_rule_vport_sq(sq); sq 1297 drivers/infiniband/hw/mlx5/qp.c mlx5_core_destroy_sq_tracked(dev->mdev, &sq->base.mqp); sq 1298 drivers/infiniband/hw/mlx5/qp.c ib_umem_release(sq->ubuffer.umem); sq 1457 drivers/infiniband/hw/mlx5/qp.c struct mlx5_ib_sq *sq = &raw_packet_qp->sq; sq 1466 drivers/infiniband/hw/mlx5/qp.c if (qp->sq.wqe_cnt) { sq 1467 drivers/infiniband/hw/mlx5/qp.c err = create_raw_packet_qp_tis(dev, qp, sq, tdn, pd); sq 1471 drivers/infiniband/hw/mlx5/qp.c err = create_raw_packet_qp_sq(dev, udata, sq, in, pd); sq 1476 drivers/infiniband/hw/mlx5/qp.c resp->tisn = sq->tisn; sq 1478 drivers/infiniband/hw/mlx5/qp.c resp->sqn = sq->base.mqp.qpn; sq 1482 drivers/infiniband/hw/mlx5/qp.c sq->base.container_mibqp = qp; sq 1483 drivers/infiniband/hw/mlx5/qp.c sq->base.mqp.event = mlx5_ib_qp_event; sq 1525 drivers/infiniband/hw/mlx5/qp.c qp->trans_qp.base.mqp.qpn = qp->sq.wqe_cnt ? sq->base.mqp.qpn : sq 1538 drivers/infiniband/hw/mlx5/qp.c if (!qp->sq.wqe_cnt) sq 1540 drivers/infiniband/hw/mlx5/qp.c destroy_raw_packet_qp_sq(dev, sq); sq 1542 drivers/infiniband/hw/mlx5/qp.c destroy_raw_packet_qp_tis(dev, sq, pd); sq 1551 drivers/infiniband/hw/mlx5/qp.c struct mlx5_ib_sq *sq = &raw_packet_qp->sq; sq 1559 drivers/infiniband/hw/mlx5/qp.c if (qp->sq.wqe_cnt) { sq 1560 drivers/infiniband/hw/mlx5/qp.c destroy_raw_packet_qp_sq(dev, sq); sq 1561 drivers/infiniband/hw/mlx5/qp.c destroy_raw_packet_qp_tis(dev, sq, qp->ibqp.pd); sq 1568 drivers/infiniband/hw/mlx5/qp.c struct mlx5_ib_sq *sq = &raw_packet_qp->sq; sq 1571 drivers/infiniband/hw/mlx5/qp.c sq->sq = &qp->sq; sq 1573 drivers/infiniband/hw/mlx5/qp.c sq->doorbell = &qp->db; sq 1967 drivers/infiniband/hw/mlx5/qp.c spin_lock_init(&qp->sq.lock); sq 2210 drivers/infiniband/hw/mlx5/qp.c if (qp->sq.wqe_cnt) { sq 2211 drivers/infiniband/hw/mlx5/qp.c MLX5_SET(qpc, qpc, log_sq_size, ilog2(qp->sq.wqe_cnt)); sq 2282 drivers/infiniband/hw/mlx5/qp.c qp->raw_packet_qp.sq.ubuffer.buf_addr = ucmd.sq_buf_addr; sq 2854 drivers/infiniband/hw/mlx5/qp.c struct mlx5_ib_sq *sq, u8 sl, sq 2873 drivers/infiniband/hw/mlx5/qp.c err = mlx5_core_modify_tis(dev, sq->tisn, in, inlen); sq 2881 drivers/infiniband/hw/mlx5/qp.c struct mlx5_ib_sq *sq, u8 tx_affinity, sq 2900 drivers/infiniband/hw/mlx5/qp.c err = mlx5_core_modify_tis(dev, sq->tisn, in, inlen); sq 2977 drivers/infiniband/hw/mlx5/qp.c if ((qp->ibqp.qp_type == IB_QPT_RAW_PACKET) && qp->sq.wqe_cnt) sq 2979 drivers/infiniband/hw/mlx5/qp.c &qp->raw_packet_qp.sq, sq 3192 drivers/infiniband/hw/mlx5/qp.c struct mlx5_core_dev *dev, struct mlx5_ib_sq *sq, int new_state, sq 3195 drivers/infiniband/hw/mlx5/qp.c struct mlx5_ib_qp *ibqp = sq->base.container_mibqp; sq 3211 drivers/infiniband/hw/mlx5/qp.c MLX5_SET(modify_sq_in, in, sq_state, sq->state); sq 3243 drivers/infiniband/hw/mlx5/qp.c err = mlx5_core_modify_sq(dev, sq->base.mqp.qpn, in, inlen); sq 3260 drivers/infiniband/hw/mlx5/qp.c sq->state = new_state; sq 3273 drivers/infiniband/hw/mlx5/qp.c struct mlx5_ib_sq *sq = &raw_packet_qp->sq; sq 3275 drivers/infiniband/hw/mlx5/qp.c int modify_sq = !!qp->sq.wqe_cnt; sq 3298 drivers/infiniband/hw/mlx5/qp.c sq_state = sq->state; sq 3325 drivers/infiniband/hw/mlx5/qp.c err = modify_raw_packet_tx_affinity(dev->mdev, 
sq, sq 3332 drivers/infiniband/hw/mlx5/qp.c flow_rule = create_flow_rule_vport_sq(dev, sq, sq 3337 drivers/infiniband/hw/mlx5/qp.c err = modify_raw_packet_qp_sq(dev->mdev, sq, sq_state, sq 3346 drivers/infiniband/hw/mlx5/qp.c destroy_flow_rule_vport_sq(sq); sq 3347 drivers/infiniband/hw/mlx5/qp.c sq->flow_rule = flow_rule; sq 3723 drivers/infiniband/hw/mlx5/qp.c qp->sq.head = 0; sq 3724 drivers/infiniband/hw/mlx5/qp.c qp->sq.tail = 0; sq 3725 drivers/infiniband/hw/mlx5/qp.c qp->sq.cur_post = 0; sq 3726 drivers/infiniband/hw/mlx5/qp.c if (qp->sq.wqe_cnt) sq 3727 drivers/infiniband/hw/mlx5/qp.c qp->sq.cur_edge = get_sq_edge(&qp->sq, 0); sq 3728 drivers/infiniband/hw/mlx5/qp.c qp->sq.last_poll = 0; sq 4014 drivers/infiniband/hw/mlx5/qp.c static void _handle_post_send_edge(struct mlx5_ib_wq *sq, void **seg, sq 4019 drivers/infiniband/hw/mlx5/qp.c idx = (sq->cur_post + (wqe_sz >> 2)) & (sq->wqe_cnt - 1); sq 4020 drivers/infiniband/hw/mlx5/qp.c *cur_edge = get_sq_edge(sq, idx); sq 4022 drivers/infiniband/hw/mlx5/qp.c *seg = mlx5_frag_buf_get_wqe(&sq->fbc, idx); sq 4032 drivers/infiniband/hw/mlx5/qp.c static inline void handle_post_send_edge(struct mlx5_ib_wq *sq, void **seg, sq 4038 drivers/infiniband/hw/mlx5/qp.c _handle_post_send_edge(sq, seg, wqe_sz, cur_edge); sq 4050 drivers/infiniband/hw/mlx5/qp.c static inline void memcpy_send_wqe(struct mlx5_ib_wq *sq, void **cur_edge, sq 4066 drivers/infiniband/hw/mlx5/qp.c handle_post_send_edge(sq, seg, *wqe_sz, cur_edge); sq 4129 drivers/infiniband/hw/mlx5/qp.c handle_post_send_edge(&qp->sq, seg, *size, cur_edge); sq 4132 drivers/infiniband/hw/mlx5/qp.c memcpy_send_wqe(&qp->sq, cur_edge, seg, size, pdata, sq 4460 drivers/infiniband/hw/mlx5/qp.c handle_post_send_edge(&qp->sq, wqe, sq 4687 drivers/infiniband/hw/mlx5/qp.c handle_post_send_edge(&qp->sq, seg, *size, cur_edge); sq 4696 drivers/infiniband/hw/mlx5/qp.c handle_post_send_edge(&qp->sq, seg, *size, cur_edge); sq 4764 drivers/infiniband/hw/mlx5/qp.c handle_post_send_edge(&qp->sq, seg, *size, cur_edge); sq 4770 drivers/infiniband/hw/mlx5/qp.c handle_post_send_edge(&qp->sq, seg, *size, cur_edge); sq 4845 drivers/infiniband/hw/mlx5/qp.c handle_post_send_edge(&qp->sq, seg, *size, cur_edge); sq 4850 drivers/infiniband/hw/mlx5/qp.c handle_post_send_edge(&qp->sq, seg, *size, cur_edge); sq 4853 drivers/infiniband/hw/mlx5/qp.c memcpy_send_wqe(&qp->sq, cur_edge, seg, size, mr->descs, sq 4870 drivers/infiniband/hw/mlx5/qp.c handle_post_send_edge(&qp->sq, seg, *size, cur_edge); sq 4874 drivers/infiniband/hw/mlx5/qp.c handle_post_send_edge(&qp->sq, seg, *size, cur_edge); sq 4885 drivers/infiniband/hw/mlx5/qp.c p = mlx5_frag_buf_get_wqe(&qp->sq.fbc, idx); sq 4888 drivers/infiniband/hw/mlx5/qp.c idx = (idx + 1) & (qp->sq.wqe_cnt - 1); sq 4902 drivers/infiniband/hw/mlx5/qp.c if (unlikely(mlx5_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq))) sq 4905 drivers/infiniband/hw/mlx5/qp.c *idx = qp->sq.cur_post & (qp->sq.wqe_cnt - 1); sq 4906 drivers/infiniband/hw/mlx5/qp.c *seg = mlx5_frag_buf_get_wqe(&qp->sq.fbc, *idx); sq 4916 drivers/infiniband/hw/mlx5/qp.c *cur_edge = qp->sq.cur_edge; sq 4939 drivers/infiniband/hw/mlx5/qp.c ctrl->opmod_idx_opcode = cpu_to_be32(((u32)(qp->sq.cur_post) << 8) | sq 4946 drivers/infiniband/hw/mlx5/qp.c qp->sq.wrid[idx] = wr_id; sq 4947 drivers/infiniband/hw/mlx5/qp.c qp->sq.w_list[idx].opcode = mlx5_opcode; sq 4948 drivers/infiniband/hw/mlx5/qp.c qp->sq.wqe_head[idx] = qp->sq.head + nreq; sq 4949 drivers/infiniband/hw/mlx5/qp.c qp->sq.cur_post += DIV_ROUND_UP(size * 16, MLX5_SEND_WQE_BB); 
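The mlx5 post-send entries just above (qp.c lines 4905 and 4949) show the ring-index idiom that recurs throughout this listing: wqe_cnt is a power of two, so a free-running post counter is mapped to a slot with a mask instead of a modulo, and the counter then advances by the WQE's size in basic blocks. A minimal userspace sketch, assuming illustrative toy_* names and the conventional 64-byte basic block (MLX5_SEND_WQE_BB) with WQE sizes counted in 16-byte units:

#include <stdint.h>
#include <stdio.h>

#define WQE_BB 64 /* send-WQE basic block, 64 bytes on mlx5 hardware */

struct toy_sq {
	uint16_t cur_post; /* free-running post counter, wraps naturally */
	uint16_t wqe_cnt;  /* must be a power of two */
};

/* Reserve one WQE of 'size16' 16-byte units and return the ring slot
 * it starts in -- the "cur_post & (wqe_cnt - 1)" pattern above. */
static uint16_t toy_post(struct toy_sq *sq, unsigned int size16)
{
	uint16_t idx = sq->cur_post & (sq->wqe_cnt - 1);
	unsigned int bytes = size16 * 16;

	/* DIV_ROUND_UP(size * 16, WQE_BB): a WQE may span several BBs */
	sq->cur_post += (bytes + WQE_BB - 1) / WQE_BB;
	return idx;
}

int main(void)
{
	struct toy_sq sq = { .cur_post = 0, .wqe_cnt = 256 };

	printf("slot %u\n", toy_post(&sq, 4));  /* 64 B WQE -> 1 BB */
	printf("slot %u\n", toy_post(&sq, 12)); /* 192 B WQE -> 3 BBs */
	return 0;
}

Leaving the counter unmasked is deliberate: the bits above the mask track wraps, which is how the mlx4 entry at qp.c:3814 derives its ownership toggle from "ind & qp->sq.wqe_cnt".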
sq 4950 drivers/infiniband/hw/mlx5/qp.c qp->sq.w_list[idx].next = qp->sq.cur_post; sq 4956 drivers/infiniband/hw/mlx5/qp.c qp->sq.cur_edge = (unlikely(seg == cur_edge)) ? sq 4957 drivers/infiniband/hw/mlx5/qp.c get_sq_edge(&qp->sq, qp->sq.cur_post & sq 4958 drivers/infiniband/hw/mlx5/qp.c (qp->sq.wqe_cnt - 1)) : sq 5000 drivers/infiniband/hw/mlx5/qp.c spin_lock_irqsave(&qp->sq.lock, flags); sq 5011 drivers/infiniband/hw/mlx5/qp.c if (unlikely(num_sge > qp->sq.max_gs)) { sq 5068 drivers/infiniband/hw/mlx5/qp.c qp->sq.wr_data[idx] = IB_WR_LOCAL_INV; sq 5075 drivers/infiniband/hw/mlx5/qp.c qp->sq.wr_data[idx] = IB_WR_REG_MR; sq 5087 drivers/infiniband/hw/mlx5/qp.c qp->sq.wr_data[idx] = IB_WR_REG_MR_INTEGRITY; sq 5241 drivers/infiniband/hw/mlx5/qp.c handle_post_send_edge(&qp->sq, &seg, size, &cur_edge); sq 5248 drivers/infiniband/hw/mlx5/qp.c handle_post_send_edge(&qp->sq, &seg, size, &cur_edge); sq 5259 drivers/infiniband/hw/mlx5/qp.c handle_post_send_edge(&qp->sq, &seg, size, sq 5269 drivers/infiniband/hw/mlx5/qp.c qp->sq.wr_data[idx] = MLX5_IB_WR_UMR; sq 5276 drivers/infiniband/hw/mlx5/qp.c handle_post_send_edge(&qp->sq, &seg, size, &cur_edge); sq 5280 drivers/infiniband/hw/mlx5/qp.c handle_post_send_edge(&qp->sq, &seg, size, &cur_edge); sq 5296 drivers/infiniband/hw/mlx5/qp.c handle_post_send_edge(&qp->sq, &seg, size, sq 5318 drivers/infiniband/hw/mlx5/qp.c qp->sq.head += nreq; sq 5325 drivers/infiniband/hw/mlx5/qp.c qp->db.db[MLX5_SND_DBR] = cpu_to_be32(qp->sq.cur_post); sq 5339 drivers/infiniband/hw/mlx5/qp.c spin_unlock_irqrestore(&qp->sq.lock, flags); sq 5514 drivers/infiniband/hw/mlx5/qp.c struct mlx5_ib_sq *sq, sq 5519 drivers/infiniband/hw/mlx5/qp.c err = mlx5_core_query_sq_state(dev->mdev, sq->base.mqp.qpn, sq_state); sq 5522 drivers/infiniband/hw/mlx5/qp.c sq->state = *sq_state; sq 5589 drivers/infiniband/hw/mlx5/qp.c qp->raw_packet_qp.sq.base.mqp.qpn, sq_state, sq 5605 drivers/infiniband/hw/mlx5/qp.c struct mlx5_ib_sq *sq = &raw_packet_qp->sq; sq 5611 drivers/infiniband/hw/mlx5/qp.c if (qp->sq.wqe_cnt) { sq 5612 drivers/infiniband/hw/mlx5/qp.c err = query_raw_packet_qp_sq_state(dev, sq, &sq_state); sq 5801 drivers/infiniband/hw/mlx5/qp.c qp_attr->cap.max_send_wr = qp->sq.max_post; sq 5802 drivers/infiniband/hw/mlx5/qp.c qp_attr->cap.max_send_sge = qp->sq.max_gs; sq 534 drivers/infiniband/hw/mthca/mthca_cq.c wq = &(*cur_qp)->sq; sq 517 drivers/infiniband/hw/mthca/mthca_provider.c qp->sq.db_index = ucmd.sq_db_index; sq 572 drivers/infiniband/hw/mthca/mthca_provider.c init_attr->cap.max_send_wr = qp->sq.max; sq 574 drivers/infiniband/hw/mthca/mthca_provider.c init_attr->cap.max_send_sge = qp->sq.max_gs; sq 593 drivers/infiniband/hw/mthca/mthca_provider.c to_mqp(qp)->sq.db_index); sq 276 drivers/infiniband/hw/mthca/mthca_provider.h struct mthca_wq sq; sq 221 drivers/infiniband/hw/mthca/mthca_qp.c (n << qp->sq.wqe_shift); sq 224 drivers/infiniband/hw/mthca/mthca_qp.c (n << qp->sq.wqe_shift)) >> sq 226 drivers/infiniband/hw/mthca/mthca_qp.c ((qp->send_wqe_offset + (n << qp->sq.wqe_shift)) & sq 505 drivers/infiniband/hw/mthca/mthca_qp.c qp_attr->cap.max_send_wr = qp->sq.max; sq 507 drivers/infiniband/hw/mthca/mthca_qp.c qp_attr->cap.max_send_sge = qp->sq.max_gs; sq 619 drivers/infiniband/hw/mthca/mthca_qp.c if (qp->sq.max) sq 620 drivers/infiniband/hw/mthca/mthca_qp.c qp_context->sq_size_stride = ilog2(qp->sq.max) << 3; sq 621 drivers/infiniband/hw/mthca/mthca_qp.c qp_context->sq_size_stride |= qp->sq.wqe_shift - 4; sq 739 drivers/infiniband/hw/mthca/mthca_qp.c qp_context->snd_db_index 
= cpu_to_be32(qp->sq.db_index); sq 840 drivers/infiniband/hw/mthca/mthca_qp.c mthca_wq_reset(&qp->sq); sq 841 drivers/infiniband/hw/mthca/mthca_qp.c qp->sq.last = get_send_wqe(qp, qp->sq.max - 1); sq 847 drivers/infiniband/hw/mthca/mthca_qp.c *qp->sq.db = 0; sq 870 drivers/infiniband/hw/mthca/mthca_qp.c spin_lock_irq(&qp->sq.lock); sq 874 drivers/infiniband/hw/mthca/mthca_qp.c spin_unlock_irq(&qp->sq.lock); sq 968 drivers/infiniband/hw/mthca/mthca_qp.c 1 << qp->sq.wqe_shift)); sq 972 drivers/infiniband/hw/mthca/mthca_qp.c qp->sq.max_gs = min_t(int, dev->limits.max_sg, sq 1005 drivers/infiniband/hw/mthca/mthca_qp.c size = qp->sq.max_gs * sizeof (struct mthca_data_seg); sq 1045 drivers/infiniband/hw/mthca/mthca_qp.c for (qp->sq.wqe_shift = 6; 1 << qp->sq.wqe_shift < size; sq 1046 drivers/infiniband/hw/mthca/mthca_qp.c qp->sq.wqe_shift++) sq 1050 drivers/infiniband/hw/mthca/mthca_qp.c 1 << qp->sq.wqe_shift); sq 1061 drivers/infiniband/hw/mthca/mthca_qp.c (qp->sq.max << qp->sq.wqe_shift)); sq 1063 drivers/infiniband/hw/mthca/mthca_qp.c qp->wrid = kmalloc_array(qp->rq.max + qp->sq.max, sizeof(u64), sq 1084 drivers/infiniband/hw/mthca/mthca_qp.c (qp->sq.max << qp->sq.wqe_shift)), sq 1139 drivers/infiniband/hw/mthca/mthca_qp.c qp->sq.db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_SQ, sq 1140 drivers/infiniband/hw/mthca/mthca_qp.c qp->qpn, &qp->sq.db); sq 1141 drivers/infiniband/hw/mthca/mthca_qp.c if (qp->sq.db_index < 0) { sq 1154 drivers/infiniband/hw/mthca/mthca_qp.c mthca_free_db(dev, MTHCA_DB_TYPE_SQ, qp->sq.db_index); sq 1178 drivers/infiniband/hw/mthca/mthca_qp.c mthca_wq_reset(&qp->sq); sq 1181 drivers/infiniband/hw/mthca/mthca_qp.c spin_lock_init(&qp->sq.lock); sq 1228 drivers/infiniband/hw/mthca/mthca_qp.c for (i = 0; i < qp->sq.max; ++i) { sq 1230 drivers/infiniband/hw/mthca/mthca_qp.c next->nda_op = cpu_to_be32((((i + 1) & (qp->sq.max - 1)) << sq 1231 drivers/infiniband/hw/mthca/mthca_qp.c qp->sq.wqe_shift) + sq 1243 drivers/infiniband/hw/mthca/mthca_qp.c qp->sq.last = get_send_wqe(qp, qp->sq.max - 1); sq 1272 drivers/infiniband/hw/mthca/mthca_qp.c qp->sq.max = cap->max_send_wr ? 
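The mlx4 and mthca context entries above (mlx4 qp.c lines 2238-2240, mthca_qp.c lines 619-621) pack the send-queue geometry into a single sq_size_stride byte: log2 of the WQE count in bits 3..7 and the stride encoded as wqe_shift - 4 in bits 0..2. A small sketch of that encoding, with ilog2() written out as a stand-in for the kernel helper of the same name:

#include <stdint.h>
#include <stdio.h>

static unsigned int ilog2(uint32_t v) /* floor(log2(v)), v != 0 */
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

static uint8_t sq_size_stride(uint32_t wqe_cnt, unsigned int wqe_shift)
{
	uint8_t v = 0;

	if (wqe_cnt)			/* an empty SQ encodes as 0 */
		v = ilog2(wqe_cnt) << 3;
	v |= wqe_shift - 4;		/* stride is 1 << wqe_shift bytes */
	return v;
}

int main(void)
{
	/* 256 WQEs of 64 bytes each (wqe_shift = 6) -> 0x42 */
	printf("0x%02x\n", sq_size_stride(256, 6));
	return 0;
}

Subtracting 4 works because the smallest stride these devices support is 16 bytes, so three bits cover strides from 16 B up to 2 KiB.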
sq 1276 drivers/infiniband/hw/mthca/mthca_qp.c qp->sq.max = cap->max_send_wr; sq 1280 drivers/infiniband/hw/mthca/mthca_qp.c qp->sq.max_gs = max_t(int, cap->max_send_sge, sq 1382 drivers/infiniband/hw/mthca/mthca_qp.c sqp->header_buf_size = sqp->qp.sq.max * MTHCA_UD_HEADER_SIZE; sq 1647 drivers/infiniband/hw/mthca/mthca_qp.c spin_lock_irqsave(&qp->sq.lock, flags); sq 1651 drivers/infiniband/hw/mthca/mthca_qp.c ind = qp->sq.next_ind; sq 1654 drivers/infiniband/hw/mthca/mthca_qp.c if (mthca_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) { sq 1657 drivers/infiniband/hw/mthca/mthca_qp.c qp->sq.head, qp->sq.tail, sq 1658 drivers/infiniband/hw/mthca/mthca_qp.c qp->sq.max, nreq); sq 1665 drivers/infiniband/hw/mthca/mthca_qp.c prev_wqe = qp->sq.last; sq 1666 drivers/infiniband/hw/mthca/mthca_qp.c qp->sq.last = wqe; sq 1750 drivers/infiniband/hw/mthca/mthca_qp.c if (wr->num_sge > qp->sq.max_gs) { sq 1782 drivers/infiniband/hw/mthca/mthca_qp.c cpu_to_be32(((ind << qp->sq.wqe_shift) + sq 1799 drivers/infiniband/hw/mthca/mthca_qp.c if (unlikely(ind >= qp->sq.max)) sq 1800 drivers/infiniband/hw/mthca/mthca_qp.c ind -= qp->sq.max; sq 1807 drivers/infiniband/hw/mthca/mthca_qp.c mthca_write64(((qp->sq.next_ind << qp->sq.wqe_shift) + sq 1814 drivers/infiniband/hw/mthca/mthca_qp.c qp->sq.next_ind = ind; sq 1815 drivers/infiniband/hw/mthca/mthca_qp.c qp->sq.head += nreq; sq 1817 drivers/infiniband/hw/mthca/mthca_qp.c spin_unlock_irqrestore(&qp->sq.lock, flags); sq 1951 drivers/infiniband/hw/mthca/mthca_qp.c spin_lock_irqsave(&qp->sq.lock, flags); sq 1955 drivers/infiniband/hw/mthca/mthca_qp.c ind = qp->sq.head & (qp->sq.max - 1); sq 1962 drivers/infiniband/hw/mthca/mthca_qp.c ((qp->sq.head & 0xffff) << 8) | f0 | op0; sq 1964 drivers/infiniband/hw/mthca/mthca_qp.c qp->sq.head += MTHCA_ARBEL_MAX_WQES_PER_SEND_DB; sq 1971 drivers/infiniband/hw/mthca/mthca_qp.c *qp->sq.db = cpu_to_be32(qp->sq.head & 0xffff); sq 1984 drivers/infiniband/hw/mthca/mthca_qp.c if (mthca_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) { sq 1987 drivers/infiniband/hw/mthca/mthca_qp.c qp->sq.head, qp->sq.tail, sq 1988 drivers/infiniband/hw/mthca/mthca_qp.c qp->sq.max, nreq); sq 1995 drivers/infiniband/hw/mthca/mthca_qp.c prev_wqe = qp->sq.last; sq 1996 drivers/infiniband/hw/mthca/mthca_qp.c qp->sq.last = wqe; sq 2080 drivers/infiniband/hw/mthca/mthca_qp.c if (wr->num_sge > qp->sq.max_gs) { sq 2112 drivers/infiniband/hw/mthca/mthca_qp.c cpu_to_be32(((ind << qp->sq.wqe_shift) + sq 2129 drivers/infiniband/hw/mthca/mthca_qp.c if (unlikely(ind >= qp->sq.max)) sq 2130 drivers/infiniband/hw/mthca/mthca_qp.c ind -= qp->sq.max; sq 2135 drivers/infiniband/hw/mthca/mthca_qp.c dbhi = (nreq << 24) | ((qp->sq.head & 0xffff) << 8) | f0 | op0; sq 2137 drivers/infiniband/hw/mthca/mthca_qp.c qp->sq.head += nreq; sq 2144 drivers/infiniband/hw/mthca/mthca_qp.c *qp->sq.db = cpu_to_be32(qp->sq.head & 0xffff); sq 2156 drivers/infiniband/hw/mthca/mthca_qp.c spin_unlock_irqrestore(&qp->sq.lock, flags); sq 158 drivers/infiniband/hw/ocrdma/ocrdma.h struct ocrdma_queue_info sq; sq 394 drivers/infiniband/hw/ocrdma/ocrdma.h struct ocrdma_qp_hwq_info sq; sq 138 drivers/infiniband/hw/ocrdma/ocrdma_hw.c return dev->mq.sq.va + (dev->mq.sq.head * sizeof(struct ocrdma_mqe)); sq 143 drivers/infiniband/hw/ocrdma/ocrdma_hw.c dev->mq.sq.head = (dev->mq.sq.head + 1) & (OCRDMA_MQ_LEN - 1); sq 148 drivers/infiniband/hw/ocrdma/ocrdma_hw.c return dev->mq.sq.va + (dev->mqe_ctx.tag * sizeof(struct ocrdma_mqe)); sq 325 drivers/infiniband/hw/ocrdma/ocrdma_hw.c val |= dev->mq.sq.id & 
OCRDMA_MQ_ID_MASK; sq 624 drivers/infiniband/hw/ocrdma/ocrdma_hw.c status = ocrdma_alloc_q(dev, &dev->mq.sq, OCRDMA_MQ_LEN, sq 628 drivers/infiniband/hw/ocrdma/ocrdma_hw.c status = ocrdma_mbx_create_mq(dev, &dev->mq.sq, &dev->mq.cq); sq 635 drivers/infiniband/hw/ocrdma/ocrdma_hw.c ocrdma_free_q(dev, &dev->mq.sq); sq 650 drivers/infiniband/hw/ocrdma/ocrdma_hw.c mbxq = &dev->mq.sq; sq 903 drivers/infiniband/hw/ocrdma/ocrdma_hw.c struct ocrdma_cq *cq, bool sq) sq 908 drivers/infiniband/hw/ocrdma/ocrdma_hw.c struct list_head *head = sq?(&cq->sq_head):(&cq->rq_head); sq 911 drivers/infiniband/hw/ocrdma/ocrdma_hw.c if (sq) sq 1043 drivers/infiniband/hw/ocrdma/ocrdma_hw.c dev->mqe_ctx.tag = dev->mq.sq.head; sq 1046 drivers/infiniband/hw/ocrdma/ocrdma_hw.c cmd->hdr.tag_lo = dev->mq.sq.head; sq 2128 drivers/infiniband/hw/ocrdma/ocrdma_hw.c qp->sq.head = 0; sq 2129 drivers/infiniband/hw/ocrdma/ocrdma_hw.c qp->sq.tail = 0; sq 2205 drivers/infiniband/hw/ocrdma/ocrdma_hw.c qp->sq.max_cnt = max_wqe_allocated; sq 2208 drivers/infiniband/hw/ocrdma/ocrdma_hw.c qp->sq.va = dma_alloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL); sq 2209 drivers/infiniband/hw/ocrdma/ocrdma_hw.c if (!qp->sq.va) sq 2211 drivers/infiniband/hw/ocrdma/ocrdma_hw.c qp->sq.len = len; sq 2212 drivers/infiniband/hw/ocrdma/ocrdma_hw.c qp->sq.pa = pa; sq 2213 drivers/infiniband/hw/ocrdma/ocrdma_hw.c qp->sq.entry_size = dev->attr.wqe_size; sq 2227 drivers/infiniband/hw/ocrdma/ocrdma_hw.c cmd->max_wqe_rqe |= (ilog2(qp->sq.max_cnt) << sq 2340 drivers/infiniband/hw/ocrdma/ocrdma_hw.c qp->sq.dbid = rsp->sq_rq_id >> OCRDMA_CREATE_QP_RSP_SQ_ID_SHIFT; sq 2358 drivers/infiniband/hw/ocrdma/ocrdma_hw.c qp->sq.max_cnt = max_wqe_allocated; sq 2359 drivers/infiniband/hw/ocrdma/ocrdma_hw.c qp->sq.max_wqe_idx = max_wqe_allocated - 1; sq 2459 drivers/infiniband/hw/ocrdma/ocrdma_hw.c dma_free_coherent(&pdev->dev, qp->sq.len, qp->sq.va, qp->sq.pa); sq 2734 drivers/infiniband/hw/ocrdma/ocrdma_hw.c if (qp->sq.va) sq 2735 drivers/infiniband/hw/ocrdma/ocrdma_hw.c dma_free_coherent(&pdev->dev, qp->sq.len, qp->sq.va, qp->sq.pa); sq 1198 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c uresp.sq_dbid = qp->sq.dbid; sq 1200 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c uresp.sq_page_size = PAGE_ALIGN(qp->sq.len); sq 1201 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c uresp.sq_page_addr[0] = virt_to_phys(qp->sq.va); sq 1202 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c uresp.num_wqe_allocated = qp->sq.max_cnt; sq 1266 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c kcalloc(qp->sq.max_cnt, sizeof(*(qp->wqe_wr_id_tbl)), sq 1290 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c qp->sq.max_sges = attrs->cap.max_send_sge; sq 1497 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c qp_attr->cap.max_send_wr = qp->sq.max_cnt - 1; sq 1499 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c qp_attr->cap.max_send_sge = qp->sq.max_sges; sq 1571 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c return (qp->sq.tail == qp->sq.head); sq 1638 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c ocrdma_hwq_inc_tail(&qp->sq); sq 1732 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c ocrdma_del_mmap(pd->uctx, (u64) qp->sq.pa, sq 1733 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c PAGE_ALIGN(qp->sq.len)); sq 2096 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c u32 val = qp->sq.dbid | (1 << OCRDMA_DB_SQ_SHIFT); sq 2124 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c if (ocrdma_hwq_free_cnt(&qp->sq) == 0 || sq 2125 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c wr->num_sge > qp->sq.max_sges) { sq 2130 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c hdr = 
ocrdma_hwq_head(&qp->sq); sq 2187 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c qp->wqe_wr_id_tbl[qp->sq.head].signaled = 1; sq 2189 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c qp->wqe_wr_id_tbl[qp->sq.head].signaled = 0; sq 2190 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c qp->wqe_wr_id_tbl[qp->sq.head].wrid = wr->wr_id; sq 2199 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c ocrdma_hwq_inc_head(&qp->sq); sq 2422 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c hdr = ocrdma_hwq_head_from_idx(&qp->sq, wqe_idx); sq 2520 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c ocrdma_update_wc(qp, ibwc, qp->sq.tail); sq 2521 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c ocrdma_hwq_inc_tail(&qp->sq); sq 2576 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c int tail = qp->sq.tail; sq 2589 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c OCRDMA_CQE_WQEIDX_MASK) & qp->sq.max_wqe_idx; sq 2593 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c ocrdma_hwq_inc_tail(&qp->sq); sq 2836 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c ocrdma_update_wc(qp, ibwc, qp->sq.tail); sq 2837 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c ocrdma_hwq_inc_tail(&qp->sq); sq 395 drivers/infiniband/hw/qedr/qedr.h struct qedr_qp_hwq_info sq; sq 81 drivers/infiniband/hw/qedr/qedr_roce_cm.c dev->gsi_sqcq, dev->gsi_rqcq, qp->sq.gsi_cons, sq 89 drivers/infiniband/hw/qedr/qedr_roce_cm.c qedr_inc_sw_gsi_cons(&qp->sq); sq 341 drivers/infiniband/hw/qedr/qedr_roce_cm.c qp->sq.max_wr = attrs->cap.max_send_wr; sq 347 drivers/infiniband/hw/qedr/qedr_roce_cm.c qp->wqe_wr_id = kcalloc(qp->sq.max_wr, sizeof(*qp->wqe_wr_id), sq 586 drivers/infiniband/hw/qedr/qedr_roce_cm.c qp->wqe_wr_id[qp->sq.prod].wr_id = wr->wr_id; sq 587 drivers/infiniband/hw/qedr/qedr_roce_cm.c qedr_inc_sw_prod(&qp->sq); sq 711 drivers/infiniband/hw/qedr/qedr_roce_cm.c while (i < num_entries && qp->sq.cons != qp->sq.gsi_cons) { sq 715 drivers/infiniband/hw/qedr/qedr_roce_cm.c wc[i].wr_id = qp->wqe_wr_id[qp->sq.cons].wr_id; sq 719 drivers/infiniband/hw/qedr/qedr_roce_cm.c qedr_inc_sw_cons(&qp->sq); sq 727 drivers/infiniband/hw/qedr/qedr_roce_cm.c num_entries, i, qp->rq.cons, qp->rq.gsi_cons, qp->sq.cons, sq 728 drivers/infiniband/hw/qedr/qedr_roce_cm.c qp->sq.gsi_cons, qp->ibqp.qp_num); sq 1204 drivers/infiniband/hw/qedr/verbs.c qp->sq.max_sges = attrs->cap.max_send_sge; sq 1226 drivers/infiniband/hw/qedr/verbs.c qp->sq.max_sges, qp->sq_cq->icid); sq 1231 drivers/infiniband/hw/qedr/verbs.c qp->sq.db = dev->db_addr + sq 1233 drivers/infiniband/hw/qedr/verbs.c qp->sq.db_data.data.icid = qp->icid + 1; sq 1673 drivers/infiniband/hw/qedr/verbs.c qp->sq.db = dev->db_addr + sq 1675 drivers/infiniband/hw/qedr/verbs.c qp->sq.db_data.data.icid = qp->icid; sq 1701 drivers/infiniband/hw/qedr/verbs.c &qp->sq.pbl, NULL); sq 1706 drivers/infiniband/hw/qedr/verbs.c in_params->sq_num_pages = qed_chain_get_page_cnt(&qp->sq.pbl); sq 1707 drivers/infiniband/hw/qedr/verbs.c in_params->sq_pbl_ptr = qed_chain_get_pbl_phys(&qp->sq.pbl); sq 1768 drivers/infiniband/hw/qedr/verbs.c &qp->sq.pbl, &ext_pbl); sq 1801 drivers/infiniband/hw/qedr/verbs.c dev->ops->common->chain_free(dev->cdev, &qp->sq.pbl); sq 1833 drivers/infiniband/hw/qedr/verbs.c qp->sq.max_wr = min_t(u32, attrs->cap.max_send_wr * dev->wq_multiplier, sq 1836 drivers/infiniband/hw/qedr/verbs.c qp->wqe_wr_id = kcalloc(qp->sq.max_wr, sizeof(*qp->wqe_wr_id), sq 2014 drivers/infiniband/hw/qedr/verbs.c qedr_reset_qp_hwq_info(&qp->sq); sq 2084 drivers/infiniband/hw/qedr/verbs.c (qp->sq.prod != qp->sq.cons)) { sq 2087 drivers/infiniband/hw/qedr/verbs.c qp->rq.prod, qp->rq.cons, 
qp->sq.prod, sq 2088 drivers/infiniband/hw/qedr/verbs.c qp->sq.cons); sq 2404 drivers/infiniband/hw/qedr/verbs.c qp_attr->cap.max_send_wr = qp->sq.max_wr; sq 2406 drivers/infiniband/hw/qedr/verbs.c qp_attr->cap.max_send_sge = qp->sq.max_sges; sq 2961 drivers/infiniband/hw/qedr/verbs.c wqe = (char *)qed_chain_produce(&qp->sq.pbl); sq 3019 drivers/infiniband/hw/qedr/verbs.c struct rdma_sq_sge *sge = qed_chain_produce(&qp->sq.pbl); sq 3082 drivers/infiniband/hw/qedr/verbs.c fwqe2 = (struct rdma_sq_fmr_wqe_2nd *)qed_chain_produce(&qp->sq.pbl); sq 3108 drivers/infiniband/hw/qedr/verbs.c qp->wqe_wr_id[qp->sq.prod].mr = mr; sq 3146 drivers/infiniband/hw/qedr/verbs.c err_wr = wr->num_sge > qp->sq.max_sges; sq 3147 drivers/infiniband/hw/qedr/verbs.c wq_is_full = qedr_wq_is_full(&qp->sq); sq 3148 drivers/infiniband/hw/qedr/verbs.c pbl_is_full = qed_chain_get_elem_left_u32(&qp->sq.pbl) < sq 3201 drivers/infiniband/hw/qedr/verbs.c wqe = qed_chain_produce(&qp->sq.pbl); sq 3202 drivers/infiniband/hw/qedr/verbs.c qp->wqe_wr_id[qp->sq.prod].signaled = sq 3214 drivers/infiniband/hw/qedr/verbs.c qp->wqe_wr_id[qp->sq.prod].opcode = qedr_ib_to_wc_opcode(wr->opcode); sq 3226 drivers/infiniband/hw/qedr/verbs.c swqe2 = qed_chain_produce(&qp->sq.pbl); sq 3232 drivers/infiniband/hw/qedr/verbs.c qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size; sq 3234 drivers/infiniband/hw/qedr/verbs.c qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length; sq 3241 drivers/infiniband/hw/qedr/verbs.c swqe2 = qed_chain_produce(&qp->sq.pbl); sq 3245 drivers/infiniband/hw/qedr/verbs.c qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size; sq 3247 drivers/infiniband/hw/qedr/verbs.c qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length; sq 3252 drivers/infiniband/hw/qedr/verbs.c swqe2 = qed_chain_produce(&qp->sq.pbl); sq 3258 drivers/infiniband/hw/qedr/verbs.c qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size; sq 3260 drivers/infiniband/hw/qedr/verbs.c qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length; sq 3274 drivers/infiniband/hw/qedr/verbs.c rwqe2 = qed_chain_produce(&qp->sq.pbl); sq 3278 drivers/infiniband/hw/qedr/verbs.c qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size; sq 3280 drivers/infiniband/hw/qedr/verbs.c qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length; sq 3287 drivers/infiniband/hw/qedr/verbs.c rwqe2 = qed_chain_produce(&qp->sq.pbl); sq 3291 drivers/infiniband/hw/qedr/verbs.c qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size; sq 3293 drivers/infiniband/hw/qedr/verbs.c qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length; sq 3304 drivers/infiniband/hw/qedr/verbs.c rwqe2 = qed_chain_produce(&qp->sq.pbl); sq 3308 drivers/infiniband/hw/qedr/verbs.c qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size; sq 3310 drivers/infiniband/hw/qedr/verbs.c qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length; sq 3318 drivers/infiniband/hw/qedr/verbs.c awqe2 = qed_chain_produce(&qp->sq.pbl); sq 3322 drivers/infiniband/hw/qedr/verbs.c awqe3 = qed_chain_produce(&qp->sq.pbl); sq 3338 drivers/infiniband/hw/qedr/verbs.c qp->wqe_wr_id[qp->sq.prod].wqe_size = awqe1->wqe_size; sq 3348 drivers/infiniband/hw/qedr/verbs.c qp->wqe_wr_id[qp->sq.prod].wqe_size = iwqe->wqe_size; sq 3364 drivers/infiniband/hw/qedr/verbs.c qp->wqe_wr_id[qp->sq.prod].wqe_size = fwqe1->wqe_size; sq 3380 drivers/infiniband/hw/qedr/verbs.c value = le16_to_cpu(qp->sq.db_data.data.value); sq 3381 drivers/infiniband/hw/qedr/verbs.c qed_chain_set_prod(&qp->sq.pbl, value, wqe); sq 3425 drivers/infiniband/hw/qedr/verbs.c qp->wqe_wr_id[qp->sq.prod].wr_id = wr->wr_id; sq 
3427 drivers/infiniband/hw/qedr/verbs.c qedr_inc_sw_prod(&qp->sq); sq 3429 drivers/infiniband/hw/qedr/verbs.c qp->sq.db_data.data.value++; sq 3448 drivers/infiniband/hw/qedr/verbs.c writel(qp->sq.db_data.raw, qp->sq.db); sq 3693 drivers/infiniband/hw/qedr/verbs.c if (qp->wqe_wr_id[qp->sq.cons].opcode == IB_WC_REG_MR) sq 3694 drivers/infiniband/hw/qedr/verbs.c qp->wqe_wr_id[qp->sq.cons].mr->info.completed++; sq 3704 drivers/infiniband/hw/qedr/verbs.c while (num_entries && qp->sq.wqe_cons != hw_cons) { sq 3705 drivers/infiniband/hw/qedr/verbs.c if (!qp->wqe_wr_id[qp->sq.cons].signaled && !force) { sq 3718 drivers/infiniband/hw/qedr/verbs.c wc->wr_id = qp->wqe_wr_id[qp->sq.cons].wr_id; sq 3719 drivers/infiniband/hw/qedr/verbs.c wc->opcode = qp->wqe_wr_id[qp->sq.cons].opcode; sq 3723 drivers/infiniband/hw/qedr/verbs.c wc->byte_len = qp->wqe_wr_id[qp->sq.cons].bytes_len; sq 3730 drivers/infiniband/hw/qedr/verbs.c qp->wqe_wr_id[qp->sq.cons].mr->info.completed++; sq 3734 drivers/infiniband/hw/qedr/verbs.c wc->byte_len = qp->wqe_wr_id[qp->sq.cons].bytes_len; sq 3744 drivers/infiniband/hw/qedr/verbs.c while (qp->wqe_wr_id[qp->sq.cons].wqe_size--) sq 3745 drivers/infiniband/hw/qedr/verbs.c qed_chain_consume(&qp->sq.pbl); sq 3746 drivers/infiniband/hw/qedr/verbs.c qedr_inc_sw_cons(&qp->sq); sq 4048 drivers/infiniband/hw/qedr/verbs.c if (le16_to_cpu(req->sq_cons) == qp->sq.wqe_cons) { sq 185 drivers/infiniband/hw/vmw_pvrdma/pvrdma.h struct pvrdma_wq sq; sq 121 drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c if (qp->sq.ring) { sq 122 drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c atomic_set(&qp->sq.ring->cons_head, 0); sq 123 drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c atomic_set(&qp->sq.ring->prod_tail, 0); sq 162 drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c qp->sq.wqe_cnt = roundup_pow_of_two(max(1U, req_cap->max_send_wr)); sq 163 drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c qp->sq.max_sg = roundup_pow_of_two(max(1U, req_cap->max_send_sge)); sq 166 drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c req_cap->max_send_wr = qp->sq.wqe_cnt; sq 167 drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c req_cap->max_send_sge = qp->sq.max_sg; sq 169 drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c qp->sq.wqe_size = roundup_pow_of_two(sizeof(struct pvrdma_sq_wqe_hdr) + sq 171 drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c qp->sq.max_sg); sq 174 drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c (qp->sq.wqe_cnt * qp->sq.wqe_size + PAGE_SIZE - 1) / sq 245 drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c spin_lock_init(&qp->sq.lock); sq 306 drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c qp->sq.offset = PVRDMA_QP_NUM_HEADER_PAGES * PAGE_SIZE; sq 335 drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c qp->sq.ring = qp->pdir.pages[0]; sq 336 drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c qp->rq.ring = is_srq ? 
NULL : &qp->sq.ring[1]; sq 584 drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c qp->sq.offset + n * qp->sq.wqe_size); sq 637 drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c spin_lock_irqsave(&qp->sq.lock, flags); sq 643 drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c qp->sq.ring, qp->sq.wqe_cnt, &tail))) { sq 651 drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c if (unlikely(wr->num_sge > qp->sq.max_sg || wr->num_sge < 0)) { sq 803 drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c pvrdma_idx_ring_inc(&qp->sq.ring->prod_tail, sq 804 drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c qp->sq.wqe_cnt); sq 812 drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c spin_unlock_irqrestore(&qp->sq.lock, flags); sq 171 drivers/infiniband/sw/rxe/rxe_comp.c wqe = queue_head(qp->sq.queue); sq 449 drivers/infiniband/sw/rxe/rxe_comp.c advance_consumer(qp->sq.queue); sq 452 drivers/infiniband/sw/rxe/rxe_comp.c advance_consumer(qp->sq.queue); sq 547 drivers/infiniband/sw/rxe/rxe_comp.c while ((wqe = queue_head(qp->sq.queue))) { sq 552 drivers/infiniband/sw/rxe/rxe_comp.c advance_consumer(qp->sq.queue); sq 241 drivers/infiniband/sw/rxe/rxe_qp.c qp->sq.max_wr = init->cap.max_send_wr; sq 242 drivers/infiniband/sw/rxe/rxe_qp.c qp->sq.max_sge = init->cap.max_send_sge; sq 243 drivers/infiniband/sw/rxe/rxe_qp.c qp->sq.max_inline = init->cap.max_inline_data; sq 246 drivers/infiniband/sw/rxe/rxe_qp.c qp->sq.max_sge * sizeof(struct ib_sge), sq 248 drivers/infiniband/sw/rxe/rxe_qp.c qp->sq.max_inline); sq 250 drivers/infiniband/sw/rxe/rxe_qp.c qp->sq.queue = rxe_queue_init(rxe, sq 251 drivers/infiniband/sw/rxe/rxe_qp.c &qp->sq.max_wr, sq 253 drivers/infiniband/sw/rxe/rxe_qp.c if (!qp->sq.queue) sq 257 drivers/infiniband/sw/rxe/rxe_qp.c qp->sq.queue->buf, qp->sq.queue->buf_size, sq 258 drivers/infiniband/sw/rxe/rxe_qp.c &qp->sq.queue->ip); sq 261 drivers/infiniband/sw/rxe/rxe_qp.c vfree(qp->sq.queue->buf); sq 262 drivers/infiniband/sw/rxe/rxe_qp.c kfree(qp->sq.queue); sq 266 drivers/infiniband/sw/rxe/rxe_qp.c qp->req.wqe_index = producer_index(qp->sq.queue); sq 271 drivers/infiniband/sw/rxe/rxe_qp.c spin_lock_init(&qp->sq.sq_lock); sq 374 drivers/infiniband/sw/rxe/rxe_qp.c rxe_queue_cleanup(qp->sq.queue); sq 394 drivers/infiniband/sw/rxe/rxe_qp.c init->cap.max_send_wr = qp->sq.max_wr; sq 395 drivers/infiniband/sw/rxe/rxe_qp.c init->cap.max_send_sge = qp->sq.max_sge; sq 396 drivers/infiniband/sw/rxe/rxe_qp.c init->cap.max_inline_data = qp->sq.max_inline; sq 506 drivers/infiniband/sw/rxe/rxe_qp.c if (qp->sq.queue) { sq 521 drivers/infiniband/sw/rxe/rxe_qp.c if (qp->sq.queue) { sq 524 drivers/infiniband/sw/rxe/rxe_qp.c rxe_queue_reset(qp->sq.queue); sq 548 drivers/infiniband/sw/rxe/rxe_qp.c if (qp->sq.queue) { sq 559 drivers/infiniband/sw/rxe/rxe_qp.c if (qp->sq.queue) { sq 753 drivers/infiniband/sw/rxe/rxe_qp.c attr->cap.max_send_wr = qp->sq.max_wr; sq 754 drivers/infiniband/sw/rxe/rxe_qp.c attr->cap.max_send_sge = qp->sq.max_sge; sq 755 drivers/infiniband/sw/rxe/rxe_qp.c attr->cap.max_inline_data = qp->sq.max_inline; sq 798 drivers/infiniband/sw/rxe/rxe_qp.c if (qp->sq.queue) { sq 811 drivers/infiniband/sw/rxe/rxe_qp.c if (qp->sq.queue) sq 812 drivers/infiniband/sw/rxe/rxe_qp.c rxe_queue_cleanup(qp->sq.queue); sq 76 drivers/infiniband/sw/rxe/rxe_req.c qp->req.wqe_index = consumer_index(qp->sq.queue); sq 80 drivers/infiniband/sw/rxe/rxe_req.c for (wqe_index = consumer_index(qp->sq.queue); sq 81 drivers/infiniband/sw/rxe/rxe_req.c wqe_index != producer_index(qp->sq.queue); sq 82 drivers/infiniband/sw/rxe/rxe_req.c wqe_index = next_index(qp->sq.queue, 
wqe_index)) { sq 83 drivers/infiniband/sw/rxe/rxe_req.c wqe = addr_from_index(qp->sq.queue, wqe_index); sq 134 drivers/infiniband/sw/rxe/rxe_req.c struct rxe_send_wqe *wqe = queue_head(qp->sq.queue); sq 151 drivers/infiniband/sw/rxe/rxe_req.c consumer_index(qp->sq.queue)) || sq 174 drivers/infiniband/sw/rxe/rxe_req.c if (qp->req.wqe_index == producer_index(qp->sq.queue)) sq 177 drivers/infiniband/sw/rxe/rxe_req.c wqe = addr_from_index(qp->sq.queue, qp->req.wqe_index); sq 185 drivers/infiniband/sw/rxe/rxe_req.c (qp->req.wqe_index != consumer_index(qp->sq.queue)))) { sq 580 drivers/infiniband/sw/rxe/rxe_req.c qp->req.wqe_index = next_index(qp->sq.queue, qp->req.wqe_index); sq 610 drivers/infiniband/sw/rxe/rxe_req.c qp->req.wqe_index = consumer_index(qp->sq.queue); sq 661 drivers/infiniband/sw/rxe/rxe_req.c qp->req.wqe_index = next_index(qp->sq.queue, sq 706 drivers/infiniband/sw/rxe/rxe_req.c qp->req.wqe_index = next_index(qp->sq.queue, sq 507 drivers/infiniband/sw/rxe/rxe_verbs.c struct rxe_sq *sq = &qp->sq; sq 509 drivers/infiniband/sw/rxe/rxe_verbs.c if (unlikely(num_sge > sq->max_sge)) sq 521 drivers/infiniband/sw/rxe/rxe_verbs.c (length > sq->max_inline))) sq 638 drivers/infiniband/sw/rxe/rxe_verbs.c struct rxe_sq *sq = &qp->sq; sq 646 drivers/infiniband/sw/rxe/rxe_verbs.c spin_lock_irqsave(&qp->sq.sq_lock, flags); sq 648 drivers/infiniband/sw/rxe/rxe_verbs.c if (unlikely(queue_full(sq->queue))) { sq 653 drivers/infiniband/sw/rxe/rxe_verbs.c send_wqe = producer_addr(sq->queue); sq 665 drivers/infiniband/sw/rxe/rxe_verbs.c advance_producer(sq->queue); sq 666 drivers/infiniband/sw/rxe/rxe_verbs.c spin_unlock_irqrestore(&qp->sq.sq_lock, flags); sq 671 drivers/infiniband/sw/rxe/rxe_verbs.c spin_unlock_irqrestore(&qp->sq.sq_lock, flags); sq 250 drivers/infiniband/sw/rxe/rxe_verbs.h struct rxe_sq sq; sq 288 drivers/iommu/intel_irq_remapping.c unsigned int sq, unsigned int sid) sq 293 drivers/iommu/intel_irq_remapping.c irte->sq = sq; sq 1278 drivers/iommu/intel_irq_remapping.c irte->sid, irte->sq, irte->svt); sq 2289 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c const struct v4l2_rect *sq = &tpg->square; sq 2331 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c if (tpg->show_square && frame_line >= sq->top && sq 2332 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c frame_line < sq->top + sq->height && sq 2333 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c sq->left < c->left + c->width && sq 2334 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c sq->left + sq->width >= c->left) { sq 2335 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c unsigned left = sq->left; sq 2336 drivers/media/common/v4l2-tpg/v4l2-tpg-core.c unsigned width = sq->width; sq 294 drivers/net/ethernet/amazon/ena/ena_admin_defs.h struct ena_admin_sq sq; sq 111 drivers/net/ethernet/amazon/ena/ena_com.c struct ena_com_admin_sq *sq = &queue->sq; sq 114 drivers/net/ethernet/amazon/ena/ena_com.c sq->entries = dma_alloc_coherent(queue->q_dmadev, size, &sq->dma_addr, sq 117 drivers/net/ethernet/amazon/ena/ena_com.c if (!sq->entries) { sq 122 drivers/net/ethernet/amazon/ena/ena_com.c sq->head = 0; sq 123 drivers/net/ethernet/amazon/ena/ena_com.c sq->tail = 0; sq 124 drivers/net/ethernet/amazon/ena/ena_com.c sq->phase = 1; sq 126 drivers/net/ethernet/amazon/ena/ena_com.c sq->db_addr = NULL; sq 240 drivers/net/ethernet/amazon/ena/ena_com.c tail_masked = admin_queue->sq.tail & queue_size_mask; sq 252 drivers/net/ethernet/amazon/ena/ena_com.c cmd->aq_common_descriptor.flags |= admin_queue->sq.phase & sq 269 drivers/net/ethernet/amazon/ena/ena_com.c 
memcpy(&admin_queue->sq.entries[tail_masked], cmd, cmd_size_in_bytes); sq 274 drivers/net/ethernet/amazon/ena/ena_com.c admin_queue->sq.tail++; sq 277 drivers/net/ethernet/amazon/ena/ena_com.c if (unlikely((admin_queue->sq.tail & queue_size_mask) == 0)) sq 278 drivers/net/ethernet/amazon/ena/ena_com.c admin_queue->sq.phase = !admin_queue->sq.phase; sq 280 drivers/net/ethernet/amazon/ena/ena_com.c writel(admin_queue->sq.tail, admin_queue->sq.db_addr); sq 517 drivers/net/ethernet/amazon/ena/ena_com.c admin_queue->sq.head += comp_num; sq 892 drivers/net/ethernet/amazon/ena/ena_com.c destroy_cmd.sq.sq_identity |= (direction << sq 896 drivers/net/ethernet/amazon/ena/ena_com.c destroy_cmd.sq.sq_idx = io_sq->idx; sq 1635 drivers/net/ethernet/amazon/ena/ena_com.c struct ena_com_admin_sq *sq = &admin_queue->sq; sq 1643 drivers/net/ethernet/amazon/ena/ena_com.c if (sq->entries) sq 1644 drivers/net/ethernet/amazon/ena/ena_com.c dma_free_coherent(ena_dev->dmadev, size, sq->entries, sq 1645 drivers/net/ethernet/amazon/ena/ena_com.c sq->dma_addr); sq 1646 drivers/net/ethernet/amazon/ena/ena_com.c sq->entries = NULL; sq 1776 drivers/net/ethernet/amazon/ena/ena_com.c admin_queue->sq.db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar + sq 1779 drivers/net/ethernet/amazon/ena/ena_com.c addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->sq.dma_addr); sq 1780 drivers/net/ethernet/amazon/ena/ena_com.c addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->sq.dma_addr); sq 246 drivers/net/ethernet/amazon/ena/ena_com.h struct ena_com_admin_sq sq; sq 589 drivers/net/ethernet/cavium/thunder/nic.h struct sq_cfg_msg sq; sq 612 drivers/net/ethernet/cavium/thunder/nic_main.c struct sq_cfg_msg *sq) sq 618 drivers/net/ethernet/cavium/thunder/nic_main.c u8 sq_idx = sq->sq_num; sq 622 drivers/net/ethernet/cavium/thunder/nic_main.c if (sq->sqs_mode) sq 638 drivers/net/ethernet/cavium/thunder/nic_main.c if (!sq->sqs_mode) { sq 1023 drivers/net/ethernet/cavium/thunder/nic_main.c (mbx.sq.qs_num << NIC_QS_ID_SHIFT) | sq 1024 drivers/net/ethernet/cavium/thunder/nic_main.c (mbx.sq.sq_num << NIC_Q_NUM_SHIFT); sq 1025 drivers/net/ethernet/cavium/thunder/nic_main.c nic_reg_write(nic, reg_addr, mbx.sq.cfg); sq 1026 drivers/net/ethernet/cavium/thunder/nic_main.c nic_tx_channel_cfg(nic, mbx.qs.num, &mbx.sq); sq 317 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c *((*data)++) = ((u64 *)&nic->qs->sq[qidx].stats) sq 531 drivers/net/ethernet/cavium/thunder/nicvf_main.c struct cqe_rx_t *cqe_rx, struct snd_queue *sq, sq 593 drivers/net/ethernet/cavium/thunder/nicvf_main.c nicvf_xdp_sq_append_pkt(nic, sq, (u64)xdp.data, dma_addr, len); sq 666 drivers/net/ethernet/cavium/thunder/nicvf_main.c struct snd_queue *sq; sq 670 drivers/net/ethernet/cavium/thunder/nicvf_main.c sq = &nic->qs->sq[cqe_tx->sq_idx]; sq 672 drivers/net/ethernet/cavium/thunder/nicvf_main.c hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, cqe_tx->sqe_ptr); sq 681 drivers/net/ethernet/cavium/thunder/nicvf_main.c if (sq->is_xdp) { sq 682 drivers/net/ethernet/cavium/thunder/nicvf_main.c page = (struct page *)sq->xdp_page[cqe_tx->sqe_ptr]; sq 685 drivers/net/ethernet/cavium/thunder/nicvf_main.c nicvf_unmap_sndq_buffers(nic, sq, cqe_tx->sqe_ptr, sq 691 drivers/net/ethernet/cavium/thunder/nicvf_main.c sq->xdp_page[cqe_tx->sqe_ptr] = (u64)NULL; sq 696 drivers/net/ethernet/cavium/thunder/nicvf_main.c skb = (struct sk_buff *)sq->skbuff[cqe_tx->sqe_ptr]; sq 702 drivers/net/ethernet/cavium/thunder/nicvf_main.c (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, hdr->rsvd2); sq 703 
drivers/net/ethernet/cavium/thunder/nicvf_main.c nicvf_unmap_sndq_buffers(nic, sq, hdr->rsvd2, sq 707 drivers/net/ethernet/cavium/thunder/nicvf_main.c nicvf_unmap_sndq_buffers(nic, sq, cqe_tx->sqe_ptr, sq 720 drivers/net/ethernet/cavium/thunder/nicvf_main.c sq->skbuff[cqe_tx->sqe_ptr] = (u64)NULL; sq 776 drivers/net/ethernet/cavium/thunder/nicvf_main.c struct snd_queue *sq, struct rcv_queue *rq) sq 802 drivers/net/ethernet/cavium/thunder/nicvf_main.c if (nicvf_xdp_rx(snic, nic->xdp_prog, cqe_rx, sq, rq, &skb)) sq 859 drivers/net/ethernet/cavium/thunder/nicvf_main.c struct snd_queue *sq = &qs->sq[cq_idx]; sq 891 drivers/net/ethernet/cavium/thunder/nicvf_main.c nicvf_rcv_pkt_handler(netdev, napi, cq_desc, sq, rq); sq 922 drivers/net/ethernet/cavium/thunder/nicvf_main.c nicvf_put_sq_desc(sq, subdesc_cnt); sq 928 drivers/net/ethernet/cavium/thunder/nicvf_main.c nicvf_xdp_sq_doorbell(nic, sq, cq_idx); sq 937 drivers/net/ethernet/cavium/thunder/nicvf_main.c (atomic_read(&sq->free_cnt) >= MIN_SQ_DESC_PER_PKT_XMIT)) { sq 1008 drivers/net/ethernet/cavium/thunder/nicvf_main.c nicvf_sq_free_used_descs(nic->netdev, &qs->sq[qidx], qidx); sq 1009 drivers/net/ethernet/cavium/thunder/nicvf_main.c nicvf_sq_enable(nic, &qs->sq[qidx], qidx); sq 1260 drivers/net/ethernet/cavium/thunder/nicvf_main.c struct snd_queue *sq; sq 1291 drivers/net/ethernet/cavium/thunder/nicvf_main.c sq = &snic->qs->sq[qid]; sq 1293 drivers/net/ethernet/cavium/thunder/nicvf_main.c !nicvf_sq_append_skb(snic, sq, skb, qid)) { sq 1300 drivers/net/ethernet/cavium/thunder/nicvf_main.c if (atomic_read(&sq->free_cnt) > MIN_SQ_DESC_PER_PKT_XMIT) { sq 19 drivers/net/ethernet/cavium/thunder/nicvf_queues.c static inline void nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry, sq 504 drivers/net/ethernet/cavium/thunder/nicvf_queues.c struct snd_queue *sq, int q_len, int qidx) sq 508 drivers/net/ethernet/cavium/thunder/nicvf_queues.c err = nicvf_alloc_q_desc_mem(nic, &sq->dmem, q_len, SND_QUEUE_DESC_SIZE, sq 513 drivers/net/ethernet/cavium/thunder/nicvf_queues.c sq->desc = sq->dmem.base; sq 514 drivers/net/ethernet/cavium/thunder/nicvf_queues.c sq->skbuff = kcalloc(q_len, sizeof(u64), GFP_KERNEL); sq 515 drivers/net/ethernet/cavium/thunder/nicvf_queues.c if (!sq->skbuff) sq 518 drivers/net/ethernet/cavium/thunder/nicvf_queues.c sq->head = 0; sq 519 drivers/net/ethernet/cavium/thunder/nicvf_queues.c sq->tail = 0; sq 520 drivers/net/ethernet/cavium/thunder/nicvf_queues.c sq->thresh = SND_QUEUE_THRESH; sq 527 drivers/net/ethernet/cavium/thunder/nicvf_queues.c sq->xdp_page = kcalloc(q_len, sizeof(u64), GFP_KERNEL); sq 528 drivers/net/ethernet/cavium/thunder/nicvf_queues.c if (!sq->xdp_page) sq 530 drivers/net/ethernet/cavium/thunder/nicvf_queues.c sq->xdp_desc_cnt = 0; sq 531 drivers/net/ethernet/cavium/thunder/nicvf_queues.c sq->xdp_free_cnt = q_len - 1; sq 532 drivers/net/ethernet/cavium/thunder/nicvf_queues.c sq->is_xdp = true; sq 534 drivers/net/ethernet/cavium/thunder/nicvf_queues.c sq->xdp_page = NULL; sq 535 drivers/net/ethernet/cavium/thunder/nicvf_queues.c sq->xdp_desc_cnt = 0; sq 536 drivers/net/ethernet/cavium/thunder/nicvf_queues.c sq->xdp_free_cnt = 0; sq 537 drivers/net/ethernet/cavium/thunder/nicvf_queues.c sq->is_xdp = false; sq 539 drivers/net/ethernet/cavium/thunder/nicvf_queues.c atomic_set(&sq->free_cnt, q_len - 1); sq 542 drivers/net/ethernet/cavium/thunder/nicvf_queues.c sq->tso_hdrs = dma_alloc_coherent(&nic->pdev->dev, sq 544 drivers/net/ethernet/cavium/thunder/nicvf_queues.c &sq->tso_hdrs_phys, sq 546 
drivers/net/ethernet/cavium/thunder/nicvf_queues.c if (!sq->tso_hdrs) sq 553 drivers/net/ethernet/cavium/thunder/nicvf_queues.c void nicvf_unmap_sndq_buffers(struct nicvf *nic, struct snd_queue *sq, sq 562 drivers/net/ethernet/cavium/thunder/nicvf_queues.c hdr_sqe &= (sq->dmem.q_len - 1); sq 563 drivers/net/ethernet/cavium/thunder/nicvf_queues.c gather = (struct sq_gather_subdesc *)GET_SQ_DESC(sq, hdr_sqe); sq 571 drivers/net/ethernet/cavium/thunder/nicvf_queues.c static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq) sq 578 drivers/net/ethernet/cavium/thunder/nicvf_queues.c if (!sq) sq 580 drivers/net/ethernet/cavium/thunder/nicvf_queues.c if (!sq->dmem.base) sq 583 drivers/net/ethernet/cavium/thunder/nicvf_queues.c if (sq->tso_hdrs) { sq 585 drivers/net/ethernet/cavium/thunder/nicvf_queues.c sq->dmem.q_len * TSO_HEADER_SIZE, sq 586 drivers/net/ethernet/cavium/thunder/nicvf_queues.c sq->tso_hdrs, sq->tso_hdrs_phys); sq 587 drivers/net/ethernet/cavium/thunder/nicvf_queues.c sq->tso_hdrs = NULL; sq 592 drivers/net/ethernet/cavium/thunder/nicvf_queues.c while (sq->head != sq->tail) { sq 593 drivers/net/ethernet/cavium/thunder/nicvf_queues.c skb = (struct sk_buff *)sq->skbuff[sq->head]; sq 594 drivers/net/ethernet/cavium/thunder/nicvf_queues.c if (!skb || !sq->xdp_page) sq 597 drivers/net/ethernet/cavium/thunder/nicvf_queues.c page = (struct page *)sq->xdp_page[sq->head]; sq 603 drivers/net/ethernet/cavium/thunder/nicvf_queues.c hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, sq->head); sq 608 drivers/net/ethernet/cavium/thunder/nicvf_queues.c (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, hdr->rsvd2); sq 609 drivers/net/ethernet/cavium/thunder/nicvf_queues.c nicvf_unmap_sndq_buffers(nic, sq, hdr->rsvd2, sq 612 drivers/net/ethernet/cavium/thunder/nicvf_queues.c nicvf_unmap_sndq_buffers(nic, sq, sq->head, sq 618 drivers/net/ethernet/cavium/thunder/nicvf_queues.c sq->head++; sq 619 drivers/net/ethernet/cavium/thunder/nicvf_queues.c sq->head &= (sq->dmem.q_len - 1); sq 621 drivers/net/ethernet/cavium/thunder/nicvf_queues.c kfree(sq->skbuff); sq 622 drivers/net/ethernet/cavium/thunder/nicvf_queues.c kfree(sq->xdp_page); sq 623 drivers/net/ethernet/cavium/thunder/nicvf_queues.c nicvf_free_q_desc_mem(nic, &sq->dmem); sq 862 drivers/net/ethernet/cavium/thunder/nicvf_queues.c struct snd_queue *sq; sq 865 drivers/net/ethernet/cavium/thunder/nicvf_queues.c sq = &qs->sq[qidx]; sq 866 drivers/net/ethernet/cavium/thunder/nicvf_queues.c sq->enable = enable; sq 868 drivers/net/ethernet/cavium/thunder/nicvf_queues.c if (!sq->enable) { sq 876 drivers/net/ethernet/cavium/thunder/nicvf_queues.c sq->cq_qs = qs->vnic_id; sq 877 drivers/net/ethernet/cavium/thunder/nicvf_queues.c sq->cq_idx = qidx; sq 880 drivers/net/ethernet/cavium/thunder/nicvf_queues.c mbx.sq.msg = NIC_MBOX_MSG_SQ_CFG; sq 881 drivers/net/ethernet/cavium/thunder/nicvf_queues.c mbx.sq.qs_num = qs->vnic_id; sq 882 drivers/net/ethernet/cavium/thunder/nicvf_queues.c mbx.sq.sq_num = qidx; sq 883 drivers/net/ethernet/cavium/thunder/nicvf_queues.c mbx.sq.sqs_mode = nic->sqs_mode; sq 884 drivers/net/ethernet/cavium/thunder/nicvf_queues.c mbx.sq.cfg = (sq->cq_qs << 3) | sq->cq_idx; sq 889 drivers/net/ethernet/cavium/thunder/nicvf_queues.c qidx, (u64)(sq->dmem.phys_base)); sq 905 drivers/net/ethernet/cavium/thunder/nicvf_queues.c nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_THRESH, qidx, sq->thresh); sq 909 drivers/net/ethernet/cavium/thunder/nicvf_queues.c cpumask_set_cpu(qidx, &sq->affinity_mask); sq 911 
drivers/net/ethernet/cavium/thunder/nicvf_queues.c &sq->affinity_mask, qidx); sq 1003 drivers/net/ethernet/cavium/thunder/nicvf_queues.c nicvf_free_snd_queue(nic, &qs->sq[qidx]); sq 1020 drivers/net/ethernet/cavium/thunder/nicvf_queues.c if (nicvf_init_snd_queue(nic, &qs->sq[qidx], qs->sq_len, qidx)) sq 1118 drivers/net/ethernet/cavium/thunder/nicvf_queues.c static inline int nicvf_get_sq_desc(struct snd_queue *sq, int desc_cnt) sq 1122 drivers/net/ethernet/cavium/thunder/nicvf_queues.c qentry = sq->tail; sq 1123 drivers/net/ethernet/cavium/thunder/nicvf_queues.c if (!sq->is_xdp) sq 1124 drivers/net/ethernet/cavium/thunder/nicvf_queues.c atomic_sub(desc_cnt, &sq->free_cnt); sq 1126 drivers/net/ethernet/cavium/thunder/nicvf_queues.c sq->xdp_free_cnt -= desc_cnt; sq 1127 drivers/net/ethernet/cavium/thunder/nicvf_queues.c sq->tail += desc_cnt; sq 1128 drivers/net/ethernet/cavium/thunder/nicvf_queues.c sq->tail &= (sq->dmem.q_len - 1); sq 1134 drivers/net/ethernet/cavium/thunder/nicvf_queues.c static inline void nicvf_rollback_sq_desc(struct snd_queue *sq, sq 1137 drivers/net/ethernet/cavium/thunder/nicvf_queues.c sq->tail = qentry; sq 1138 drivers/net/ethernet/cavium/thunder/nicvf_queues.c atomic_add(desc_cnt, &sq->free_cnt); sq 1142 drivers/net/ethernet/cavium/thunder/nicvf_queues.c void nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt) sq 1144 drivers/net/ethernet/cavium/thunder/nicvf_queues.c if (!sq->is_xdp) sq 1145 drivers/net/ethernet/cavium/thunder/nicvf_queues.c atomic_add(desc_cnt, &sq->free_cnt); sq 1147 drivers/net/ethernet/cavium/thunder/nicvf_queues.c sq->xdp_free_cnt += desc_cnt; sq 1148 drivers/net/ethernet/cavium/thunder/nicvf_queues.c sq->head += desc_cnt; sq 1149 drivers/net/ethernet/cavium/thunder/nicvf_queues.c sq->head &= (sq->dmem.q_len - 1); sq 1152 drivers/net/ethernet/cavium/thunder/nicvf_queues.c static inline int nicvf_get_nxt_sqentry(struct snd_queue *sq, int qentry) sq 1155 drivers/net/ethernet/cavium/thunder/nicvf_queues.c qentry &= (sq->dmem.q_len - 1); sq 1159 drivers/net/ethernet/cavium/thunder/nicvf_queues.c void nicvf_sq_enable(struct nicvf *nic, struct snd_queue *sq, int qidx) sq 1179 drivers/net/ethernet/cavium/thunder/nicvf_queues.c void nicvf_sq_free_used_descs(struct net_device *netdev, struct snd_queue *sq, sq 1189 drivers/net/ethernet/cavium/thunder/nicvf_queues.c while (sq->head != head) { sq 1190 drivers/net/ethernet/cavium/thunder/nicvf_queues.c hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, sq->head); sq 1192 drivers/net/ethernet/cavium/thunder/nicvf_queues.c nicvf_put_sq_desc(sq, 1); sq 1195 drivers/net/ethernet/cavium/thunder/nicvf_queues.c skb = (struct sk_buff *)sq->skbuff[sq->head]; sq 1201 drivers/net/ethernet/cavium/thunder/nicvf_queues.c nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1); sq 1207 drivers/net/ethernet/cavium/thunder/nicvf_queues.c struct snd_queue *sq, int sq_num) sq 1209 drivers/net/ethernet/cavium/thunder/nicvf_queues.c if (!sq->xdp_desc_cnt) sq 1217 drivers/net/ethernet/cavium/thunder/nicvf_queues.c sq_num, sq->xdp_desc_cnt); sq 1218 drivers/net/ethernet/cavium/thunder/nicvf_queues.c sq->xdp_desc_cnt = 0; sq 1222 drivers/net/ethernet/cavium/thunder/nicvf_queues.c nicvf_xdp_sq_add_hdr_subdesc(struct snd_queue *sq, int qentry, sq 1227 drivers/net/ethernet/cavium/thunder/nicvf_queues.c hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry); sq 1233 drivers/net/ethernet/cavium/thunder/nicvf_queues.c sq->xdp_page[qentry] = (u64)virt_to_page((void *)data); sq 1236 drivers/net/ethernet/cavium/thunder/nicvf_queues.c int 
nicvf_xdp_sq_append_pkt(struct nicvf *nic, struct snd_queue *sq, sq 1242 drivers/net/ethernet/cavium/thunder/nicvf_queues.c if (subdesc_cnt > sq->xdp_free_cnt) sq 1245 drivers/net/ethernet/cavium/thunder/nicvf_queues.c qentry = nicvf_get_sq_desc(sq, subdesc_cnt); sq 1247 drivers/net/ethernet/cavium/thunder/nicvf_queues.c nicvf_xdp_sq_add_hdr_subdesc(sq, qentry, subdesc_cnt - 1, bufaddr, len); sq 1249 drivers/net/ethernet/cavium/thunder/nicvf_queues.c qentry = nicvf_get_nxt_sqentry(sq, qentry); sq 1250 drivers/net/ethernet/cavium/thunder/nicvf_queues.c nicvf_sq_add_gather_subdesc(sq, qentry, len, dma_addr); sq 1252 drivers/net/ethernet/cavium/thunder/nicvf_queues.c sq->xdp_desc_cnt += subdesc_cnt; sq 1330 drivers/net/ethernet/cavium/thunder/nicvf_queues.c nicvf_sq_add_hdr_subdesc(struct nicvf *nic, struct snd_queue *sq, int qentry, sq 1342 drivers/net/ethernet/cavium/thunder/nicvf_queues.c hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry); sq 1352 drivers/net/ethernet/cavium/thunder/nicvf_queues.c sq->skbuff[qentry] = (u64)skb; sq 1419 drivers/net/ethernet/cavium/thunder/nicvf_queues.c static inline void nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry, sq 1424 drivers/net/ethernet/cavium/thunder/nicvf_queues.c qentry &= (sq->dmem.q_len - 1); sq 1425 drivers/net/ethernet/cavium/thunder/nicvf_queues.c gather = (struct sq_gather_subdesc *)GET_SQ_DESC(sq, qentry); sq 1438 drivers/net/ethernet/cavium/thunder/nicvf_queues.c static inline void nicvf_sq_add_cqe_subdesc(struct snd_queue *sq, int qentry, sq 1444 drivers/net/ethernet/cavium/thunder/nicvf_queues.c sq->skbuff[qentry] = (u64)skb; sq 1446 drivers/net/ethernet/cavium/thunder/nicvf_queues.c hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry); sq 1458 drivers/net/ethernet/cavium/thunder/nicvf_queues.c qentry = nicvf_get_nxt_sqentry(sq, qentry); sq 1459 drivers/net/ethernet/cavium/thunder/nicvf_queues.c imm = (struct sq_imm_subdesc *)GET_SQ_DESC(sq, qentry); sq 1486 drivers/net/ethernet/cavium/thunder/nicvf_queues.c static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq, sq 1507 drivers/net/ethernet/cavium/thunder/nicvf_queues.c qentry = nicvf_get_nxt_sqentry(sq, qentry); sq 1508 drivers/net/ethernet/cavium/thunder/nicvf_queues.c hdr = sq->tso_hdrs + qentry * TSO_HEADER_SIZE; sq 1510 drivers/net/ethernet/cavium/thunder/nicvf_queues.c nicvf_sq_add_gather_subdesc(sq, qentry, hdr_len, sq 1511 drivers/net/ethernet/cavium/thunder/nicvf_queues.c sq->tso_hdrs_phys + sq 1523 drivers/net/ethernet/cavium/thunder/nicvf_queues.c qentry = nicvf_get_nxt_sqentry(sq, qentry); sq 1524 drivers/net/ethernet/cavium/thunder/nicvf_queues.c nicvf_sq_add_gather_subdesc(sq, qentry, size, sq 1532 drivers/net/ethernet/cavium/thunder/nicvf_queues.c nicvf_sq_add_hdr_subdesc(nic, sq, hdr_qentry, sq 1534 drivers/net/ethernet/cavium/thunder/nicvf_queues.c sq->skbuff[hdr_qentry] = (u64)NULL; sq 1535 drivers/net/ethernet/cavium/thunder/nicvf_queues.c qentry = nicvf_get_nxt_sqentry(sq, qentry); sq 1540 drivers/net/ethernet/cavium/thunder/nicvf_queues.c sq->skbuff[hdr_qentry] = (u64)skb; sq 1549 drivers/net/ethernet/cavium/thunder/nicvf_queues.c int nicvf_sq_append_skb(struct nicvf *nic, struct snd_queue *sq, sq 1558 drivers/net/ethernet/cavium/thunder/nicvf_queues.c if (subdesc_cnt > atomic_read(&sq->free_cnt)) sq 1561 drivers/net/ethernet/cavium/thunder/nicvf_queues.c qentry = nicvf_get_sq_desc(sq, subdesc_cnt); sq 1565 drivers/net/ethernet/cavium/thunder/nicvf_queues.c return nicvf_sq_append_tso(nic, sq, sq_num, qentry, skb); sq 1568 
sq 1568 drivers/net/ethernet/cavium/thunder/nicvf_queues.c nicvf_sq_add_hdr_subdesc(nic, sq, qentry, subdesc_cnt - 1,
sq 1573 drivers/net/ethernet/cavium/thunder/nicvf_queues.c qentry = nicvf_get_nxt_sqentry(sq, qentry);
sq 1580 drivers/net/ethernet/cavium/thunder/nicvf_queues.c nicvf_rollback_sq_desc(sq, qentry, subdesc_cnt);
sq 1584 drivers/net/ethernet/cavium/thunder/nicvf_queues.c nicvf_sq_add_gather_subdesc(sq, qentry, size, dma_addr);
sq 1593 drivers/net/ethernet/cavium/thunder/nicvf_queues.c qentry = nicvf_get_nxt_sqentry(sq, qentry);
sq 1604 drivers/net/ethernet/cavium/thunder/nicvf_queues.c nicvf_unmap_sndq_buffers(nic, sq, hdr_sqe, i);
sq 1605 drivers/net/ethernet/cavium/thunder/nicvf_queues.c nicvf_rollback_sq_desc(sq, qentry, subdesc_cnt);
sq 1608 drivers/net/ethernet/cavium/thunder/nicvf_queues.c nicvf_sq_add_gather_subdesc(sq, qentry, size, dma_addr);
sq 1613 drivers/net/ethernet/cavium/thunder/nicvf_queues.c qentry = nicvf_get_nxt_sqentry(sq, qentry);
sq 1614 drivers/net/ethernet/cavium/thunder/nicvf_queues.c nicvf_sq_add_cqe_subdesc(sq, qentry, hdr_sqe, skb);
sq 1826 drivers/net/ethernet/cavium/thunder/nicvf_queues.c struct snd_queue *sq;
sq 1832 drivers/net/ethernet/cavium/thunder/nicvf_queues.c sq = &nic->qs->sq[sq_idx];
sq 1833 drivers/net/ethernet/cavium/thunder/nicvf_queues.c sq->stats.bytes = GET_SQ_STATS(RQ_SQ_STATS_OCTS);
sq 1834 drivers/net/ethernet/cavium/thunder/nicvf_queues.c sq->stats.pkts = GET_SQ_STATS(RQ_SQ_STATS_PKTS);
sq 301 drivers/net/ethernet/cavium/thunder/nicvf_queues.h struct snd_queue sq[MAX_SND_QUEUES_PER_QS];
sq 328 drivers/net/ethernet/cavium/thunder/nicvf_queues.h void nicvf_unmap_sndq_buffers(struct nicvf *nic, struct snd_queue *sq,
sq 338 drivers/net/ethernet/cavium/thunder/nicvf_queues.h void nicvf_sq_enable(struct nicvf *nic, struct snd_queue *sq, int qidx);
sq 340 drivers/net/ethernet/cavium/thunder/nicvf_queues.h void nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt);
sq 342 drivers/net/ethernet/cavium/thunder/nicvf_queues.h struct snd_queue *sq, int qidx);
sq 343 drivers/net/ethernet/cavium/thunder/nicvf_queues.h int nicvf_sq_append_skb(struct nicvf *nic, struct snd_queue *sq,
sq 345 drivers/net/ethernet/cavium/thunder/nicvf_queues.h int nicvf_xdp_sq_append_pkt(struct nicvf *nic, struct snd_queue *sq,
sq 347 drivers/net/ethernet/cavium/thunder/nicvf_queues.h void nicvf_xdp_sq_doorbell(struct nicvf *nic, struct snd_queue *sq, int sq_num);
sq 863 drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c return &qp->sq;
sq 928 drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c int hinic_hwdev_hw_ci_addr_set(struct hinic_hwdev *hwdev, struct hinic_sq *sq,
sq 931 drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c struct hinic_qp *qp = container_of(sq, struct hinic_qp, sq);
sq 947 drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c hw_ci.msix_entry_idx = sq->msix_entry;
sq 953 drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c hw_ci.ci_addr = ADDR_IN_4BYTES(sq->hw_ci_dma_addr);
sq 285 drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h int hinic_hwdev_hw_ci_addr_set(struct hinic_hwdev *hwdev, struct hinic_sq *sq,
sq 128 drivers/net/ethernet/huawei/hinic/hinic_hw_io.c hinic_sq_prepare_ctxt(&sq_ctxt[i], &qp->sq,
sq 305 drivers/net/ethernet/huawei/hinic/hinic_hw_io.c err = hinic_init_sq(&qp->sq, hwif, &func_to_io->sq_wq[q_id],
sq 324 drivers/net/ethernet/huawei/hinic/hinic_hw_io.c hinic_clean_sq(&qp->sq);
sq 348 drivers/net/ethernet/huawei/hinic/hinic_hw_io.c hinic_clean_sq(&qp->sq);
sq 59 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c #define SQ_DB_ADDR(sq, pi) ((u64 *)((sq)->db_base) + SQ_DB_PI_LOW(pi))
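The hinic entries above recover the owning queue pair with container_of(sq, struct hinic_qp, sq): given a pointer to an embedded member, subtract the member's offset to get the enclosing structure. A minimal standalone demo; the two struct names match the driver, but the field layout here is invented:

/* container_of() demo matching the hinic usage above. */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct hinic_sq { int sqn; };
struct hinic_qp { int q_id; struct hinic_sq sq; };

int main(void)
{
	struct hinic_qp qp = { .q_id = 3 };
	struct hinic_sq *sq = &qp.sq;

	/* Recover the enclosing qp from the embedded sq member. */
	struct hinic_qp *owner = container_of(sq, struct hinic_qp, sq);
	printf("q_id=%d\n", owner->q_id);
	return 0;
}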
sq 61 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c #define SQ_MASKED_IDX(sq, idx) ((idx) & (sq)->wq->mask)
sq 93 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c struct hinic_sq *sq, u16 global_qid)
sq 100 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c wq = sq->wq;
sq 214 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c static int alloc_sq_skb_arr(struct hinic_sq *sq)
sq 216 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c struct hinic_wq *wq = sq->wq;
sq 219 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c skb_arr_size = wq->q_depth * sizeof(*sq->saved_skb);
sq 220 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c sq->saved_skb = vzalloc(skb_arr_size);
sq 221 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c if (!sq->saved_skb)
sq 231 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c static void free_sq_skb_arr(struct hinic_sq *sq)
sq 233 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c vfree(sq->saved_skb);
sq 276 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c int hinic_init_sq(struct hinic_sq *sq, struct hinic_hwif *hwif,
sq 281 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c sq->hwif = hwif;
sq 283 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c sq->wq = wq;
sq 285 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c sq->irq = entry->vector;
sq 286 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c sq->msix_entry = entry->entry;
sq 288 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c sq->hw_ci_addr = ci_addr;
sq 289 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c sq->hw_ci_dma_addr = ci_dma_addr;
sq 291 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c sq->db_base = db_base + SQ_DB_OFF;
sq 293 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c return alloc_sq_skb_arr(sq);
sq 300 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c void hinic_clean_sq(struct hinic_sq *sq)
sq 302 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c free_sq_skb_arr(sq);
sq 451 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c int hinic_get_sq_free_wqebbs(struct hinic_sq *sq)
sq 453 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c struct hinic_wq *wq = sq->wq;
sq 592 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c void hinic_sq_prepare_wqe(struct hinic_sq *sq, u16 prod_idx,
sq 614 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c static u32 sq_prepare_db(struct hinic_sq *sq, u16 prod_idx, unsigned int cos)
sq 616 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c struct hinic_qp *qp = container_of(sq, struct hinic_qp, sq);
sq 617 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c u8 hi_prod_idx = SQ_DB_PI_HI(SQ_MASKED_IDX(sq, prod_idx));
sq 634 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c void hinic_sq_write_db(struct hinic_sq *sq, u16 prod_idx, unsigned int wqe_size,
sq 637 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c struct hinic_wq *wq = sq->wq;
sq 644 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c writel(sq_prepare_db(sq, prod_idx, cos), SQ_DB_ADDR(sq, prod_idx));
sq 655 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c struct hinic_sq_wqe *hinic_sq_get_wqe(struct hinic_sq *sq,
sq 658 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c struct hinic_hw_wqe *hw_wqe = hinic_get_wqe(sq->wq, wqe_size,
sq 672 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c void hinic_sq_return_wqe(struct hinic_sq *sq, unsigned int wqe_size)
sq 674 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c hinic_return_wqe(sq->wq, wqe_size);
sq 685 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c void hinic_sq_write_wqe(struct hinic_sq *sq, u16 prod_idx,
sq 691 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c sq->saved_skb[prod_idx] = skb;
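The sq_prepare_db()/hinic_sq_write_db() entries above sketch the doorbell path: mask the producer index against the work-queue mask, pack a doorbell word, and store it at a per-queue doorbell address (writel() to sq->db_base in the driver). A standalone imitation; the bit layout and all names below are invented, only the overall shape follows the entries:

/* Toy doorbell write: masked producer index packed into one word. */
#include <stdint.h>
#include <stdio.h>

#define TOY_WQ_MASK 0xff                   /* wq->mask for a 256-entry queue */

static volatile uint32_t fake_db_page[16]; /* stands in for mapped MMIO */

static uint32_t toy_prepare_db(uint16_t prod_idx, unsigned int cos)
{
	uint16_t masked = prod_idx & TOY_WQ_MASK;  /* SQ_MASKED_IDX() shape */
	return ((uint32_t)cos << 16) | masked;     /* packing is made up */
}

static void toy_write_db(unsigned int qid, uint16_t prod_idx, unsigned int cos)
{
	/* The driver does writel(...) to an ioremapped register; here it
	 * is a plain volatile store into a fake page. */
	fake_db_page[qid] = toy_prepare_db(prod_idx, cos);
}

int main(void)
{
	toy_write_db(0, 260, 1);                   /* 260 & 0xff == 4 */
	printf("db word = 0x%x\n", fake_db_page[0]);
	return 0;
}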
sq 696 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c hinic_write_wqe(sq->wq, hw_wqe, wqe_size);
sq 709 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c struct hinic_sq_wqe *hinic_sq_read_wqebb(struct hinic_sq *sq,
sq 720 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c hw_wqe = hinic_read_wqe(sq->wq, sizeof(*ctrl), cons_idx);
sq 724 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c *skb = sq->saved_skb[*cons_idx];
sq 733 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c *wqe_size = ALIGN(*wqe_size, sq->wq->wqebb_size);
sq 747 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c struct hinic_sq_wqe *hinic_sq_read_wqe(struct hinic_sq *sq,
sq 753 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c hw_wqe = hinic_read_wqe(sq->wq, wqe_size, cons_idx);
sq 754 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c *skb = sq->saved_skb[*cons_idx];
sq 764 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c void hinic_sq_put_wqe(struct hinic_sq *sq, unsigned int wqe_size)
sq 766 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c hinic_put_wqe(sq->wq, wqe_size);
sq 54 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h #define HINIC_MIN_TX_NUM_WQEBBS(sq) \
sq 55 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h (HINIC_MIN_TX_WQE_SIZE((sq)->wq) / (sq)->wq->wqebb_size)
sq 113 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h struct hinic_sq sq;
sq 124 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h struct hinic_sq *sq, u16 global_qid);
sq 129 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h int hinic_init_sq(struct hinic_sq *sq, struct hinic_hwif *hwif,
sq 133 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h void hinic_clean_sq(struct hinic_sq *sq);
sq 140 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h int hinic_get_sq_free_wqebbs(struct hinic_sq *sq);
sq 169 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h void hinic_sq_prepare_wqe(struct hinic_sq *sq, u16 prod_idx,
sq 173 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h void hinic_sq_write_db(struct hinic_sq *sq, u16 prod_idx, unsigned int wqe_size,
sq 176 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h struct hinic_sq_wqe *hinic_sq_get_wqe(struct hinic_sq *sq,
sq 179 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h void hinic_sq_return_wqe(struct hinic_sq *sq, unsigned int wqe_size);
sq 181 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h void hinic_sq_write_wqe(struct hinic_sq *sq, u16 prod_idx,
sq 185 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h struct hinic_sq_wqe *hinic_sq_read_wqe(struct hinic_sq *sq,
sq 189 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h struct hinic_sq_wqe *hinic_sq_read_wqebb(struct hinic_sq *sq,
sq 193 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h void hinic_sq_put_wqe(struct hinic_sq *sq, unsigned int wqe_size);
sq 147 drivers/net/ethernet/huawei/hinic/hinic_main.c struct hinic_sq *sq = hinic_hwdev_get_sq(nic_dev->hwdev, i);
sq 149 drivers/net/ethernet/huawei/hinic/hinic_main.c err = hinic_init_txq(&nic_dev->txqs[i], sq, netdev);
sq 46 drivers/net/ethernet/huawei/hinic/hinic_tx.c #define HW_CONS_IDX(sq) be16_to_cpu(*(u16 *)((sq)->hw_ci_addr))
sq 474 drivers/net/ethernet/huawei/hinic/hinic_tx.c qp = container_of(txq->sq, struct hinic_qp, sq);
sq 503 drivers/net/ethernet/huawei/hinic/hinic_tx.c sq_wqe = hinic_sq_get_wqe(txq->sq, wqe_size, &prod_idx);
sq 510 drivers/net/ethernet/huawei/hinic/hinic_tx.c sq_wqe = hinic_sq_get_wqe(txq->sq, wqe_size, &prod_idx);
sq 527 drivers/net/ethernet/huawei/hinic/hinic_tx.c hinic_sq_prepare_wqe(txq->sq, prod_idx, sq_wqe, txq->sges, nr_sges);
sq 533 drivers/net/ethernet/huawei/hinic/hinic_tx.c hinic_sq_write_wqe(txq->sq, prod_idx, sq_wqe, skb, wqe_size);
sq 538 drivers/net/ethernet/huawei/hinic/hinic_tx.c hinic_sq_write_db(txq->sq, prod_idx, wqe_size, 0);
sq 543 drivers/net/ethernet/huawei/hinic/hinic_tx.c hinic_sq_return_wqe(txq->sq, wqe_size);
sq 578 drivers/net/ethernet/huawei/hinic/hinic_tx.c struct hinic_sq *sq = txq->sq;
sq 585 drivers/net/ethernet/huawei/hinic/hinic_tx.c while ((sq_wqe = hinic_sq_read_wqebb(sq, &skb, &wqe_size, &ci))) {
sq 586 drivers/net/ethernet/huawei/hinic/hinic_tx.c sq_wqe = hinic_sq_read_wqe(sq, &skb, wqe_size, &ci);
sq 594 drivers/net/ethernet/huawei/hinic/hinic_tx.c hinic_sq_put_wqe(sq, wqe_size);
sq 610 drivers/net/ethernet/huawei/hinic/hinic_tx.c struct hinic_qp *qp = container_of(txq->sq, struct hinic_qp, sq);
sq 613 drivers/net/ethernet/huawei/hinic/hinic_tx.c struct hinic_sq *sq = txq->sq;
sq 614 drivers/net/ethernet/huawei/hinic/hinic_tx.c struct hinic_wq *wq = sq->wq;
sq 623 drivers/net/ethernet/huawei/hinic/hinic_tx.c hw_ci = HW_CONS_IDX(sq) & wq->mask;
sq 628 drivers/net/ethernet/huawei/hinic/hinic_tx.c sq_wqe = hinic_sq_read_wqebb(sq, &skb, &wqe_size, &sw_ci);
sq 637 drivers/net/ethernet/huawei/hinic/hinic_tx.c sq_wqe = hinic_sq_read_wqe(sq, &skb, wqe_size, &sw_ci);
sq 649 drivers/net/ethernet/huawei/hinic/hinic_tx.c hinic_sq_put_wqe(sq, wqe_size);
sq 655 drivers/net/ethernet/huawei/hinic/hinic_tx.c hinic_get_sq_free_wqebbs(sq) >= HINIC_MIN_TX_NUM_WQEBBS(sq)) {
sq 677 drivers/net/ethernet/huawei/hinic/hinic_tx.c sq->msix_entry,
sq 706 drivers/net/ethernet/huawei/hinic/hinic_tx.c txq->sq->msix_entry,
sq 709 drivers/net/ethernet/huawei/hinic/hinic_tx.c hinic_hwdev_msix_cnt_set(nic_dev->hwdev, txq->sq->msix_entry);
sq 721 drivers/net/ethernet/huawei/hinic/hinic_tx.c struct hinic_sq *sq = txq->sq;
sq 726 drivers/net/ethernet/huawei/hinic/hinic_tx.c hinic_hwdev_msix_set(nic_dev->hwdev, sq->msix_entry,
sq 731 drivers/net/ethernet/huawei/hinic/hinic_tx.c err = request_irq(sq->irq, tx_irq, 0, txq->irq_name, txq);
sq 743 drivers/net/ethernet/huawei/hinic/hinic_tx.c struct hinic_sq *sq = txq->sq;
sq 745 drivers/net/ethernet/huawei/hinic/hinic_tx.c free_irq(sq->irq, txq);
sq 757 drivers/net/ethernet/huawei/hinic/hinic_tx.c int hinic_init_txq(struct hinic_txq *txq, struct hinic_sq *sq,
sq 760 drivers/net/ethernet/huawei/hinic/hinic_tx.c struct hinic_qp *qp = container_of(sq, struct hinic_qp, sq);
sq 767 drivers/net/ethernet/huawei/hinic/hinic_tx.c txq->sq = sq;
sq 794 drivers/net/ethernet/huawei/hinic/hinic_tx.c err = hinic_hwdev_hw_ci_addr_set(hwdev, sq, CI_UPDATE_NO_PENDING,
sq 31 drivers/net/ethernet/huawei/hinic/hinic_tx.h struct hinic_sq *sq;
sq 49 drivers/net/ethernet/huawei/hinic/hinic_tx.h int hinic_init_txq(struct hinic_txq *txq, struct hinic_sq *sq,
sq 8 drivers/net/ethernet/intel/ice/ice_controlq.c (qinfo)->sq.head = prefix##_ATQH; \
sq 9 drivers/net/ethernet/intel/ice/ice_controlq.c (qinfo)->sq.tail = prefix##_ATQT; \
sq 10 drivers/net/ethernet/intel/ice/ice_controlq.c (qinfo)->sq.len = prefix##_ATQLEN; \
sq 11 drivers/net/ethernet/intel/ice/ice_controlq.c (qinfo)->sq.bah = prefix##_ATQBAH; \
sq 12 drivers/net/ethernet/intel/ice/ice_controlq.c (qinfo)->sq.bal = prefix##_ATQBAL; \
sq 13 drivers/net/ethernet/intel/ice/ice_controlq.c (qinfo)->sq.len_mask = prefix##_ATQLEN_ATQLEN_M; \
sq 14 drivers/net/ethernet/intel/ice/ice_controlq.c (qinfo)->sq.len_ena_mask = prefix##_ATQLEN_ATQENABLE_M; \
sq 15 drivers/net/ethernet/intel/ice/ice_controlq.c (qinfo)->sq.head_mask = prefix##_ATQH_ATQH_M; \
sq 62 drivers/net/ethernet/intel/ice/ice_controlq.c if (cq->sq.len && cq->sq.len_mask && cq->sq.len_ena_mask)
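The ice_controlq.c entries above fill in the send-queue register offsets with a token-pasting macro, so PF and mailbox control queues can share one initializer body. A reduced, standalone imitation; the register values and TOY_* names are fake:

/* Token-pasting register map, in the shape of the ice macro above. */
#include <stdio.h>

#define PF_ATQH   0x100
#define PF_ATQT   0x104
#define PF_ATQLEN 0x108

struct toy_q_info { unsigned int head, tail, len; };

#define TOY_CQ_SQ_REGS(qinfo, prefix) do {   \
	(qinfo)->head = prefix##_ATQH;       \
	(qinfo)->tail = prefix##_ATQT;       \
	(qinfo)->len  = prefix##_ATQLEN;     \
} while (0)

int main(void)
{
	struct toy_q_info q;

	TOY_CQ_SQ_REGS(&q, PF);      /* expands to PF_ATQH, PF_ATQT, PF_ATQLEN */
	printf("head reg @ 0x%x\n", q.head);
	return 0;
}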
sq 63 drivers/net/ethernet/intel/ice/ice_controlq.c return (rd32(hw, cq->sq.len) & (cq->sq.len_mask |
sq 64 drivers/net/ethernet/intel/ice/ice_controlq.c cq->sq.len_ena_mask)) ==
sq 65 drivers/net/ethernet/intel/ice/ice_controlq.c (cq->num_sq_entries | cq->sq.len_ena_mask);
sq 80 drivers/net/ethernet/intel/ice/ice_controlq.c cq->sq.desc_buf.va = dmam_alloc_coherent(ice_hw_to_dev(hw), size,
sq 81 drivers/net/ethernet/intel/ice/ice_controlq.c &cq->sq.desc_buf.pa,
sq 83 drivers/net/ethernet/intel/ice/ice_controlq.c if (!cq->sq.desc_buf.va)
sq 85 drivers/net/ethernet/intel/ice/ice_controlq.c cq->sq.desc_buf.size = size;
sq 87 drivers/net/ethernet/intel/ice/ice_controlq.c cq->sq.cmd_buf = devm_kcalloc(ice_hw_to_dev(hw), cq->num_sq_entries,
sq 89 drivers/net/ethernet/intel/ice/ice_controlq.c if (!cq->sq.cmd_buf) {
sq 90 drivers/net/ethernet/intel/ice/ice_controlq.c dmam_free_coherent(ice_hw_to_dev(hw), cq->sq.desc_buf.size,
sq 91 drivers/net/ethernet/intel/ice/ice_controlq.c cq->sq.desc_buf.va, cq->sq.desc_buf.pa);
sq 92 drivers/net/ethernet/intel/ice/ice_controlq.c cq->sq.desc_buf.va = NULL;
sq 93 drivers/net/ethernet/intel/ice/ice_controlq.c cq->sq.desc_buf.pa = 0;
sq 94 drivers/net/ethernet/intel/ice/ice_controlq.c cq->sq.desc_buf.size = 0;
sq 218 drivers/net/ethernet/intel/ice/ice_controlq.c cq->sq.dma_head = devm_kcalloc(ice_hw_to_dev(hw), cq->num_sq_entries,
sq 219 drivers/net/ethernet/intel/ice/ice_controlq.c sizeof(cq->sq.desc_buf), GFP_KERNEL);
sq 220 drivers/net/ethernet/intel/ice/ice_controlq.c if (!cq->sq.dma_head)
sq 222 drivers/net/ethernet/intel/ice/ice_controlq.c cq->sq.r.sq_bi = (struct ice_dma_mem *)cq->sq.dma_head;
sq 228 drivers/net/ethernet/intel/ice/ice_controlq.c bi = &cq->sq.r.sq_bi[i];
sq 242 drivers/net/ethernet/intel/ice/ice_controlq.c dmam_free_coherent(ice_hw_to_dev(hw), cq->sq.r.sq_bi[i].size,
sq 243 drivers/net/ethernet/intel/ice/ice_controlq.c cq->sq.r.sq_bi[i].va, cq->sq.r.sq_bi[i].pa);
sq 244 drivers/net/ethernet/intel/ice/ice_controlq.c cq->sq.r.sq_bi[i].va = NULL;
sq 245 drivers/net/ethernet/intel/ice/ice_controlq.c cq->sq.r.sq_bi[i].pa = 0;
sq 246 drivers/net/ethernet/intel/ice/ice_controlq.c cq->sq.r.sq_bi[i].size = 0;
sq 248 drivers/net/ethernet/intel/ice/ice_controlq.c devm_kfree(ice_hw_to_dev(hw), cq->sq.dma_head);
sq 282 drivers/net/ethernet/intel/ice/ice_controlq.c return ice_cfg_cq_regs(hw, &cq->sq, cq->num_sq_entries);
sq 325 drivers/net/ethernet/intel/ice/ice_controlq.c if (cq->sq.count > 0) {
sq 337 drivers/net/ethernet/intel/ice/ice_controlq.c cq->sq.next_to_use = 0;
sq 338 drivers/net/ethernet/intel/ice/ice_controlq.c cq->sq.next_to_clean = 0;
sq 356 drivers/net/ethernet/intel/ice/ice_controlq.c cq->sq.count = cq->num_sq_entries;
sq 360 drivers/net/ethernet/intel/ice/ice_controlq.c ice_free_cq_ring(hw, &cq->sq);
sq 460 drivers/net/ethernet/intel/ice/ice_controlq.c if (!cq->sq.count) {
sq 466 drivers/net/ethernet/intel/ice/ice_controlq.c wr32(hw, cq->sq.head, 0);
sq 467 drivers/net/ethernet/intel/ice/ice_controlq.c wr32(hw, cq->sq.tail, 0);
sq 468 drivers/net/ethernet/intel/ice/ice_controlq.c wr32(hw, cq->sq.len, 0);
sq 469 drivers/net/ethernet/intel/ice/ice_controlq.c wr32(hw, cq->sq.bal, 0);
sq 470 drivers/net/ethernet/intel/ice/ice_controlq.c wr32(hw, cq->sq.bah, 0);
sq 472 drivers/net/ethernet/intel/ice/ice_controlq.c cq->sq.count = 0; /* to indicate uninitialized queue */
sq 475 drivers/net/ethernet/intel/ice/ice_controlq.c ICE_FREE_CQ_BUFS(hw, cq, sq);
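The shutdown entries above zero every send-queue register and then use count == 0 as the "uninitialized" sentinel, so later calls can bail out early instead of touching a dead queue. A standalone sketch of that shape, with registers modeled as plain fields and all names invented:

/* Shutdown-with-sentinel pattern from the ice entries above. */
#include <stdbool.h>
#include <stdio.h>

struct toy_ctl_q { unsigned int head, tail, len, bal, bah, count; };

static bool toy_shutdown_sq(struct toy_ctl_q *q)
{
	if (!q->count)                     /* already shut down */
		return false;

	q->head = q->tail = q->len = q->bal = q->bah = 0;
	q->count = 0;                      /* mark queue uninitialized */
	return true;
}

int main(void)
{
	struct toy_ctl_q q = { .count = 64 };

	printf("first: %d, second: %d\n",
	       toy_shutdown_sq(&q), toy_shutdown_sq(&q));
	return 0;
}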
sq 476 drivers/net/ethernet/intel/ice/ice_controlq.c ice_free_cq_ring(hw, &cq->sq);
sq 787 drivers/net/ethernet/intel/ice/ice_controlq.c struct ice_ctl_q_ring *sq = &cq->sq;
sq 788 drivers/net/ethernet/intel/ice/ice_controlq.c u16 ntc = sq->next_to_clean;
sq 792 drivers/net/ethernet/intel/ice/ice_controlq.c desc = ICE_CTL_Q_DESC(*sq, ntc);
sq 793 drivers/net/ethernet/intel/ice/ice_controlq.c details = ICE_CTL_Q_DETAILS(*sq, ntc);
sq 795 drivers/net/ethernet/intel/ice/ice_controlq.c while (rd32(hw, cq->sq.head) != ntc) {
sq 797 drivers/net/ethernet/intel/ice/ice_controlq.c "ntc %d head %d.\n", ntc, rd32(hw, cq->sq.head));
sq 801 drivers/net/ethernet/intel/ice/ice_controlq.c if (ntc == sq->count)
sq 803 drivers/net/ethernet/intel/ice/ice_controlq.c desc = ICE_CTL_Q_DESC(*sq, ntc);
sq 804 drivers/net/ethernet/intel/ice/ice_controlq.c details = ICE_CTL_Q_DETAILS(*sq, ntc);
sq 807 drivers/net/ethernet/intel/ice/ice_controlq.c sq->next_to_clean = ntc;
sq 809 drivers/net/ethernet/intel/ice/ice_controlq.c return ICE_CTL_Q_DESC_UNUSED(sq);
sq 825 drivers/net/ethernet/intel/ice/ice_controlq.c return rd32(hw, cq->sq.head) == cq->sq.next_to_use;
sq 861 drivers/net/ethernet/intel/ice/ice_controlq.c if (!cq->sq.count) {
sq 887 drivers/net/ethernet/intel/ice/ice_controlq.c val = rd32(hw, cq->sq.head);
sq 896 drivers/net/ethernet/intel/ice/ice_controlq.c details = ICE_CTL_Q_DETAILS(cq->sq, cq->sq.next_to_use);
sq 915 drivers/net/ethernet/intel/ice/ice_controlq.c desc_on_ring = ICE_CTL_Q_DESC(cq->sq, cq->sq.next_to_use);
sq 922 drivers/net/ethernet/intel/ice/ice_controlq.c dma_buf = &cq->sq.r.sq_bi[cq->sq.next_to_use];
sq 942 drivers/net/ethernet/intel/ice/ice_controlq.c (cq->sq.next_to_use)++;
sq 943 drivers/net/ethernet/intel/ice/ice_controlq.c if (cq->sq.next_to_use == cq->sq.count)
sq 944 drivers/net/ethernet/intel/ice/ice_controlq.c cq->sq.next_to_use = 0;
sq 945 drivers/net/ethernet/intel/ice/ice_controlq.c wr32(hw, cq->sq.tail, cq->sq.next_to_use);
sq 85 drivers/net/ethernet/intel/ice/ice_controlq.h struct ice_ctl_q_ring sq; /* send queue */
sq 1014 drivers/net/ethernet/intel/ice/ice_main.c val = rd32(hw, cq->sq.len);
sq 1033 drivers/net/ethernet/intel/ice/ice_main.c wr32(hw, cq->sq.len, val);
sq 471 drivers/net/ethernet/marvell/octeontx2/af/mbox.h struct nix_sq_ctx_s sq;
sq 489 drivers/net/ethernet/marvell/octeontx2/af/mbox.h struct nix_sq_ctx_s sq;
sq 527 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c ((req->op == NIX_AQ_INSTOP_INIT && req->sq.ena) ||
sq 529 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c req->sq_mask.ena && req->sq_mask.smq && req->sq.ena))) {
sq 531 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c pcifunc, req->sq.smq))
sq 574 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c memcpy(ctx, &req->sq, sizeof(struct nix_sq_ctx_s));
sq 605 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c if (req->ctype == NIX_AQ_CTYPE_SQ && req->sq.ena)
sq 648 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c memcpy(&rsp->sq, ctx,
sq 687 drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c aq_req.sq.ena = 0;
sq 711 drivers/net/ethernet/mellanox/mlx5/core/en.h struct mlx5e_txqsq sq[MLX5E_MAX_NUM_TC];
sq 753 drivers/net/ethernet/mellanox/mlx5/core/en.h struct mlx5e_sq_stats sq[MLX5E_MAX_NUM_TC];
sq 910 drivers/net/ethernet/mellanox/mlx5/core/en.h netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
sq 913 drivers/net/ethernet/mellanox/mlx5/core/en.h void mlx5e_trigger_irq(struct mlx5e_icosq *sq);
sq 919 drivers/net/ethernet/mellanox/mlx5/core/en.h void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq);
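The ice_sq_send_cmd() entries above advance next_to_use with an explicit compare-and-reset (the ring count need not be a power of two, unlike the masked rings earlier in this listing) and then publish the new producer index through the tail register. A standalone sketch with the register modeled as a plain field:

/* Explicit-wrap submit step, in the shape of the ice entries above. */
#include <stdio.h>

struct toy_ring {
	unsigned int next_to_use, next_to_clean, count;
	unsigned int tail_reg;     /* stands in for wr32(hw, sq.tail, ...) */
};

static void toy_submit(struct toy_ring *r)
{
	r->next_to_use++;
	if (r->next_to_use == r->count)   /* explicit wrap, no mask needed */
		r->next_to_use = 0;
	r->tail_reg = r->next_to_use;     /* hardware fetches up to tail */
}

int main(void)
{
	struct toy_ring r = { .next_to_use = 63, .count = 64 };

	toy_submit(&r);
	printf("next_to_use=%u tail=%u\n", r.next_to_use, r.tail_reg);
	return 0;
}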
sq 1020 drivers/net/ethernet/mellanox/mlx5/core/en.h struct mlx5e_sq_param *param, struct mlx5e_icosq *sq);
sq 1021 drivers/net/ethernet/mellanox/mlx5/core/en.h void mlx5e_close_icosq(struct mlx5e_icosq *sq);
sq 1024 drivers/net/ethernet/mellanox/mlx5/core/en.h struct mlx5e_xdpsq *sq, bool is_redirect);
sq 1025 drivers/net/ethernet/mellanox/mlx5/core/en.h void mlx5e_close_xdpsq(struct mlx5e_xdpsq *sq);
sq 1070 drivers/net/ethernet/mellanox/mlx5/core/en.h void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq);
sq 20 drivers/net/ethernet/mellanox/mlx5/core/en/health.h void mlx5e_reporter_tx_err_cqe(struct mlx5e_txqsq *sq);
sq 21 drivers/net/ethernet/mellanox/mlx5/core/en/health.h int mlx5e_reporter_tx_timeout(struct mlx5e_txqsq *sq);
sq 28 drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c data->tx_packets += stats->sq[tc].packets;
sq 29 drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c data->tx_bytes += stats->sq[tc].bytes;
sq 35 drivers/net/ethernet/mellanox/mlx5/core/en/params.h struct mlx5e_sq_param sq;
sq 6 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c static int mlx5e_wait_for_sq_flush(struct mlx5e_txqsq *sq)
sq 11 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c if (sq->cc == sq->pc)
sq 17 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c netdev_err(sq->channel->netdev,
sq 19 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c sq->sqn, sq->cc, sq->pc);
sq 24 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c static void mlx5e_reset_txqsq_cc_pc(struct mlx5e_txqsq *sq)
sq 26 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c WARN_ONCE(sq->cc != sq->pc,
sq 28 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c sq->sqn, sq->cc, sq->pc);
sq 29 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c sq->cc = 0;
sq 30 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c sq->dma_fifo_cc = 0;
sq 31 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c sq->pc = 0;
sq 38 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c struct mlx5e_txqsq *sq;
sq 42 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c sq = ctx;
sq 43 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c mdev = sq->channel->mdev;
sq 44 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c dev = sq->channel->netdev;
sq 46 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c if (!test_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state))
sq 49 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c err = mlx5_core_query_sq_state(mdev, sq->sqn, &state);
sq 52 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c sq->sqn, err);
sq 59 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c mlx5e_tx_disable_queue(sq->txq);
sq 61 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c err = mlx5e_wait_for_sq_flush(sq);
sq 70 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c err = mlx5e_health_sq_to_ready(sq->channel, sq->sqn);
sq 74 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c mlx5e_reset_txqsq_cc_pc(sq);
sq 75 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c sq->stats->recover++;
sq 76 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c clear_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state);
sq 77 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c mlx5e_activate_txqsq(sq);
sq 81 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c clear_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state);
sq 85 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c void mlx5e_reporter_tx_err_cqe(struct mlx5e_txqsq *sq)
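The reporter entries above outline the TX recovery sequence: stop the queue, wait until the consumer counter (cc) catches up with the producer counter (pc), reset both, and reactivate. A toy sketch of the wait-and-reset core; a bounded loop stands in for the driver's msleep()-based polling, and the "completions arriving" line is pure simulation:

/* Wait-for-flush then reset, in the shape of the reporter entries above. */
#include <stdbool.h>
#include <stdio.h>

struct toy_txqsq { unsigned int cc, pc; };

static bool toy_wait_for_flush(struct toy_txqsq *sq, int max_polls)
{
	while (max_polls--) {
		if (sq->cc == sq->pc)     /* all posted WQEs completed */
			return true;
		sq->cc++;                 /* simulated completion arriving */
	}
	return false;
}

static void toy_reset(struct toy_txqsq *sq)
{
	sq->cc = 0;                       /* ring restarts from a clean state */
	sq->pc = 0;
}

int main(void)
{
	struct toy_txqsq sq = { .cc = 5, .pc = 8 };

	if (toy_wait_for_flush(&sq, 100))
		toy_reset(&sq);
	printf("cc=%u pc=%u\n", sq.cc, sq.pc);
	return 0;
}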
sq 87 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c struct mlx5e_priv *priv = sq->channel->priv;
sq 91 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c err_ctx.ctx = sq;
sq 93 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c sprintf(err_str, "ERR CQE on SQ: 0x%x", sq->sqn);
sq 101 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c struct mlx5e_txqsq *sq;
sq 104 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c sq = ctx;
sq 105 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c eq = sq->cq.mcq.eq;
sq 106 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c err = mlx5e_health_channel_eq_recover(eq, sq->channel);
sq 108 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
sq 113 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c int mlx5e_reporter_tx_timeout(struct mlx5e_txqsq *sq)
sq 115 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c struct mlx5e_priv *priv = sq->channel->priv;
sq 119 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c err_ctx.ctx = sq;
sq 123 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c sq->channel->ix, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc,
sq 124 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c jiffies_to_usecs(jiffies - sq->txq->trans_start));
sq 149 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c struct mlx5e_txqsq *sq, int tc)
sq 151 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c struct mlx5e_priv *priv = sq->channel->priv;
sq 152 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c bool stopped = netif_xmit_stopped(sq->txq);
sq 156 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c err = mlx5_core_query_sq_state(priv->mdev, sq->sqn, &state);
sq 164 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c err = devlink_fmsg_u32_pair_put(fmsg, "channel ix", sq->ch_ix);
sq 172 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c err = devlink_fmsg_u32_pair_put(fmsg, "txq ix", sq->txq_ix);
sq 176 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c err = devlink_fmsg_u32_pair_put(fmsg, "sqn", sq->sqn);
sq 188 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c err = devlink_fmsg_u32_pair_put(fmsg, "cc", sq->cc);
sq 192 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c err = devlink_fmsg_u32_pair_put(fmsg, "pc", sq->pc);
sq 196 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c err = mlx5e_reporter_cq_diagnose(&sq->cq, fmsg);
sq 260 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c struct mlx5e_txqsq *sq = &c->sq[tc];
sq 262 drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c err = mlx5e_tx_reporter_build_diagnose_output(fmsg, sq, tc);
sq 37 drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h mlx5e_sq_fetch_wqe(struct mlx5e_txqsq *sq, size_t size, u16 *pi)
sq 39 drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h struct mlx5_wq_cyc *wq = &sq->wq;
sq 42 drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h *pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
sq 85 drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h mlx5e_fill_sq_frag_edge(struct mlx5e_txqsq *sq, struct mlx5_wq_cyc *wq,
sq 88 drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h struct mlx5e_tx_wqe_info *edge_wi, *wi = &sq->db.wqe_info[pi];
sq 96 drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h mlx5e_post_nop(wq, sq->sqn, &sq->pc);
sq 98 drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h sq->stats->nop += nnops;
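The mlx5e_fill_sq_frag_edge() entries above pad the cyclic work queue with NOPs whenever a multi-slot WQE would straddle the end of the ring, so every WQE occupies contiguous slots. A minimal standalone version where "posting a NOP" just advances the producer counter:

/* Frag-edge padding, in the shape of mlx5e_fill_sq_frag_edge() above. */
#include <stdio.h>

#define TOY_WQ_SZ 16                   /* slots; power of two */

static unsigned int toy_ctr2ix(unsigned int pc)
{
	return pc & (TOY_WQ_SZ - 1);   /* mlx5_wq_cyc_ctr2ix() shape */
}

/* Ensure the next `needed` slots are contiguous, padding with NOPs if not. */
static void toy_fill_edge(unsigned int *pc, unsigned int needed)
{
	unsigned int pi = toy_ctr2ix(*pc);
	unsigned int contig = TOY_WQ_SZ - pi;

	if (contig >= needed)
		return;
	while (contig--)               /* pad out to the wrap point */
		(*pc)++;               /* each NOP consumes one slot */
}

int main(void)
{
	unsigned int pc = 14;          /* only 2 contiguous slots left */

	toy_fill_edge(&pc, 4);         /* needs 4 -> pads 2 NOPs, wraps */
	printf("pc=%u pi=%u\n", pc, toy_ctr2ix(pc));
	return 0;
}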
sq 125 drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h mlx5e_tx_wqe_inline_mode(struct mlx5e_txqsq *sq, struct mlx5_wqe_ctrl_seg *cseg,
sq 133 drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h mode = sq->min_inline_mode;
sq 136 drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h test_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state))
sq 151 drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h mlx5e_dma_get(struct mlx5e_txqsq *sq, u32 i)
sq 153 drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h return &sq->db.dma_fifo[i & sq->dma_fifo_mask];
sq 157 drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h mlx5e_dma_push(struct mlx5e_txqsq *sq, dma_addr_t addr, u32 size,
sq 160 drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h struct mlx5e_sq_dma *dma = mlx5e_dma_get(sq, sq->dma_fifo_pc++);
sq 59 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq,
sq 89 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c dma_addr = dma_map_single(sq->pdev, xdptxd.data, xdptxd.len,
sq 91 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c if (dma_mapping_error(sq->pdev, dma_addr)) {
sq 109 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c dma_sync_single_for_device(sq->pdev, dma_addr, xdptxd.len,
sq 117 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c return sq->xmit_xdp_frame(sq, &xdptxd, &xdpi, 0);
sq 181 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c static void mlx5e_xdp_mpwqe_session_start(struct mlx5e_xdpsq *sq)
sq 183 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c struct mlx5e_xdp_mpwqe *session = &sq->mpwqe;
sq 184 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c struct mlx5e_xdpsq_stats *stats = sq->stats;
sq 185 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c struct mlx5_wq_cyc *wq = &sq->wq;
sq 188 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
sq 192 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c mlx5e_fill_xdpsq_frag_edge(sq, wq, pi, contig_wqebbs);
sq 194 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c session->wqe = mlx5e_xdpsq_fetch_wqe(sq, &pi);
sq 200 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c mlx5e_xdp_update_inline_state(sq);
sq 205 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c void mlx5e_xdp_mpwqe_complete(struct mlx5e_xdpsq *sq)
sq 207 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c struct mlx5_wq_cyc *wq = &sq->wq;
sq 208 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c struct mlx5e_xdp_mpwqe *session = &sq->mpwqe;
sq 211 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
sq 212 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c struct mlx5e_xdp_wqe_info *wi = &sq->db.wqe_info[pi];
sq 215 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_ENHANCED_MPSW);
sq 216 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_count);
sq 221 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c sq->pc += wi->num_wqebbs;
sq 223 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c sq->doorbell_cseg = cseg;
sq 233 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c static int mlx5e_xmit_xdp_frame_check_mpwqe(struct mlx5e_xdpsq *sq)
sq 235 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c if (unlikely(!sq->mpwqe.wqe)) {
sq 236 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc,
sq 239 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c mlx5e_xmit_xdp_doorbell(sq);
sq 240 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c sq->stats->full++;
sq 250 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c static bool mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq,
sq 255 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c struct mlx5e_xdp_mpwqe *session = &sq->mpwqe;
sq 256 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c struct mlx5e_xdpsq_stats *stats = sq->stats;
sq 258 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c if (unlikely(xdptxd->len > sq->hw_mtu)) {
sq 264 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c check_result = mlx5e_xmit_xdp_frame_check_mpwqe(sq);
sq 273 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c mlx5e_xdp_mpwqe_session_start(sq);
sq 276 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c mlx5e_xdp_mpwqe_add_dseg(sq, xdptxd, stats);
sq 280 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c mlx5e_xdp_mpwqe_complete(sq);
sq 282 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, xdpi);
sq 287 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c static int mlx5e_xmit_xdp_frame_check(struct mlx5e_xdpsq *sq)
sq 289 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, 1))) {
sq 291 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c mlx5e_xmit_xdp_doorbell(sq);
sq 292 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c sq->stats->full++;
sq 299 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c static bool mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq,
sq 304 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c struct mlx5_wq_cyc *wq = &sq->wq;
sq 305 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
sq 315 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c struct mlx5e_xdpsq_stats *stats = sq->stats;
sq 319 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c if (unlikely(dma_len < MLX5E_XDP_MIN_INLINE || sq->hw_mtu < dma_len)) {
sq 325 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c check_result = mlx5e_xmit_xdp_frame_check(sq);
sq 332 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE) {
sq 344 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_SEND);
sq 346 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c sq->pc++;
sq 348 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c sq->doorbell_cseg = cseg;
sq 350 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, xdpi);
sq 355 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c static void mlx5e_free_xdpsq_desc(struct mlx5e_xdpsq *sq,
sq 360 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c struct mlx5e_xdp_info_fifo *xdpi_fifo = &sq->db.xdpi_fifo;
sq 369 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c dma_unmap_single(sq->pdev, xdpi.frame.dma_addr,
sq 389 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c struct mlx5e_xdpsq *sq;
sq 395 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c sq = container_of(cq, struct mlx5e_xdpsq, cq);
sq 397 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
sq 407 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c sqcc = sq->cc;
sq 419 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c netdev_WARN_ONCE(sq->channel->netdev,
sq 428 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
sq 429 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c wi = &sq->db.wqe_info[ci];
sq 433 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c mlx5e_free_xdpsq_desc(sq, wi, &xsk_frames, true);
sq 438 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c xsk_umem_complete_tx(sq->umem, xsk_frames);
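The completion entries above (mlx5e_poll_xdpsq_cq() and neighbors) advance the consumer counter by each WQE's wqebb count rather than by one, since a single CQE can complete a multi-slot WQE. A standalone toy version of that loop:

/* Completion polling that skips whole WQEs, as in the entries above. */
#include <stdio.h>

#define TOY_WQ_SZ 8

struct toy_wqe_info { unsigned int num_wqebbs; };

static struct toy_wqe_info wqe_info[TOY_WQ_SZ] = {
	[0] = { 3 }, [3] = { 2 }, [5] = { 3 },
};

static unsigned int toy_poll(unsigned int *cc, int ncqes)
{
	unsigned int done = 0;

	while (ncqes--) {
		unsigned int ci = *cc & (TOY_WQ_SZ - 1);

		*cc += wqe_info[ci].num_wqebbs;   /* skip the whole WQE */
		done++;
	}
	return done;
}

int main(void)
{
	unsigned int cc = 0;

	printf("handled %u cqes, cc=%u\n", toy_poll(&cc, 3), cc);
	return 0;
}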
sq 440 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c sq->stats->cqes += i;
sq 447 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c sq->cc = sqcc;
sq 451 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq)
sq 455 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c while (sq->cc != sq->pc) {
sq 459 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->cc);
sq 460 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c wi = &sq->db.wqe_info[ci];
sq 462 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c sq->cc += wi->num_wqebbs;
sq 464 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c mlx5e_free_xdpsq_desc(sq, wi, &xsk_frames, false);
sq 468 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c xsk_umem_complete_tx(sq->umem, xsk_frames);
sq 475 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c struct mlx5e_xdpsq *sq;
sq 492 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c sq = &priv->channels.c[sq_num]->xdpsq;
sq 501 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c xdptxd.dma_addr = dma_map_single(sq->pdev, xdptxd.data,
sq 504 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c if (unlikely(dma_mapping_error(sq->pdev, xdptxd.dma_addr))) {
sq 514 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c if (unlikely(!sq->xmit_xdp_frame(sq, &xdptxd, &xdpi, 0))) {
sq 515 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c dma_unmap_single(sq->pdev, xdptxd.dma_addr,
sq 523 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c if (sq->mpwqe.wqe)
sq 524 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c mlx5e_xdp_mpwqe_complete(sq);
sq 525 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c mlx5e_xmit_xdp_doorbell(sq);
sq 546 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c void mlx5e_set_xmit_fp(struct mlx5e_xdpsq *sq, bool is_mpw)
sq 548 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c sq->xmit_xdp_frame_check = is_mpw ?
sq 550 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c sq->xmit_xdp_frame = is_mpw ?
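mlx5e_set_xmit_fp() above selects the transmit implementation once, at configuration time, instead of branching on is_mpw for every packet. A standalone toy equivalent of that function-pointer dispatch:

/* Configure-once dispatch, in the shape of mlx5e_set_xmit_fp() above. */
#include <stdbool.h>
#include <stdio.h>

struct toy_xdpsq;
typedef bool (*toy_xmit_fn)(struct toy_xdpsq *sq);

struct toy_xdpsq { toy_xmit_fn xmit; };

static bool toy_xmit_single(struct toy_xdpsq *sq) { (void)sq; puts("single"); return true; }
static bool toy_xmit_mpwqe(struct toy_xdpsq *sq)  { (void)sq; puts("mpwqe");  return true; }

static void toy_set_xmit_fp(struct toy_xdpsq *sq, bool is_mpw)
{
	sq->xmit = is_mpw ? toy_xmit_mpwqe : toy_xmit_single;
}

int main(void)
{
	struct toy_xdpsq sq;

	toy_set_xmit_fp(&sq, true);
	sq.xmit(&sq);            /* datapath calls through the pointer */
	return 0;
}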
sq 67 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h void mlx5e_xdp_mpwqe_complete(struct mlx5e_xdpsq *sq);
sq 69 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq);
sq 70 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h void mlx5e_set_xmit_fp(struct mlx5e_xdpsq *sq, bool is_mpw);
sq 103 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_xdpsq *sq)
sq 105 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h if (sq->doorbell_cseg) {
sq 106 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, sq->doorbell_cseg);
sq 107 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h sq->doorbell_cseg = NULL;
sq 114 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h static inline void mlx5e_xdp_update_inline_state(struct mlx5e_xdpsq *sq)
sq 116 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h u16 outstanding = sq->xdpi_fifo_pc - sq->xdpi_fifo_cc;
sq 117 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h struct mlx5e_xdp_mpwqe *session = &sq->mpwqe;
sq 141 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h mlx5e_fill_xdpsq_frag_edge(struct mlx5e_xdpsq *sq, struct mlx5_wq_cyc *wq,
sq 144 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h struct mlx5e_xdp_wqe_info *edge_wi, *wi = &sq->db.wqe_info[pi];
sq 151 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h mlx5e_post_nop(wq, sq->sqn, &sq->pc);
sq 154 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h sq->stats->nops += nnops;
sq 158 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h mlx5e_xdp_mpwqe_add_dseg(struct mlx5e_xdpsq *sq,
sq 162 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h struct mlx5e_xdp_mpwqe *session = &sq->mpwqe;
sq 185 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h dseg->lkey = sq->mkey_be;
sq 190 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h mlx5e_xdpsq_fetch_wqe(struct mlx5e_xdpsq *sq, u16 *pi)
sq 192 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h struct mlx5_wq_cyc *wq = &sq->wq;
sq 195 drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h *pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
sq 52 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c static void mlx5e_xsk_tx_post_err(struct mlx5e_xdpsq *sq,
sq 55 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c u16 pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc);
sq 56 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c struct mlx5e_xdp_wqe_info *wi = &sq->db.wqe_info[pi];
sq 62 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c nopwqe = mlx5e_post_nop(&sq->wq, sq->sqn, &sq->pc);
sq 63 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, xdpi);
sq 64 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c sq->doorbell_cseg = &nopwqe->ctrl;
sq 67 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c bool mlx5e_xsk_tx(struct mlx5e_xdpsq *sq, unsigned int budget)
sq 69 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c struct xdp_umem *umem = sq->umem;
sq 78 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c int check_result = sq->xmit_xdp_frame_check(sq);
sq 99 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c dma_sync_single_for_device(sq->pdev, xdptxd.dma_addr,
sq 102 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c if (unlikely(!sq->xmit_xdp_frame(sq, &xdptxd, &xdpi, check_result))) {
sq 103 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c if (sq->mpwqe.wqe)
sq 104 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c mlx5e_xdp_mpwqe_complete(sq);
sq 106 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c mlx5e_xsk_tx_post_err(sq, &xdpi);
sq 113 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c if (sq->mpwqe.wqe)
sq 114 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c mlx5e_xdp_mpwqe_complete(sq);
sq 115 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c mlx5e_xmit_xdp_doorbell(sq);
sq 14 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.h bool mlx5e_xsk_tx(struct mlx5e_xdpsq *sq, unsigned int budget);
sq 16 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.h static inline void mlx5e_xsk_update_tx_wakeup(struct mlx5e_xdpsq *sq)
sq 18 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.h if (!xsk_umem_uses_need_wakeup(sq->umem))
sq 21 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.h if (sq->pc != sq->cc)
sq 22 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.h xsk_clear_tx_need_wakeup(sq->umem);
sq 24 drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.h xsk_set_tx_need_wakeup(sq->umem);
sq 105 drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h struct mlx5e_txqsq *sq,
sq 111 drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h if (test_bit(MLX5E_SQ_STATE_TLS, &sq->state)) {
sq 112 drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h skb = mlx5e_tls_handle_tx_skb(dev, sq, skb, wqe, pi);
sq 119 drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h if (test_bit(MLX5E_SQ_STATE_IPSEC, &sq->state)) {
sq 91 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h struct mlx5e_txqsq *sq,
sq 94 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h void mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
sq 98 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h mlx5e_ktls_dumps_num_wqebbs(struct mlx5e_txqsq *sq, unsigned int nfrags,
sq 105 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h (nfrags + DIV_ROUND_UP(sync_len, sq->hw_mtu));
sq 114 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
sq 105 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c static void tx_fill_wi(struct mlx5e_txqsq *sq,
sq 109 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c struct mlx5e_tx_wqe_info *wi = &sq->db.wqe_info[pi];
sq 133 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c post_static_params(struct mlx5e_txqsq *sq,
sq 140 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c umr_wqe = mlx5e_sq_fetch_wqe(sq, MLX5E_KTLS_STATIC_UMR_WQE_SZ, &pi);
sq 141 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c build_static_params(umr_wqe, sq->pc, sq->sqn, priv_tx, fence);
sq 142 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c tx_fill_wi(sq, pi, MLX5E_KTLS_STATIC_WQEBBS, 0, NULL);
sq 143 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c sq->pc += MLX5E_KTLS_STATIC_WQEBBS;
sq 147 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c post_progress_params(struct mlx5e_txqsq *sq,
sq 154 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c wqe = mlx5e_sq_fetch_wqe(sq, MLX5E_KTLS_PROGRESS_WQE_SZ, &pi);
sq 155 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c build_progress_params(wqe, sq->pc, sq->sqn, priv_tx, fence);
sq 156 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c tx_fill_wi(sq, pi, MLX5E_KTLS_PROGRESS_WQEBBS, 0, NULL);
sq 157 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c sq->pc += MLX5E_KTLS_PROGRESS_WQEBBS;
sq 161 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c mlx5e_ktls_tx_post_param_wqes(struct mlx5e_txqsq *sq,
sq 166 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c struct mlx5_wq_cyc *wq = &sq->wq;
sq 169 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
sq 173 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room);
sq 176 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c post_static_params(sq, priv_tx, fence_first_post);
sq 178 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c post_progress_params(sq, priv_tx, progress_fence);
sq 250 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c tx_post_resync_params(struct mlx5e_txqsq *sq,
sq 267 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c mlx5e_ktls_tx_post_param_wqes(sq, priv_tx, skip_static_post, true);
sq 271 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c tx_post_resync_dump(struct mlx5e_txqsq *sq, skb_frag_t *frag, u32 tisn, bool first)
sq 281 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c wqe = mlx5e_sq_fetch_wqe(sq, sizeof(*wqe), &pi);
sq 288 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_DUMP);
sq 289 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
sq 294 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz,
sq 296 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
sq 300 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c dseg->lkey = sq->mkey_be;
sq 302 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE);
sq 304 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c tx_fill_wi(sq, pi, MLX5E_KTLS_DUMP_WQEBBS, fsz, skb_frag_page(frag));
sq 305 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c sq->pc += MLX5E_KTLS_DUMP_WQEBBS;
sq 310 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c void mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
sq 320 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c dma = mlx5e_dma_get(sq, (*dma_fifo_cc)++);
sq 321 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c stats = sq->stats;
sq 323 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c mlx5e_tx_dma_unmap(sq->pdev, dma);
sq 329 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c static void tx_post_fence_nop(struct mlx5e_txqsq *sq)
sq 331 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c struct mlx5_wq_cyc *wq = &sq->wq;
sq 332 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
sq 334 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c tx_fill_wi(sq, pi, 1, 0, NULL);
sq 336 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c mlx5e_post_nop_fence(wq, sq->sqn, &sq->pc);
sq 341 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c struct mlx5e_txqsq *sq,
sq 345 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c struct mlx5e_sq_stats *stats = sq->stats;
sq 346 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c struct mlx5_wq_cyc *wq = &sq->wq;
sq 369 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c tx_post_resync_params(sq, priv_tx, info.rcd_sn);
sq 375 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c tx_post_fence_nop(sq);
sq 379 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c num_wqebbs = mlx5e_ktls_dumps_num_wqebbs(sq, info.nr_frags, info.sync_len);
sq 380 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
sq 384 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room);
sq 397 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c fsz = min_t(unsigned int, sq->hw_mtu, orig_fsz - frag_offset);
sq 399 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c if (tx_post_resync_dump(sq, f, priv_tx->tisn, fence)) {
sq 426 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c struct mlx5e_txqsq *sq,
sq 431 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c struct mlx5e_sq_stats *stats = sq->stats;
sq 451 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c mlx5e_ktls_tx_post_param_wqes(sq, priv_tx, false, false);
sq 452 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c *wqe = mlx5e_sq_fetch_wqe(sq, sizeof(**wqe), pi);
sq 459 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c mlx5e_ktls_tx_handle_ooo(priv_tx, sq, datalen, seq);
sq 463 drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c *wqe = mlx5e_sq_fetch_wqe(sq, sizeof(**wqe), pi);
sq 189 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c struct mlx5e_txqsq *sq, struct sk_buff *skb,
sq 201 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c sq->stats->tls_ooo++;
sq 247 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c sq->stats->tls_resync_bytes += nskb->len;
sq 250 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c mlx5e_sq_xmit(sq, nskb, *wqe, *pi, true);
sq 251 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c *wqe = mlx5e_sq_fetch_wqe(sq, sizeof(**wqe), pi);
sq 260 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c struct mlx5e_txqsq *sq,
sq 272 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c if (MLX5_CAP_GEN(sq->channel->mdev, tls_tx)) {
sq 273 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c skb = mlx5e_ktls_handle_tx_skb(netdev, sq, skb, wqe, pi);
sq 293 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.c skb = mlx5e_tls_handle_ooo(context, sq, skb, wqe, pi, priv->tls);
sq 44 drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_rxtx.h struct mlx5e_txqsq *sq,
sq 57 drivers/net/ethernet/mellanox/mlx5/core/en_dim.c struct mlx5e_txqsq *sq = container_of(dim, struct mlx5e_txqsq, dim);
sq 61 drivers/net/ethernet/mellanox/mlx5/core/en_dim.c mlx5e_complete_dim_work(dim, cur_moder, sq->cq.mdev, &sq->cq.mcq);
sq 525 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c &c->sq[tc].cq.mcq,
sq 233 drivers/net/ethernet/mellanox/mlx5/core/en_main.c struct mlx5e_icosq *sq,
sq 240 drivers/net/ethernet/mellanox/mlx5/core/en_main.c cseg->qpn_ds = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
sq 945 drivers/net/ethernet/mellanox/mlx5/core/en_main.c static void mlx5e_free_xdpsq_db(struct mlx5e_xdpsq *sq)
sq 947 drivers/net/ethernet/mellanox/mlx5/core/en_main.c kvfree(sq->db.xdpi_fifo.xi);
sq 948 drivers/net/ethernet/mellanox/mlx5/core/en_main.c kvfree(sq->db.wqe_info);
sq 951 drivers/net/ethernet/mellanox/mlx5/core/en_main.c static int mlx5e_alloc_xdpsq_fifo(struct mlx5e_xdpsq *sq, int numa)
sq 953 drivers/net/ethernet/mellanox/mlx5/core/en_main.c struct mlx5e_xdp_info_fifo *xdpi_fifo = &sq->db.xdpi_fifo;
sq 954 drivers/net/ethernet/mellanox/mlx5/core/en_main.c int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
sq 962 drivers/net/ethernet/mellanox/mlx5/core/en_main.c xdpi_fifo->pc = &sq->xdpi_fifo_pc;
sq 963 drivers/net/ethernet/mellanox/mlx5/core/en_main.c xdpi_fifo->cc = &sq->xdpi_fifo_cc;
sq 969 drivers/net/ethernet/mellanox/mlx5/core/en_main.c static int mlx5e_alloc_xdpsq_db(struct mlx5e_xdpsq *sq, int numa)
sq 971 drivers/net/ethernet/mellanox/mlx5/core/en_main.c int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
sq 974 drivers/net/ethernet/mellanox/mlx5/core/en_main.c sq->db.wqe_info = kvzalloc_node(sizeof(*sq->db.wqe_info) * wq_sz,
sq 976 drivers/net/ethernet/mellanox/mlx5/core/en_main.c if (!sq->db.wqe_info)
sq 979 drivers/net/ethernet/mellanox/mlx5/core/en_main.c err = mlx5e_alloc_xdpsq_fifo(sq, numa);
sq 981 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mlx5e_free_xdpsq_db(sq);
sq 992 drivers/net/ethernet/mellanox/mlx5/core/en_main.c struct mlx5e_xdpsq *sq,
sq 997 drivers/net/ethernet/mellanox/mlx5/core/en_main.c struct mlx5_wq_cyc *wq = &sq->wq;
sq 1000 drivers/net/ethernet/mellanox/mlx5/core/en_main.c sq->pdev = c->pdev;
sq 1001 drivers/net/ethernet/mellanox/mlx5/core/en_main.c sq->mkey_be = c->mkey_be;
sq 1002 drivers/net/ethernet/mellanox/mlx5/core/en_main.c sq->channel = c;
sq 1003 drivers/net/ethernet/mellanox/mlx5/core/en_main.c sq->uar_map = mdev->mlx5e_res.bfreg.map;
sq 1004 drivers/net/ethernet/mellanox/mlx5/core/en_main.c sq->min_inline_mode = params->tx_min_inline_mode;
sq 1005 drivers/net/ethernet/mellanox/mlx5/core/en_main.c sq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
sq 1006 drivers/net/ethernet/mellanox/mlx5/core/en_main.c sq->umem = umem;
sq 1008 drivers/net/ethernet/mellanox/mlx5/core/en_main.c sq->stats = sq->umem ?
sq 1015 drivers/net/ethernet/mellanox/mlx5/core/en_main.c err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
sq 1020 drivers/net/ethernet/mellanox/mlx5/core/en_main.c err = mlx5e_alloc_xdpsq_db(sq, cpu_to_node(c->cpu));
sq 1027 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mlx5_wq_destroy(&sq->wq_ctrl);
sq 1032 drivers/net/ethernet/mellanox/mlx5/core/en_main.c static void mlx5e_free_xdpsq(struct mlx5e_xdpsq *sq)
sq 1034 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mlx5e_free_xdpsq_db(sq);
sq 1035 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mlx5_wq_destroy(&sq->wq_ctrl);
sq 1038 drivers/net/ethernet/mellanox/mlx5/core/en_main.c static void mlx5e_free_icosq_db(struct mlx5e_icosq *sq)
sq 1040 drivers/net/ethernet/mellanox/mlx5/core/en_main.c kvfree(sq->db.ico_wqe);
sq 1043 drivers/net/ethernet/mellanox/mlx5/core/en_main.c static int mlx5e_alloc_icosq_db(struct mlx5e_icosq *sq, int numa)
sq 1045 drivers/net/ethernet/mellanox/mlx5/core/en_main.c int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
sq 1047 drivers/net/ethernet/mellanox/mlx5/core/en_main.c sq->db.ico_wqe = kvzalloc_node(array_size(wq_sz,
sq 1048 drivers/net/ethernet/mellanox/mlx5/core/en_main.c sizeof(*sq->db.ico_wqe)),
sq 1050 drivers/net/ethernet/mellanox/mlx5/core/en_main.c if (!sq->db.ico_wqe)
sq 1058 drivers/net/ethernet/mellanox/mlx5/core/en_main.c struct mlx5e_icosq *sq = container_of(recover_work, struct mlx5e_icosq,
sq 1061 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mlx5e_reporter_icosq_cqe_err(sq);
sq 1066 drivers/net/ethernet/mellanox/mlx5/core/en_main.c struct mlx5e_icosq *sq)
sq 1070 drivers/net/ethernet/mellanox/mlx5/core/en_main.c struct mlx5_wq_cyc *wq = &sq->wq;
sq 1073 drivers/net/ethernet/mellanox/mlx5/core/en_main.c sq->channel = c;
sq 1074 drivers/net/ethernet/mellanox/mlx5/core/en_main.c sq->uar_map = mdev->mlx5e_res.bfreg.map;
sq 1077 drivers/net/ethernet/mellanox/mlx5/core/en_main.c err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
sq 1082 drivers/net/ethernet/mellanox/mlx5/core/en_main.c err = mlx5e_alloc_icosq_db(sq, cpu_to_node(c->cpu));
sq 1086 drivers/net/ethernet/mellanox/mlx5/core/en_main.c INIT_WORK(&sq->recover_work, mlx5e_icosq_err_cqe_work);
sq 1091 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mlx5_wq_destroy(&sq->wq_ctrl);
sq 1096 drivers/net/ethernet/mellanox/mlx5/core/en_main.c static void mlx5e_free_icosq(struct mlx5e_icosq *sq)
sq 1098 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mlx5e_free_icosq_db(sq);
sq 1099 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mlx5_wq_destroy(&sq->wq_ctrl);
sq 1102 drivers/net/ethernet/mellanox/mlx5/core/en_main.c static void mlx5e_free_txqsq_db(struct mlx5e_txqsq *sq)
sq 1104 drivers/net/ethernet/mellanox/mlx5/core/en_main.c kvfree(sq->db.wqe_info);
sq 1105 drivers/net/ethernet/mellanox/mlx5/core/en_main.c kvfree(sq->db.dma_fifo);
sq 1108 drivers/net/ethernet/mellanox/mlx5/core/en_main.c static int mlx5e_alloc_txqsq_db(struct mlx5e_txqsq *sq, int numa)
sq 1110 drivers/net/ethernet/mellanox/mlx5/core/en_main.c int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
sq 1113 drivers/net/ethernet/mellanox/mlx5/core/en_main.c sq->db.dma_fifo = kvzalloc_node(array_size(df_sz,
sq 1114 drivers/net/ethernet/mellanox/mlx5/core/en_main.c sizeof(*sq->db.dma_fifo)),
sq 1116 drivers/net/ethernet/mellanox/mlx5/core/en_main.c sq->db.wqe_info = kvzalloc_node(array_size(wq_sz,
sq 1117 drivers/net/ethernet/mellanox/mlx5/core/en_main.c sizeof(*sq->db.wqe_info)),
sq 1119 drivers/net/ethernet/mellanox/mlx5/core/en_main.c if (!sq->db.dma_fifo || !sq->db.wqe_info) {
sq 1120 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mlx5e_free_txqsq_db(sq);
sq 1124 drivers/net/ethernet/mellanox/mlx5/core/en_main.c sq->dma_fifo_mask = df_sz - 1;
sq 1134 drivers/net/ethernet/mellanox/mlx5/core/en_main.c struct mlx5e_txqsq *sq,
sq 1139 drivers/net/ethernet/mellanox/mlx5/core/en_main.c struct mlx5_wq_cyc *wq = &sq->wq;
sq 1142 drivers/net/ethernet/mellanox/mlx5/core/en_main.c sq->pdev = c->pdev;
sq 1143 drivers/net/ethernet/mellanox/mlx5/core/en_main.c sq->tstamp = c->tstamp;
sq 1144 drivers/net/ethernet/mellanox/mlx5/core/en_main.c sq->clock = &mdev->clock;
sq 1145 drivers/net/ethernet/mellanox/mlx5/core/en_main.c sq->mkey_be = c->mkey_be;
sq 1146 drivers/net/ethernet/mellanox/mlx5/core/en_main.c sq->channel = c;
sq 1147 drivers/net/ethernet/mellanox/mlx5/core/en_main.c sq->ch_ix = c->ix;
sq 1148 drivers/net/ethernet/mellanox/mlx5/core/en_main.c sq->txq_ix = txq_ix;
sq 1149 drivers/net/ethernet/mellanox/mlx5/core/en_main.c sq->uar_map = mdev->mlx5e_res.bfreg.map;
sq 1150 drivers/net/ethernet/mellanox/mlx5/core/en_main.c sq->min_inline_mode = params->tx_min_inline_mode;
sq 1151 drivers/net/ethernet/mellanox/mlx5/core/en_main.c sq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
sq 1152 drivers/net/ethernet/mellanox/mlx5/core/en_main.c sq->stats = &c->priv->channel_stats[c->ix].sq[tc];
sq 1153 drivers/net/ethernet/mellanox/mlx5/core/en_main.c sq->stop_room = MLX5E_SQ_STOP_ROOM;
sq 1154 drivers/net/ethernet/mellanox/mlx5/core/en_main.c INIT_WORK(&sq->recover_work, mlx5e_tx_err_cqe_work);
sq 1156 drivers/net/ethernet/mellanox/mlx5/core/en_main.c set_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state);
sq 1158 drivers/net/ethernet/mellanox/mlx5/core/en_main.c set_bit(MLX5E_SQ_STATE_IPSEC, &sq->state);
sq 1161 drivers/net/ethernet/mellanox/mlx5/core/en_main.c set_bit(MLX5E_SQ_STATE_TLS, &sq->state);
sq 1162 drivers/net/ethernet/mellanox/mlx5/core/en_main.c sq->stop_room += MLX5E_SQ_TLS_ROOM +
sq 1306 drivers/net/ethernet/mellanox/mlx5/core/en_main.c struct mlx5e_txqsq *sq, u32 rate);
sq 1313 drivers/net/ethernet/mellanox/mlx5/core/en_main.c struct mlx5e_txqsq *sq,
sq 1320 drivers/net/ethernet/mellanox/mlx5/core/en_main.c err = mlx5e_alloc_txqsq(c, txq_ix, params, param, sq, tc);
sq 1326 drivers/net/ethernet/mellanox/mlx5/core/en_main.c csp.cqn = sq->cq.mcq.cqn;
sq 1327 drivers/net/ethernet/mellanox/mlx5/core/en_main.c csp.wq_ctrl = &sq->wq_ctrl;
sq 1328 drivers/net/ethernet/mellanox/mlx5/core/en_main.c csp.min_inline_mode = sq->min_inline_mode;
sq 1329 drivers/net/ethernet/mellanox/mlx5/core/en_main.c err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn);
sq 1333 drivers/net/ethernet/mellanox/mlx5/core/en_main.c tx_rate = c->priv->tx_rates[sq->txq_ix];
sq 1335 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mlx5e_set_sq_maxrate(c->netdev, sq, tx_rate);
sq 1338 drivers/net/ethernet/mellanox/mlx5/core/en_main.c sq->state |= BIT(MLX5E_SQ_STATE_AM);
sq 1343 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mlx5e_free_txqsq(sq);
sq 1348 drivers/net/ethernet/mellanox/mlx5/core/en_main.c void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq)
sq 1350 drivers/net/ethernet/mellanox/mlx5/core/en_main.c sq->txq = netdev_get_tx_queue(sq->channel->netdev, sq->txq_ix);
sq 1351 drivers/net/ethernet/mellanox/mlx5/core/en_main.c set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
sq 1352 drivers/net/ethernet/mellanox/mlx5/core/en_main.c netdev_tx_reset_queue(sq->txq);
sq 1353 drivers/net/ethernet/mellanox/mlx5/core/en_main.c netif_tx_start_queue(sq->txq);
sq 1363 drivers/net/ethernet/mellanox/mlx5/core/en_main.c static void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq)
sq 1365 drivers/net/ethernet/mellanox/mlx5/core/en_main.c struct mlx5e_channel *c = sq->channel;
sq 1366 drivers/net/ethernet/mellanox/mlx5/core/en_main.c struct mlx5_wq_cyc *wq = &sq->wq;
sq 1368 drivers/net/ethernet/mellanox/mlx5/core/en_main.c clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
sq 1372 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mlx5e_tx_disable_queue(sq->txq);
sq 1375 drivers/net/ethernet/mellanox/mlx5/core/en_main.c if (mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, 1)) {
sq 1376 drivers/net/ethernet/mellanox/mlx5/core/en_main.c u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
sq 1380 drivers/net/ethernet/mellanox/mlx5/core/en_main.c wi = &sq->db.wqe_info[pi];
sq 1384 drivers/net/ethernet/mellanox/mlx5/core/en_main.c nop = mlx5e_post_nop(wq, sq->sqn, &sq->pc);
sq 1385 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &nop->ctrl);
sq 1389 drivers/net/ethernet/mellanox/mlx5/core/en_main.c static void mlx5e_close_txqsq(struct mlx5e_txqsq *sq)
sq 1391 drivers/net/ethernet/mellanox/mlx5/core/en_main.c struct mlx5e_channel *c = sq->channel;
sq 1395 drivers/net/ethernet/mellanox/mlx5/core/en_main.c cancel_work_sync(&sq->dim.work);
sq 1396 drivers/net/ethernet/mellanox/mlx5/core/en_main.c cancel_work_sync(&sq->recover_work);
sq 1397 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mlx5e_destroy_sq(mdev, sq->sqn);
sq 1398 drivers/net/ethernet/mellanox/mlx5/core/en_main.c if (sq->rate_limit) {
sq 1399 drivers/net/ethernet/mellanox/mlx5/core/en_main.c rl.rate = sq->rate_limit;
sq 1402 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mlx5e_free_txqsq_descs(sq);
sq 1403 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mlx5e_free_txqsq(sq);
sq 1408 drivers/net/ethernet/mellanox/mlx5/core/en_main.c struct mlx5e_txqsq *sq = container_of(recover_work, struct mlx5e_txqsq,
sq 1411 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mlx5e_reporter_tx_err_cqe(sq);
sq 1415 drivers/net/ethernet/mellanox/mlx5/core/en_main.c struct mlx5e_sq_param *param, struct mlx5e_icosq *sq)
sq 1420 drivers/net/ethernet/mellanox/mlx5/core/en_main.c err = mlx5e_alloc_icosq(c, param, sq);
sq 1424 drivers/net/ethernet/mellanox/mlx5/core/en_main.c csp.cqn = sq->cq.mcq.cqn;
sq 1425 drivers/net/ethernet/mellanox/mlx5/core/en_main.c csp.wq_ctrl = &sq->wq_ctrl;
sq 1427 drivers/net/ethernet/mellanox/mlx5/core/en_main.c err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn);
sq 1434 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mlx5e_free_icosq(sq);
sq 1452 drivers/net/ethernet/mellanox/mlx5/core/en_main.c void mlx5e_close_icosq(struct mlx5e_icosq *sq)
sq 1454 drivers/net/ethernet/mellanox/mlx5/core/en_main.c struct mlx5e_channel *c = sq->channel;
sq 1456 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mlx5e_destroy_sq(c->mdev, sq->sqn);
sq 1457 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mlx5e_free_icosq(sq);
sq 1462 drivers/net/ethernet/mellanox/mlx5/core/en_main.c struct mlx5e_xdpsq *sq, bool is_redirect)
sq 1467 drivers/net/ethernet/mellanox/mlx5/core/en_main.c err = mlx5e_alloc_xdpsq(c, params, umem, param, sq, is_redirect);
sq 1473 drivers/net/ethernet/mellanox/mlx5/core/en_main.c csp.cqn = sq->cq.mcq.cqn;
sq 1474 drivers/net/ethernet/mellanox/mlx5/core/en_main.c csp.wq_ctrl = &sq->wq_ctrl;
sq 1475 drivers/net/ethernet/mellanox/mlx5/core/en_main.c csp.min_inline_mode = sq->min_inline_mode;
sq 1476 drivers/net/ethernet/mellanox/mlx5/core/en_main.c set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
sq 1477 drivers/net/ethernet/mellanox/mlx5/core/en_main.c err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn);
sq 1481 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mlx5e_set_xmit_fp(sq, param->is_mpw);
sq 1488 drivers/net/ethernet/mellanox/mlx5/core/en_main.c if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE) {
sq 1494 drivers/net/ethernet/mellanox/mlx5/core/en_main.c for (i = 0; i < mlx5_wq_cyc_get_size(&sq->wq); i++) {
sq 1495 drivers/net/ethernet/mellanox/mlx5/core/en_main.c struct mlx5e_xdp_wqe_info *wi = &sq->db.wqe_info[i];
sq 1496 drivers/net/ethernet/mellanox/mlx5/core/en_main.c struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(&sq->wq, i);
sq 1501 drivers/net/ethernet/mellanox/mlx5/core/en_main.c cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
sq 1505 drivers/net/ethernet/mellanox/mlx5/core/en_main.c dseg->lkey = sq->mkey_be;
sq 1515 drivers/net/ethernet/mellanox/mlx5/core/en_main.c clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
sq 1516 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mlx5e_free_xdpsq(sq);
sq 1521 drivers/net/ethernet/mellanox/mlx5/core/en_main.c void mlx5e_close_xdpsq(struct mlx5e_xdpsq *sq)
sq 1523 drivers/net/ethernet/mellanox/mlx5/core/en_main.c struct mlx5e_channel *c = sq->channel;
sq 1525 drivers/net/ethernet/mellanox/mlx5/core/en_main.c clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
sq 1528 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mlx5e_destroy_sq(c->mdev, sq->sqn);
sq 1529 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mlx5e_free_xdpsq_descs(sq);
sq 1530 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mlx5e_free_xdpsq(sq);
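The open/close pairs above (mlx5e_open_txqsq/mlx5e_close_txqsq, mlx5e_open_icosq/mlx5e_close_icosq, mlx5e_open_xdpsq/mlx5e_close_xdpsq) share one discipline: host memory first, then the hardware object, the ENABLED bit last, and teardown in exactly the reverse order. A minimal sketch of that ordering; every name below is an invented stand-in, not the driver's API:

    struct model_sq { int allocated, created, enabled; };

    static int  model_alloc(struct model_sq *sq)      { sq->allocated = 1; return 0; }
    static void model_free(struct model_sq *sq)       { sq->allocated = 0; }
    static int  model_create_hw(struct model_sq *sq)  { sq->created = 1; return 0; }
    static void model_destroy_hw(struct model_sq *sq) { sq->created = 0; }

    static int model_open_sq(struct model_sq *sq)
    {
        int err = model_alloc(sq);        /* host memory */
        if (err)
            return err;
        err = model_create_hw(sq);        /* device object */
        if (err) {
            model_free(sq);               /* unwind in reverse */
            return err;
        }
        sq->enabled = 1;                  /* ENABLED only when fully ready */
        return 0;
    }

    static void model_close_sq(struct model_sq *sq)
    {
        sq->enabled = 0;                  /* stop new work first */
        model_destroy_hw(sq);             /* then drop the HW object */
        model_free(sq);                   /* free memory last */
    }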
sq 1690 drivers/net/ethernet/mellanox/mlx5/core/en_main.c &cparam->tx_cq, &c->sq[tc].cq);
sq 1699 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mlx5e_close_cq(&c->sq[tc].cq);
sq 1709 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mlx5e_close_cq(&c->sq[tc].cq);
sq 1722 drivers/net/ethernet/mellanox/mlx5/core/en_main.c params, &cparam->sq, &c->sq[tc], tc);
sq 1731 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mlx5e_close_txqsq(&c->sq[tc]);
sq 1741 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mlx5e_close_txqsq(&c->sq[tc]);
sq 1745 drivers/net/ethernet/mellanox/mlx5/core/en_main.c struct mlx5e_txqsq *sq, u32 rate)
sq 1754 drivers/net/ethernet/mellanox/mlx5/core/en_main.c if (rate == sq->rate_limit)
sq 1758 drivers/net/ethernet/mellanox/mlx5/core/en_main.c if (sq->rate_limit) {
sq 1759 drivers/net/ethernet/mellanox/mlx5/core/en_main.c rl.rate = sq->rate_limit;
sq 1764 drivers/net/ethernet/mellanox/mlx5/core/en_main.c sq->rate_limit = 0;
sq 1780 drivers/net/ethernet/mellanox/mlx5/core/en_main.c err = mlx5e_modify_sq(mdev, sq->sqn, &msp);
sq 1790 drivers/net/ethernet/mellanox/mlx5/core/en_main.c sq->rate_limit = rate;
sq 1798 drivers/net/ethernet/mellanox/mlx5/core/en_main.c struct mlx5e_txqsq *sq = priv->txq2sq[index];
sq 1817 drivers/net/ethernet/mellanox/mlx5/core/en_main.c err = mlx5e_set_sq_maxrate(dev, sq, rate);
sq 2037 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mlx5e_activate_txqsq(&c->sq[tc]);
sq 2056 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mlx5e_deactivate_txqsq(&c->sq[tc]);
sq 2338 drivers/net/ethernet/mellanox/mlx5/core/en_main.c mlx5e_build_sq_param(priv, params, &cparam->sq);
sq 2934 drivers/net/ethernet/mellanox/mlx5/core/en_main.c struct mlx5e_txqsq *sq = &c->sq[tc];
sq 2936 drivers/net/ethernet/mellanox/mlx5/core/en_main.c priv->txq2sq[sq->txq_ix] = sq;
sq 3568 drivers/net/ethernet/mellanox/mlx5/core/en_main.c struct mlx5e_sq_stats *sq_stats = &channel_stats->sq[j];
sq 4352 drivers/net/ethernet/mellanox/mlx5/core/en_main.c struct mlx5e_txqsq *sq = priv->txq2sq[i];
sq 4357 drivers/net/ethernet/mellanox/mlx5/core/en_main.c if (mlx5e_reporter_tx_timeout(sq))
sq 482 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c sqs[num_sqs++] = c->sq[tc].sqn;
sq 469 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c static inline void mlx5e_fill_icosq_frag_edge(struct mlx5e_icosq *sq,
sq 473 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c struct mlx5e_sq_wqe_info *edge_wi, *wi = &sq->db.ico_wqe[pi];
sq 481 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c mlx5e_post_nop(wq, sq->sqn, &sq->pc);
sq 489 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c struct mlx5e_icosq *sq = &rq->channel->icosq;
sq 490 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c struct mlx5_wq_cyc *wq = &sq->wq;
sq 503 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
sq 506 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c mlx5e_fill_icosq_frag_edge(sq, wq, pi, contig_wqebbs_room);
sq 507 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
sq 524 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) |
sq 528 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_UMR;
sq 529 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c sq->db.ico_wqe[pi].num_wqebbs = MLX5E_UMR_WQEBBS;
sq 530 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c sq->db.ico_wqe[pi].umr.rq = rq;
sq 531 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c sq->pc += MLX5E_UMR_WQEBBS;
sq 533 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c sq->doorbell_cseg = &umr_wqe->ctrl;
sq 592 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c struct mlx5e_icosq *sq = container_of(cq, struct mlx5e_icosq, cq);
sq 597 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
sq 607 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c sqcc = sq->cc;
sq 621 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state))
sq 622 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c queue_work(cq->channel->priv->wq, &sq->recover_work);
sq 631 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
sq 632 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c wi = &sq->db.ico_wqe[ci];
sq 646 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c sq->cc = sqcc;
sq 655 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c struct mlx5e_icosq *sq = &rq->channel->icosq;
sq 691 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c if (sq->doorbell_cseg) {
sq 692 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, sq->doorbell_cseg);
sq 693 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c sq->doorbell_cseg = NULL;
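The en_rx.c lines above show a deferred-doorbell pattern: each UMR WQE advances sq->pc and records its control segment in sq->doorbell_cseg, but the doorbell itself is rung once, after the posting loop. A userspace sketch of that batching, with simplified stand-in types:

    #include <stddef.h>
    #include <stdint.h>

    struct icosq_model {
        uint16_t pc;                  /* producer counter */
        const void *doorbell_cseg;    /* ctrl segment of last posted WQE */
    };

    static void post_wqe(struct icosq_model *sq, const void *ctrl_seg,
                         uint16_t num_wqebbs)
    {
        sq->pc += num_wqebbs;         /* publish progress locally... */
        sq->doorbell_cseg = ctrl_seg; /* ...but defer the doorbell */
    }

    static void flush_doorbell(struct icosq_model *sq,
                               void (*notify_hw)(uint16_t pc, const void *cseg))
    {
        if (sq->doorbell_cseg) {      /* one MMIO write for the whole batch */
            notify_hw(sq->pc, sq->doorbell_cseg);
            sq->doorbell_cseg = NULL;
        }
    }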
sq 268 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c struct mlx5e_sq_stats *sq_stats = &channel_stats->sq[j];
sq 1653 drivers/net/ethernet/mellanox/mlx5/core/en_stats.c MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].sq[tc],
sq 44 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c static void mlx5e_dma_unmap_wqe_err(struct mlx5e_txqsq *sq, u8 num_dma)
sq 50 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c mlx5e_dma_get(sq, --sq->dma_fifo_pc);
sq 52 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c mlx5e_tx_dma_unmap(sq->pdev, last_pushed_dma);
sq 150 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg)
sq 157 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c sq->stats->csum_partial_inner++;
sq 160 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c sq->stats->csum_partial++;
sq 163 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c sq->stats->csum_none++;
sq 167 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c mlx5e_tx_get_gso_ihs(struct mlx5e_txqsq *sq, struct sk_buff *skb)
sq 169 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c struct mlx5e_sq_stats *stats = sq->stats;
sq 189 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c mlx5e_txwqe_build_dsegs(struct mlx5e_txqsq *sq, struct sk_buff *skb,
sq 198 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c dma_addr = dma_map_single(sq->pdev, skb_data, headlen,
sq 200 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
sq 204 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c dseg->lkey = sq->mkey_be;
sq 207 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c mlx5e_dma_push(sq, dma_addr, headlen, MLX5E_DMA_MAP_SINGLE);
sq 216 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz,
sq 218 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
sq 222 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c dseg->lkey = sq->mkey_be;
sq 225 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE);
sq 233 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c mlx5e_dma_unmap_wqe_err(sq, num_dma);
sq 238 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
sq 243 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c struct mlx5_wq_cyc *wq = &sq->wq;
sq 251 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | opcode);
sq 252 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
sq 257 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c sq->pc += wi->num_wqebbs;
sq 258 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c if (unlikely(!mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, sq->stop_room))) {
sq 259 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c netif_tx_stop_queue(sq->txq);
sq 260 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c sq->stats->stopped++;
sq 263 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c send_doorbell = __netdev_tx_sent_queue(sq->txq, num_bytes,
sq 266 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c mlx5e_notify_hw(wq, sq->pc, sq->uar_map, cseg);
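mlx5e_txwqe_build_dsegs() and mlx5e_dma_unmap_wqe_err() above pair every successful DMA mapping with a push onto a fifo, so that a later mapping failure can pop and unmap exactly the entries pushed for this WQE, in reverse order. A minimal sketch with a plain array standing in for the driver's fifo:

    struct dma_rec { unsigned long addr; unsigned int size; };

    struct dma_fifo {
        struct dma_rec rec[64];       /* power-of-two depth, as in the driver */
        unsigned int pc;              /* producer cursor */
    };

    static void dma_push(struct dma_fifo *f, unsigned long a, unsigned int s)
    {
        f->rec[f->pc % 64] = (struct dma_rec){ a, s };
        f->pc++;
    }

    /* On a mapping error, undo the num most recent pushes. */
    static void dma_unwind(struct dma_fifo *f, unsigned int num,
                           void (*unmap)(struct dma_rec *))
    {
        while (num--)                 /* pop in reverse push order */
            unmap(&f->rec[--f->pc % 64]);
    }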
sq 269 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
sq 272 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c struct mlx5_wq_cyc *wq = &sq->wq;
sq 278 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c struct mlx5e_sq_stats *stats = sq->stats;
sq 291 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c ihs = mlx5e_tx_get_gso_ihs(sq, skb);
sq 295 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c u8 mode = mlx5e_tx_wqe_inline_mode(sq, &wqe->ctrl, skb);
sq 327 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room);
sq 328 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c wqe = mlx5e_sq_fetch_wqe(sq, sizeof(*wqe), &pi);
sq 338 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c wi = &sq->db.wqe_info[pi];
sq 347 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c mlx5e_txwqe_build_eseg_csum(sq, skb, eseg);
sq 369 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb->data + ihs, headlen, dseg);
sq 373 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt, num_wqebbs, num_bytes,
sq 389 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c struct mlx5e_txqsq *sq;
sq 392 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c sq = priv->txq2sq[skb_get_queue_mapping(skb)];
sq 393 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c wqe = mlx5e_sq_fetch_wqe(sq, sizeof(*wqe), &pi);
sq 396 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c skb = mlx5e_accel_handle_tx(skb, sq, dev, &wqe, &pi);
sq 400 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c return mlx5e_sq_xmit(sq, skb, wqe, pi, netdev_xmit_more());
sq 403 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c static void mlx5e_dump_error_cqe(struct mlx5e_txqsq *sq,
sq 406 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c struct mlx5_cqwq *wq = &sq->cq.wq;
sq 411 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c netdev_err(sq->channel->netdev,
sq 413 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c sq->cq.mcq.cqn, ci, sq->sqn,
sq 416 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c mlx5_dump_err_cqe(sq->cq.mdev, err_cqe);
sq 422 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c struct mlx5e_txqsq *sq;
sq 430 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c sq = container_of(cq, struct mlx5e_txqsq, cq);
sq 432 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
sq 439 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c stats = sq->stats;
sq 447 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c sqcc = sq->cc;
sq 450 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c dma_fifo_cc = sq->dma_fifo_cc;
sq 463 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c &sq->state)) {
sq 464 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c mlx5e_dump_error_cqe(sq,
sq 467 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c &sq->recover_work);
sq 480 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
sq 481 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c wi = &sq->db.wqe_info[ci];
sq 485 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c mlx5e_ktls_tx_handle_resync_dump_comp(sq, wi, &dma_fifo_cc);
sq 495 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c mlx5_timecounter_cyc2time(sq->clock,
sq 502 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c mlx5e_dma_get(sq, dma_fifo_cc++);
sq 504 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c mlx5e_tx_dma_unmap(sq->pdev, dma);
sq 522 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c sq->dma_fifo_cc = dma_fifo_cc;
sq 523 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c sq->cc = sqcc;
sq 525 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c netdev_tx_completed_queue(sq->txq, npkts, nbytes);
sq 527 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c if (netif_tx_queue_stopped(sq->txq) &&
sq 528 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, sq->stop_room) &&
sq 529 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c !test_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state)) {
sq 530 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c netif_tx_wake_queue(sq->txq);
sq 537 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq)
sq 545 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c sqcc = sq->cc;
sq 546 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c dma_fifo_cc = sq->dma_fifo_cc;
sq 548 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c while (sqcc != sq->pc) {
sq 549 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
sq 550 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c wi = &sq->db.wqe_info[ci];
sq 554 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c mlx5e_ktls_tx_handle_resync_dump_comp(sq, wi, &dma_fifo_cc);
sq 561 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c mlx5e_dma_get(sq, dma_fifo_cc++);
sq 563 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c mlx5e_tx_dma_unmap(sq->pdev, dma);
sq 572 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c sq->dma_fifo_cc = dma_fifo_cc;
sq 573 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c sq->cc = sqcc;
sq 575 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c netdev_tx_completed_queue(sq->txq, npkts, nbytes);
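mlx5e_poll_tx_cq() above reads sq->cc and sq->dma_fifo_cc once into locals, advances them while processing completions, writes them back once, and only then wakes the txq if room (including stop_room slack) is available again. A simplified model of that cursor discipline, not the driver's real types:

    struct txq_model {
        unsigned int cc, pc;      /* consumer/producer counters */
        unsigned int stop_room;   /* slack kept for a worst-case WQE */
        int stopped;
    };

    static int has_room(const struct txq_model *q, unsigned int size)
    {
        return size - (q->pc - q->cc) >= q->stop_room;
    }

    static void poll_completions(struct txq_model *q, unsigned int ndone,
                                 unsigned int size)
    {
        unsigned int cc = q->cc;  /* cache once... */

        cc += ndone;              /* ...advance while processing... */
        q->cc = cc;               /* ...write back once */

        if (q->stopped && has_room(q, size))
            q->stopped = 0;       /* netif_tx_wake_queue() in the driver */
    }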
sq 588 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
sq 592 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c struct mlx5_wq_cyc *wq = &sq->wq;
sq 601 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c struct mlx5e_sq_stats *stats = sq->stats;
sq 614 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c ihs = mlx5e_tx_get_gso_ihs(sq, skb);
sq 618 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c u8 mode = mlx5e_tx_wqe_inline_mode(sq, NULL, skb);
sq 640 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
sq 643 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room);
sq 644 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
sq 647 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c mlx5i_sq_fetch_wqe(sq, &wqe, pi);
sq 650 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c wi = &sq->db.wqe_info[pi];
sq 658 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c mlx5e_txwqe_build_eseg_csum(sq, skb, eseg);
sq 668 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb->data + ihs, headlen, dseg);
sq 672 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt, num_wqebbs, num_bytes,
sq 50 drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c static void mlx5e_handle_tx_dim(struct mlx5e_txqsq *sq)
sq 52 drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c struct mlx5e_sq_stats *stats = sq->stats;
sq 55 drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c if (unlikely(!test_bit(MLX5E_SQ_STATE_AM, &sq->state)))
sq 58 drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c dim_update_sample(sq->cq.event_ctr, stats->packets, stats->bytes, &dim_sample);
sq 59 drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c net_dim(&sq->dim, dim_sample);
sq 74 drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c void mlx5e_trigger_irq(struct mlx5e_icosq *sq)
sq 76 drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c struct mlx5_wq_cyc *wq = &sq->wq;
sq 78 drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
sq 80 drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_NOP;
sq 81 drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c sq->db.ico_wqe[pi].num_wqebbs = 1;
sq 82 drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c nopwqe = mlx5e_post_nop(wq, sq->sqn, &sq->pc);
sq 83 drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &nopwqe->ctrl);
sq 127 drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c busy |= mlx5e_poll_tx_cq(&c->sq[i].cq, budget);
sq 174 drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c mlx5e_handle_tx_dim(&c->sq[i]);
sq 175 drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c mlx5e_cq_arm(&c->sq[i].cq);
sq 135 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c *conn->qp.wq.sq.db = cpu_to_be32(conn->qp.sq.pc);
sq 149 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c ix = conn->qp.sq.pc & (conn->qp.sq.size - 1);
sq 151 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c ctrl = mlx5_wq_cyc_get_wqe(&conn->qp.wq.sq, ix);
sq 166 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c ctrl->opmod_idx_opcode = cpu_to_be32(((conn->qp.sq.pc & 0xffff) << 8) |
sq 170 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c conn->qp.sq.pc++;
sq 171 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c conn->qp.sq.bufs[ix] = buf;
sq 189 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c spin_lock_irqsave(&conn->qp.sq.lock, flags);
sq 191 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c if (conn->qp.sq.pc - conn->qp.sq.cc >= conn->qp.sq.size) {
sq 192 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c list_add_tail(&buf->list, &conn->qp.sq.backlog);
sq 199 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c spin_unlock_irqrestore(&conn->qp.sq.lock, flags);
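The fpga/conn.c lines above park a buffer on a backlog list when the ring is full (pc - cc >= size) and resubmit from the completion path once a slot frees up. A userspace sketch of that discipline, with an invented singly linked list and post callback (the driver uses list_head under a spinlock):

    #include <stddef.h>

    struct buf { struct buf *next; };

    struct conn_sq {
        unsigned int pc, cc, size;
        struct buf *backlog_head, **backlog_tail;
    };

    static void sq_init(struct conn_sq *sq, unsigned int size)
    {
        sq->pc = sq->cc = 0;
        sq->size = size;
        sq->backlog_head = NULL;
        sq->backlog_tail = &sq->backlog_head;
    }

    static void sq_send(struct conn_sq *sq, struct buf *b,
                        void (*post)(struct buf *))
    {
        if (sq->pc - sq->cc >= sq->size) { /* ring full: park the buffer */
            b->next = NULL;
            *sq->backlog_tail = b;
            sq->backlog_tail = &b->next;
            return;
        }
        sq->pc++;
        post(b);
    }

    static void sq_complete(struct conn_sq *sq, void (*post)(struct buf *))
    {
        sq->cc++;                          /* a slot freed up... */
        if (sq->backlog_head) {            /* ...drain one backlog entry */
            struct buf *b = sq->backlog_head;
            sq->backlog_head = b->next;
            if (!sq->backlog_head)
                sq->backlog_tail = &sq->backlog_head;
            sq->pc++;
            post(b);
        }
    }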
sq 297 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c spin_lock_irqsave(&conn->qp.sq.lock, flags);
sq 299 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c ix = be16_to_cpu(cqe->wqe_counter) & (conn->qp.sq.size - 1);
sq 300 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c buf = conn->qp.sq.bufs[ix];
sq 301 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c conn->qp.sq.bufs[ix] = NULL;
sq 302 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c conn->qp.sq.cc++;
sq 305 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c if (unlikely(!list_empty(&conn->qp.sq.backlog))) {
sq 307 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c nextbuf = list_first_entry(&conn->qp.sq.backlog,
sq 314 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c spin_unlock_irqrestore(&conn->qp.sq.lock, flags);
sq 545 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c conn->qp.sq.pc = 0;
sq 546 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c conn->qp.sq.cc = 0;
sq 547 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c conn->qp.sq.size = roundup_pow_of_two(tx_size);
sq 551 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c MLX5_SET(qpc, temp_qpc, log_sq_size, ilog2(conn->qp.sq.size));
sq 564 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c conn->qp.sq.bufs = kvcalloc(conn->qp.sq.size,
sq 565 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c sizeof(conn->qp.sq.bufs[0]),
sq 567 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c if (!conn->qp.sq.bufs) {
sq 593 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c MLX5_SET(qpc, qpc, log_sq_size, ilog2(conn->qp.sq.size));
sq 613 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c kvfree(conn->qp.sq.bufs);
sq 641 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c for (ix = 0; ix < conn->qp.sq.size; ix++) {
sq 642 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c buf = conn->qp.sq.bufs[ix];
sq 645 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c conn->qp.sq.bufs[ix] = NULL;
sq 651 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c list_for_each_entry_safe(buf, temp, &conn->qp.sq.backlog, list) {
sq 664 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c kvfree(conn->qp.sq.bufs);
sq 866 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c INIT_LIST_HEAD(&conn->qp.sq.backlog);
sq 868 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c spin_lock_init(&conn->qp.sq.lock);
sq 76 drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.h } sq;
sq 134 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c struct mlx5e_sq_stats *sq_stats = &channel_stats->sq[j];
sq 630 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c struct mlx5e_txqsq *sq = epriv->txq2sq[skb_get_queue_mapping(skb)];
sq 634 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c return mlx5i_sq_xmit(sq, skb, &mah->av, dqpn, ipriv->qkey, netdev_xmit_more());
sq 113 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h static inline void mlx5i_sq_fetch_wqe(struct mlx5e_txqsq *sq,
sq 117 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h struct mlx5_wq_cyc *wq = &sq->wq;
sq 123 drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
sq 651 drivers/net/ethernet/mellanox/mlx5/core/qp.c struct mlx5_core_qp *sq)
sq 660 drivers/net/ethernet/mellanox/mlx5/core/qp.c sq->uid = MLX5_GET(create_sq_in, in, uid);
sq 661 drivers/net/ethernet/mellanox/mlx5/core/qp.c sq->qpn = sqn;
sq 662 drivers/net/ethernet/mellanox/mlx5/core/qp.c err = create_resource_common(dev, sq, MLX5_RES_SQ);
sq 669 drivers/net/ethernet/mellanox/mlx5/core/qp.c destroy_sq_tracked(dev, sq->qpn, sq->uid);
sq 676 drivers/net/ethernet/mellanox/mlx5/core/qp.c struct mlx5_core_qp *sq)
sq 678 drivers/net/ethernet/mellanox/mlx5/core/qp.c destroy_resource_common(dev, sq);
sq 679 drivers/net/ethernet/mellanox/mlx5/core/qp.c destroy_sq_tracked(dev, sq->qpn, sq->uid);
sq 58 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c (dr_cq->qp->sq.wqe_cnt - 1);
sq 59 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c dr_cq->qp->sq.cc = dr_cq->qp->sq.wqe_head[idx] + 1;
sq 61 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c ++dr_cq->qp->sq.cc;
sq 64 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c (dr_cq->qp->sq.wqe_cnt - 1);
sq 65 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c dr_cq->qp->sq.cc = dr_cq->qp->sq.wqe_head[idx] + 1;
sq 129 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c dr_qp->sq.pc = 0;
sq 130 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c dr_qp->sq.cc = 0;
sq 131 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c dr_qp->sq.wqe_cnt = roundup_pow_of_two(attr->max_send_wr);
sq 135 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c MLX5_SET(qpc, temp_qpc, log_sq_size, ilog2(dr_qp->sq.wqe_cnt));
sq 143 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c dr_qp->sq.wqe_head = kcalloc(dr_qp->sq.wqe_cnt,
sq 144 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c sizeof(dr_qp->sq.wqe_head[0]),
sq 147 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c if (!dr_qp->sq.wqe_head) {
sq 175 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c MLX5_SET(qpc, qpc, log_sq_size, ilog2(dr_qp->sq.wqe_cnt));
sq 196 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c kfree(dr_qp->sq.wqe_head);
sq 208 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c kfree(dr_qp->sq.wqe_head);
sq 216 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c *dr_qp->wq.sq.db = cpu_to_be32(dr_qp->sq.pc & 0xfffff);
sq 237 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c idx = dr_qp->sq.pc & (dr_qp->sq.wqe_cnt - 1);
sq 239 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c wq_ctrl = mlx5_wq_cyc_get_wqe(&dr_qp->wq.sq, idx);
sq 243 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c wq_ctrl->opmod_idx_opcode = cpu_to_be32(((dr_qp->sq.pc & 0xffff) << 8) |
sq 256 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c dr_qp->sq.wqe_head[idx] = dr_qp->sq.pc++;
sq 982 drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h } sq;
sq 145 drivers/net/ethernet/mellanox/mlx5/core/wq.c &wq->sq.fbc);
sq 150 drivers/net/ethernet/mellanox/mlx5/core/wq.c log_sq_stride, log_sq_sz, &wq->sq.fbc);
sq 154 drivers/net/ethernet/mellanox/mlx5/core/wq.c wq->sq.db = &wq_ctrl->db.db[MLX5_SND_DBR];
sq 61 drivers/net/ethernet/mellanox/mlx5/core/wq.h struct mlx5_wq_cyc sq;
sq 180 drivers/net/virtio_net.c struct send_queue *sq;
sq 340 drivers/net/virtio_net.c struct napi_struct *napi = &vi->sq[vq2txq(vq)].napi;
sq 447 drivers/net/virtio_net.c struct send_queue *sq,
sq 467 drivers/net/virtio_net.c sg_init_one(sq->sg, xdpf->data, xdpf->len);
sq 469 drivers/net/virtio_net.c err = virtqueue_add_outbuf(sq->vq, sq->sg, 1, xdp_to_ptr(xdpf),
sq 482 drivers/net/virtio_net.c return &vi->sq[qp];
sq 491 drivers/net/virtio_net.c struct send_queue *sq;
sq 508 drivers/net/virtio_net.c sq = virtnet_xdp_sq(vi);
sq 517 drivers/net/virtio_net.c while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
sq 535 drivers/net/virtio_net.c err = __virtnet_xdp_xmit_one(vi, sq, xdpf);
sq 544 drivers/net/virtio_net.c if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq))
sq 548 drivers/net/virtio_net.c u64_stats_update_begin(&sq->stats.syncp);
sq 549 drivers/net/virtio_net.c sq->stats.bytes += bytes;
sq 550 drivers/net/virtio_net.c sq->stats.packets += packets;
sq 551 drivers/net/virtio_net.c sq->stats.xdp_tx += n;
sq 552 drivers/net/virtio_net.c sq->stats.xdp_tx_drops += drops;
sq 553 drivers/net/virtio_net.c sq->stats.kicks += kicks;
sq 554 drivers/net/virtio_net.c u64_stats_update_end(&sq->stats.syncp);
sq 1354 drivers/net/virtio_net.c static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi)
sq 1361 drivers/net/virtio_net.c while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
sq 1384 drivers/net/virtio_net.c u64_stats_update_begin(&sq->stats.syncp);
sq 1385 drivers/net/virtio_net.c sq->stats.bytes += bytes;
sq 1386 drivers/net/virtio_net.c sq->stats.packets += packets;
sq 1387 drivers/net/virtio_net.c u64_stats_update_end(&sq->stats.syncp);
sq 1404 drivers/net/virtio_net.c struct send_queue *sq = &vi->sq[index];
sq 1407 drivers/net/virtio_net.c if (!sq->napi.weight || is_xdp_raw_buffer_queue(vi, index))
sq 1411 drivers/net/virtio_net.c free_old_xmit_skbs(sq, true);
sq 1415 drivers/net/virtio_net.c if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
sq 1424 drivers/net/virtio_net.c struct send_queue *sq;
sq 1440 drivers/net/virtio_net.c sq = virtnet_xdp_sq(vi);
sq 1441 drivers/net/virtio_net.c if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
sq 1442 drivers/net/virtio_net.c u64_stats_update_begin(&sq->stats.syncp);
sq 1443 drivers/net/virtio_net.c sq->stats.kicks++;
sq 1444 drivers/net/virtio_net.c u64_stats_update_end(&sq->stats.syncp);
sq 1474 drivers/net/virtio_net.c virtnet_napi_tx_enable(vi, vi->sq[i].vq, &vi->sq[i].napi);
sq 1482 drivers/net/virtio_net.c struct send_queue *sq = container_of(napi, struct send_queue, napi);
sq 1483 drivers/net/virtio_net.c struct virtnet_info *vi = sq->vq->vdev->priv;
sq 1484 drivers/net/virtio_net.c unsigned int index = vq2txq(sq->vq);
sq 1495 drivers/net/virtio_net.c free_old_xmit_skbs(sq, true);
sq 1498 drivers/net/virtio_net.c virtqueue_napi_complete(napi, sq->vq, 0);
sq 1500 drivers/net/virtio_net.c if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
sq 1506 drivers/net/virtio_net.c static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
sq 1510 drivers/net/virtio_net.c struct virtnet_info *vi = sq->vq->vdev->priv;
sq 1535 drivers/net/virtio_net.c sg_init_table(sq->sg, skb_shinfo(skb)->nr_frags + (can_push ? 1 : 2));
sq 1538 drivers/net/virtio_net.c num_sg = skb_to_sgvec(skb, sq->sg, 0, skb->len);
sq 1544 drivers/net/virtio_net.c sg_set_buf(sq->sg, hdr, hdr_len);
sq 1545 drivers/net/virtio_net.c num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len);
sq 1550 drivers/net/virtio_net.c return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC);
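xmit_skb() above chooses between two scatterlist layouts: if the virtio header can be pushed into the skb headroom, one sg run covers header plus data; otherwise the header occupies sg[0] and the payload starts at sg[1]. A sketch of just that arithmetic, with a fake sg type (the driver uses struct scatterlist and skb_to_sgvec()):

    struct sg_model { const void *addr; unsigned int len; };

    static unsigned int build_tx_sg(struct sg_model *sg, const void *hdr,
                                    unsigned int hdr_len, const void *data,
                                    unsigned int data_len, int can_push)
    {
        unsigned int n = 0;

        if (!can_push)                 /* header gets its own sg entry */
            sg[n++] = (struct sg_model){ hdr, hdr_len };
        /* with can_push the header was prepended to the data buffer */
        sg[n++] = (struct sg_model){ data, data_len };
        return n;                      /* passed to virtqueue_add_outbuf() */
    }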
sq 1557 drivers/net/virtio_net.c struct send_queue *sq = &vi->sq[qnum];
sq 1561 drivers/net/virtio_net.c bool use_napi = sq->napi.weight;
sq 1564 drivers/net/virtio_net.c free_old_xmit_skbs(sq, false);
sq 1567 drivers/net/virtio_net.c virtqueue_enable_cb_delayed(sq->vq);
sq 1573 drivers/net/virtio_net.c err = xmit_skb(sq, skb);
sq 1603 drivers/net/virtio_net.c if (sq->vq->num_free < 2+MAX_SKB_FRAGS) {
sq 1606 drivers/net/virtio_net.c unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
sq 1608 drivers/net/virtio_net.c free_old_xmit_skbs(sq, false);
sq 1609 drivers/net/virtio_net.c if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
sq 1611 drivers/net/virtio_net.c virtqueue_disable_cb(sq->vq);
sq 1617 drivers/net/virtio_net.c if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
sq 1618 drivers/net/virtio_net.c u64_stats_update_begin(&sq->stats.syncp);
sq 1619 drivers/net/virtio_net.c sq->stats.kicks++;
sq 1620 drivers/net/virtio_net.c u64_stats_update_end(&sq->stats.syncp);
sq 1728 drivers/net/virtio_net.c struct send_queue *sq = &vi->sq[i];
sq 1731 drivers/net/virtio_net.c start = u64_stats_fetch_begin_irq(&sq->stats.syncp);
sq 1732 drivers/net/virtio_net.c tpackets = sq->stats.packets;
sq 1733 drivers/net/virtio_net.c tbytes = sq->stats.bytes;
sq 1734 drivers/net/virtio_net.c } while (u64_stats_fetch_retry_irq(&sq->stats.syncp, start));
sq 1812 drivers/net/virtio_net.c virtnet_napi_tx_disable(&vi->sq[i].napi);
sq 1925 drivers/net/virtio_net.c virtqueue_set_affinity(vi->sq[i].vq, NULL);
sq 1962 drivers/net/virtio_net.c virtqueue_set_affinity(vi->sq[i].vq, mask);
sq 2026 drivers/net/virtio_net.c ring->tx_max_pending = virtqueue_get_vring_size(vi->sq[0].vq);
sq 2144 drivers/net/virtio_net.c struct send_queue *sq = &vi->sq[i];
sq 2146 drivers/net/virtio_net.c stats_base = (u8 *)&sq->stats;
sq 2148 drivers/net/virtio_net.c start = u64_stats_fetch_begin_irq(&sq->stats.syncp);
sq 2153 drivers/net/virtio_net.c } while (u64_stats_fetch_retry_irq(&sq->stats.syncp, start));
sq 2247 drivers/net/virtio_net.c if (napi_weight ^ vi->sq[0].napi.weight) {
sq 2251 drivers/net/virtio_net.c vi->sq[i].napi.weight = napi_weight;
sq 2268 drivers/net/virtio_net.c if (vi->sq[0].napi.weight)
sq 2332 drivers/net/virtio_net.c virtnet_napi_tx_disable(&vi->sq[i].napi);
sq 2357 drivers/net/virtio_net.c virtnet_napi_tx_enable(vi, vi->sq[i].vq,
sq 2358 drivers/net/virtio_net.c &vi->sq[i].napi);
sq 2460 drivers/net/virtio_net.c virtnet_napi_tx_disable(&vi->sq[i].napi);
sq 2492 drivers/net/virtio_net.c virtnet_napi_tx_enable(vi, vi->sq[i].vq,
sq 2493 drivers/net/virtio_net.c &vi->sq[i].napi);
sq 2509 drivers/net/virtio_net.c virtnet_napi_tx_enable(vi, vi->sq[i].vq,
sq 2510 drivers/net/virtio_net.c &vi->sq[i].napi);
sq 2650 drivers/net/virtio_net.c netif_napi_del(&vi->sq[i].napi);
sq 2659 drivers/net/virtio_net.c kfree(vi->sq);
sq 2700 drivers/net/virtio_net.c struct virtqueue *vq = vi->sq[i].vq;
sq 2796 drivers/net/virtio_net.c sprintf(vi->sq[i].name, "output.%d", i);
sq 2798 drivers/net/virtio_net.c names[txq2vq(i)] = vi->sq[i].name;
sq 2817 drivers/net/virtio_net.c vi->sq[i].vq = vqs[txq2vq(i)];
sq 2842 drivers/net/virtio_net.c vi->sq = kcalloc(vi->max_queue_pairs, sizeof(*vi->sq), GFP_KERNEL);
sq 2843 drivers/net/virtio_net.c if (!vi->sq)
sq 2854 drivers/net/virtio_net.c netif_tx_napi_add(vi->dev, &vi->sq[i].napi, virtnet_poll_tx,
sq 2859 drivers/net/virtio_net.c sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg));
sq 2862 drivers/net/virtio_net.c u64_stats_init(&vi->sq[i].stats.syncp);
sq 2868 drivers/net/virtio_net.c kfree(vi->sq);
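The sq->stats.syncp lines above are the usual writer/reader protocol around per-queue 64-bit counters. A simplified single-writer model using a plain seqcount (the kernel's u64_stats_* helpers reduce to roughly this on 32-bit and to plain loads/stores on 64-bit; memory barriers are omitted here for brevity):

    #include <stdint.h>

    struct stats_model { unsigned int seq; uint64_t packets, bytes; };

    static void stats_add(struct stats_model *s, uint64_t p, uint64_t b)
    {
        s->seq++;                 /* begin: seq odd while write in progress */
        s->packets += p;
        s->bytes += b;
        s->seq++;                 /* end: seq even again */
    }

    static void stats_read(const struct stats_model *s,
                           uint64_t *p, uint64_t *b)
    {
        unsigned int start;

        do {
            start = s->seq;       /* retry if a write raced with us */
            *p = s->packets;
            *b = s->bytes;
        } while ((start & 1) || start != s->seq);
    }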
sq 841 drivers/net/wireless/ath/ath10k/qmi.c struct sockaddr_qrtr *sq,
sq 850 drivers/net/wireless/ath/ath10k/qmi.c struct sockaddr_qrtr *sq,
sq 880 drivers/net/wireless/ath/ath10k/qmi.c struct sockaddr_qrtr *sq = &qmi->sq;
sq 884 drivers/net/wireless/ath/ath10k/qmi.c sq->sq_family = AF_QIPCRTR;
sq 885 drivers/net/wireless/ath/ath10k/qmi.c sq->sq_node = service->node;
sq 886 drivers/net/wireless/ath/ath10k/qmi.c sq->sq_port = service->port;
sq 890 drivers/net/wireless/ath/ath10k/qmi.c ret = kernel_connect(qmi_hdl->sock, (struct sockaddr *)&qmi->sq,
sq 891 drivers/net/wireless/ath/ath10k/qmi.c sizeof(qmi->sq), 0);
sq 89 drivers/net/wireless/ath/ath10k/qmi.h struct sockaddr_qrtr sq;
sq 29 drivers/net/wireless/ath/wil6210/rx_reorder.c static inline u16 seq_inc(u16 sq)
sq 31 drivers/net/wireless/ath/wil6210/rx_reorder.c return (sq + 1) & SEQ_MASK;
sq 5742 drivers/net/wireless/cisco/airo.c u16 sq;
sq 5750 drivers/net/wireless/cisco/airo.c sq = le16_to_cpu(status_rid->signalQuality);
sq 5752 drivers/net/wireless/cisco/airo.c if (sq > 0x20)
sq 5755 drivers/net/wireless/cisco/airo.c quality = 0x20 - sq;
sq 5757 drivers/net/wireless/cisco/airo.c if (sq > 0xb0)
sq 5759 drivers/net/wireless/cisco/airo.c else if (sq < 0x10)
sq 5762 drivers/net/wireless/cisco/airo.c quality = 0xb0 - sq;
sq 2664 drivers/net/wireless/intel/ipw2x00/ipw2100.c struct ipw2100_status_queue *sq = &priv->status_queue;
sq 2701 drivers/net/wireless/intel/ipw2x00/ipw2100.c frame_type = sq->drv[i].status_fields & STATUS_TYPE_MASK;
sq 2702 drivers/net/wireless/intel/ipw2x00/ipw2100.c stats.rssi = sq->drv[i].rssi + IPW2100_RSSI_TO_DBM;
sq 2703 drivers/net/wireless/intel/ipw2x00/ipw2100.c stats.len = sq->drv[i].frame_size;
sq 147 drivers/net/wireless/intersil/hostap/hostap_80211_rx.c LWNG_SETVAL(sq, 5, 1 /* no value */, 4, 0);
sq 993 drivers/net/wireless/intersil/hostap/hostap_main.c struct hfa384x_comms_quality sq;
sq 1002 drivers/net/wireless/intersil/hostap/hostap_main.c &sq, sizeof(sq), 1) >= 0) {
sq 1003 drivers/net/wireless/intersil/hostap/hostap_main.c local->comms_qual = (s16) le16_to_cpu(sq.comm_qual);
sq 1004 drivers/net/wireless/intersil/hostap/hostap_main.c local->avg_signal = (s16) le16_to_cpu(sq.signal_level);
sq 1005 drivers/net/wireless/intersil/hostap/hostap_main.c local->avg_noise = (s16) le16_to_cpu(sq.noise_level);
sq 1011 drivers/net/wireless/intersil/hostap/hostap_main.c &sq, sizeof(sq), 1) >= 0) {
sq 1012 drivers/net/wireless/intersil/hostap/hostap_main.c local->comms_qual = le16_to_cpu(sq.comm_qual);
sq 1014 drivers/net/wireless/intersil/hostap/hostap_main.c le16_to_cpu(sq.signal_level));
sq 1016 drivers/net/wireless/intersil/hostap/hostap_main.c le16_to_cpu(sq.noise_level));
sq 43 drivers/net/wireless/intersil/hostap/hostap_wlan.h struct linux_wlan_ng_val hosttime, mactime, channel, rssi, sq, signal,
sq 141 drivers/net/wireless/mediatek/mt76/dma.c struct mt76_sw_queue *sq = &dev->q_tx[qid];
sq 142 drivers/net/wireless/mediatek/mt76/dma.c struct mt76_queue *q = sq->q;
sq 380 drivers/net/wireless/mediatek/mt76/tx.c mt76_txq_send_burst(struct mt76_dev *dev, struct mt76_sw_queue *sq,
sq 386 drivers/net/wireless/mediatek/mt76/tx.c struct mt76_queue *hwq = sq->q;
sq 462 drivers/net/wireless/mediatek/mt76/tx.c hwq->entry[idx].qid = sq - dev->q_tx;
sq 464 drivers/net/wireless/mediatek/mt76/tx.c sq->swq_queued++;
sq 475 drivers/net/wireless/mediatek/mt76/tx.c struct mt76_sw_queue *sq = &dev->q_tx[qid];
sq 476 drivers/net/wireless/mediatek/mt76/tx.c struct mt76_queue *hwq = sq->q;
sq 486 drivers/net/wireless/mediatek/mt76/tx.c if (sq->swq_queued >= 4)
sq 516 drivers/net/wireless/mediatek/mt76/tx.c ret += mt76_txq_send_burst(dev, sq, mtxq, &empty);
sq 528 drivers/net/wireless/mediatek/mt76/tx.c struct mt76_sw_queue *sq = &dev->q_tx[qid];
sq 534 drivers/net/wireless/mediatek/mt76/tx.c if (sq->swq_queued >= 4)
sq 656 drivers/net/wireless/mediatek/mt76/usb.c struct mt76_sw_queue *sq;
sq 664 drivers/net/wireless/mediatek/mt76/usb.c sq = &dev->q_tx[i];
sq 665 drivers/net/wireless/mediatek/mt76/usb.c q = sq->q;
sq 686 drivers/net/wireless/mediatek/mt76/usb.c sq->swq_queued -= n_sw_dequeued;
sq 216 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c u8 agc, sq;
sq 291 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c sq = flags2 & 0xff;
sq 292 drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c signal = priv->rf->calc_rssi(agc, sq);
sq 68 drivers/net/wireless/realtek/rtl818x/rtl8180/grf5101.c static u8 grf5101_rf_calc_rssi(u8 agc, u8 sq)
sq 73 drivers/net/wireless/realtek/rtl818x/rtl8180/max2820.c static u8 max2820_rf_calc_rssi(u8 agc, u8 sq)
sq 88 drivers/net/wireless/realtek/rtl818x/rtl8180/sa2400.c static u8 sa2400_rf_calc_rssi(u8 agc, u8 sq)
sq 90 drivers/net/wireless/realtek/rtl818x/rtl8180/sa2400.c if (sq == 0x80)
sq 93 drivers/net/wireless/realtek/rtl818x/rtl8180/sa2400.c if (sq > 78)
sq 97 drivers/net/wireless/realtek/rtl818x/rtl8180/sa2400.c return 65 * sa2400_rf_rssi_map[sq] / 100;
sq 56 drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8187.h u8 sq;
sq 354 drivers/net/wireless/realtek/rtl818x/rtl818x.h u8 (*calc_rssi)(u8 agc, u8 sq);
sq 146 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c u8 sq;
sq 149 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c sq = 100;
sq 151 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c sq = cck_buf->sq_rpt;
sq 152 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c if (sq > 64)
sq 153 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c sq = 0;
sq 154 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c else if (sq < 20)
sq 155 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c sq = 100;
sq 157 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c sq = ((64 - sq) * 100) / 44;
sq 160 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c pstatus->signalquality = sq;
sq 161 drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c pstatus->rx_mimo_signalquality[0] = sq;
sq 167 drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c u8 sq;
sq 170 drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c sq = 100;
sq 172 drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c sq = cck_buf->sq_rpt;
sq 173 drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c if (sq > 64)
sq 174 drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c sq = 0;
sq 175 drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c else if (sq < 20)
sq 176 drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c sq = 100;
sq 178 drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c sq = ((64 - sq) * 100) / 44;
sq 181 drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c pstats->signalquality = sq;
sq 182 drivers/net/wireless/realtek/rtlwifi/rtl8192ce/trx.c pstats->rx_mimo_sig_qual[0] = sq;
sq 685 drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c u8 sq;
sq 688 drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c sq = 100;
sq 690 drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c sq = cck_buf->sq_rpt;
sq 691 drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c if (sq > 64)
sq 692 drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c sq = 0;
sq 693 drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c else if (sq < 20)
sq 694 drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c sq = 100;
sq 696 drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c sq = ((64 - sq) * 100) / 44;
sq 698 drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c pstats->signalquality = sq;
sq 699 drivers/net/wireless/realtek/rtlwifi/rtl8192cu/mac.c pstats->RX_SIGQ[0] = sq;
sq 162 drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c u8 sq;
sq 164 drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c sq = 100;
sq 166 drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c sq = cck_buf->sq_rpt;
sq 167 drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c if (sq > 64)
sq 168 drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c sq = 0;
sq 169 drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c else if (sq < 20)
sq 170 drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c sq = 100;
sq 172 drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c sq = ((64 - sq) * 100) / 44;
sq 174 drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c pstats->signalquality = sq;
sq 175 drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c pstats->rx_mimo_sig_qual[0] = sq;
sq 120 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c u8 sq, sq_rpt;
sq 123 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c sq = 100;
sq 127 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c sq = 0;
sq 129 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c sq = 100;
sq 131 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c sq = ((64 - sq_rpt) * 100) / 44;
sq 134 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c pstatus->signalquality = sq;
sq 135 drivers/net/wireless/realtek/rtlwifi/rtl8192ee/trx.c pstatus->rx_mimo_signalquality[0] = sq;
sq 127 drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c u8 sq;
sq 129 drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c sq = 100;
sq 131 drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c sq = cck_buf->sq_rpt;
sq 132 drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c if (sq > 64)
sq 133 drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c sq = 0;
sq 134 drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c else if (sq < 20)
sq 135 drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c sq = 100;
sq 137 drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c sq = ((64 - sq) * 100) / 44;
sq 140 drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c pstats->signalquality = sq;
sq 141 drivers/net/wireless/realtek/rtlwifi/rtl8192se/trx.c pstats->rx_mimo_sig_qual[0] = sq;
sq 128 drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c u8 sq;
sq 131 drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c sq = 100;
sq 133 drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c sq = cck_buf->sq_rpt;
sq 134 drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c if (sq > 64)
sq 135 drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c sq = 0;
sq 136 drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c else if (sq < 20)
sq 137 drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c sq = 100;
sq 139 drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c sq = ((64 - sq) * 100) / 44;
sq 142 drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c pstatus->signalquality = sq;
sq 143 drivers/net/wireless/realtek/rtlwifi/rtl8723ae/trx.c pstatus->rx_mimo_signalquality[0] = sq;
sq 97 drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c u8 sq, sq_rpt;
sq 99 drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c sq = 100;
sq 103 drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c sq = 0;
sq 105 drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c sq = 100;
sq 107 drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c sq = ((64 - sq_rpt) * 100) / 44;
sq 109 drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c pstatus->signalquality = sq;
sq 110 drivers/net/wireless/realtek/rtlwifi/rtl8723be/trx.c pstatus->rx_mimo_signalquality[0] = sq;
sq 177 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c u8 sq;
sq 180 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c sq = 100;
sq 182 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c sq = p_phystrpt->pwdb_all;
sq 183 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c if (sq > 64)
sq 184 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c sq = 0;
sq 185 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c else if (sq < 20)
sq 186 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c sq = 100;
sq 188 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c sq = ((64 - sq) * 100) / 44;
sq 191 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c pstatus->signalquality = sq;
sq 192 drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c pstatus->rx_mimo_signalquality[0] = sq;
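The same clamp-and-rescale repeats verbatim across the rtlwifi trx.c files above: a raw CCK sq report above 64 means no usable signal (0%), below 20 means full quality (100%), and everything between maps linearly through ((64 - sq) * 100) / 44. Factored into one helper, it is just:

    static unsigned char cck_sq_to_quality(unsigned char sq_rpt)
    {
        if (sq_rpt > 64)
            return 0;             /* worse than the usable range */
        if (sq_rpt < 20)
            return 100;           /* better than the usable range */
        return ((64 - sq_rpt) * 100) / 44;   /* 20..64 -> 100..0 */
    }

At the boundaries, sq_rpt == 20 yields 100 and sq_rpt == 64 yields 0, so the linear segment meets both clamps exactly.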
sq 39 drivers/nvme/target/admin-cmd.c struct nvmet_ctrl *ctrl = req->sq->ctrl;
sq 69 drivers/nvme/target/admin-cmd.c ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->get_log_page.nsid);
sq 106 drivers/nvme/target/admin-cmd.c ctrl = req->sq->ctrl;
sq 151 drivers/nvme/target/admin-cmd.c spin_lock_irqsave(&req->sq->ctrl->error_lock, flags);
sq 152 drivers/nvme/target/admin-cmd.c put_unaligned_le64(req->sq->ctrl->err_counter,
sq 154 drivers/nvme/target/admin-cmd.c spin_unlock_irqrestore(&req->sq->ctrl->error_lock, flags);
sq 195 drivers/nvme/target/admin-cmd.c struct nvmet_ctrl *ctrl = req->sq->ctrl;
sq 220 drivers/nvme/target/admin-cmd.c struct nvmet_ctrl *ctrl = req->sq->ctrl;
sq 287 drivers/nvme/target/admin-cmd.c struct nvmet_ctrl *ctrl = req->sq->ctrl;
sq 427 drivers/nvme/target/admin-cmd.c ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->identify.nsid);
sq 479 drivers/nvme/target/admin-cmd.c struct nvmet_ctrl *ctrl = req->sq->ctrl;
sq 537 drivers/nvme/target/admin-cmd.c ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->identify.nsid);
sq 598 drivers/nvme/target/admin-cmd.c struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
sq 601 drivers/nvme/target/admin-cmd.c req->ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->rw.nsid);
sq 633 drivers/nvme/target/admin-cmd.c req->sq->ctrl->kato = DIV_ROUND_UP(val32, 1000);
sq 635 drivers/nvme/target/admin-cmd.c nvmet_set_result(req, req->sq->ctrl->kato);
sq 649 drivers/nvme/target/admin-cmd.c WRITE_ONCE(req->sq->ctrl->aen_enabled, val32);
sq 657 drivers/nvme/target/admin-cmd.c struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
sq 689 drivers/nvme/target/admin-cmd.c struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
sq 692 drivers/nvme/target/admin-cmd.c req->ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->common.nsid);
sq 710 drivers/nvme/target/admin-cmd.c nvmet_set_result(req, req->sq->ctrl->kato * 1000);
sq 715 drivers/nvme/target/admin-cmd.c nvmet_set_result(req, READ_ONCE(req->sq->ctrl->aen_enabled));
sq 720 drivers/nvme/target/admin-cmd.c struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
sq 768 drivers/nvme/target/admin-cmd.c status = nvmet_copy_to_sgl(req, 0, &req->sq->ctrl->hostid,
sq 769 drivers/nvme/target/admin-cmd.c sizeof(req->sq->ctrl->hostid));
sq 786 drivers/nvme/target/admin-cmd.c struct nvmet_ctrl *ctrl = req->sq->ctrl;
sq 802 drivers/nvme/target/admin-cmd.c struct nvmet_ctrl *ctrl = req->sq->ctrl;
sq 891 drivers/nvme/target/admin-cmd.c req->sq->qid);
sq 663 drivers/nvme/target/core.c if (req->sq->size) {
sq 667 drivers/nvme/target/core.c old_sqhd = req->sq->sqhd;
sq 668 drivers/nvme/target/core.c new_sqhd = (old_sqhd + 1) % req->sq->size;
sq 669 drivers/nvme/target/core.c } while (cmpxchg(&req->sq->sqhd, old_sqhd, new_sqhd) !=
sq 672 drivers/nvme/target/core.c req->cqe->sq_head = cpu_to_le16(req->sq->sqhd & 0x0000FFFF);
sq 677 drivers/nvme/target/core.c struct nvmet_ctrl *ctrl = req->sq->ctrl;
sq 692 drivers/nvme/target/core.c new_error_slot->sqid = cpu_to_le16(req->sq->qid);
sq 706 drivers/nvme/target/core.c if (!req->sq->sqhd_disabled)
sq 708 drivers/nvme/target/core.c req->cqe->sq_id = cpu_to_le16(req->sq->qid);
sq 724 drivers/nvme/target/core.c percpu_ref_put(&req->sq->ref);
sq 737 drivers/nvme/target/core.c void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq,
sq 740 drivers/nvme/target/core.c sq->sqhd = 0;
sq 741 drivers/nvme/target/core.c sq->qid = qid;
sq 742 drivers/nvme/target/core.c sq->size = size;
sq 744 drivers/nvme/target/core.c ctrl->sqs[qid] = sq;
sq 749 drivers/nvme/target/core.c struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref);
sq 751 drivers/nvme/target/core.c complete(&sq->confirm_done);
sq 754 drivers/nvme/target/core.c void nvmet_sq_destroy(struct nvmet_sq *sq)
sq 760 drivers/nvme/target/core.c if (sq->ctrl && sq->ctrl->sqs && sq->ctrl->sqs[0] == sq)
sq 761 drivers/nvme/target/core.c nvmet_async_events_free(sq->ctrl);
sq 762 drivers/nvme/target/core.c percpu_ref_kill_and_confirm(&sq->ref, nvmet_confirm_sq);
sq 763 drivers/nvme/target/core.c wait_for_completion(&sq->confirm_done);
sq 764 drivers/nvme/target/core.c wait_for_completion(&sq->free_done);
sq 765 drivers/nvme/target/core.c percpu_ref_exit(&sq->ref);
sq 767 drivers/nvme/target/core.c if (sq->ctrl) {
sq 768 drivers/nvme/target/core.c nvmet_ctrl_put(sq->ctrl);
sq 769 drivers/nvme/target/core.c sq->ctrl = NULL; /* allows reusing the queue later */
sq 776 drivers/nvme/target/core.c struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref);
sq 778 drivers/nvme/target/core.c complete(&sq->free_done);
sq 781 drivers/nvme/target/core.c int nvmet_sq_init(struct nvmet_sq *sq)
sq 785 drivers/nvme/target/core.c ret = percpu_ref_init(&sq->ref, nvmet_sq_free, 0, GFP_KERNEL);
sq 790 drivers/nvme/target/core.c init_completion(&sq->free_done);
sq 791 drivers/nvme/target/core.c init_completion(&sq->confirm_done);
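nvmet_sq_init()/nvmet_sq_destroy() above implement a grace-period handshake: in-flight requests hold a reference on the queue, and teardown kills the reference, waits for confirmation and for the last reference to drop, then frees. A userspace model of that lifecycle with plain C11 atomics standing in for percpu_ref and completions (memory-ordering subtleties and blocking waits are deliberately simplified):

    #include <stdatomic.h>
    #include <stdbool.h>

    struct sq_model {
        atomic_int  refs;          /* one base reference held by the queue */
        atomic_bool dying;
    };

    static void sq_model_init(struct sq_model *sq)
    {
        atomic_init(&sq->refs, 1);
        atomic_init(&sq->dying, false);
    }

    static bool sq_model_tryget(struct sq_model *sq)  /* per request */
    {
        int r = atomic_load(&sq->refs);
        while (r > 0 && !atomic_load(&sq->dying)) {
            if (atomic_compare_exchange_weak(&sq->refs, &r, r + 1))
                return true;       /* like percpu_ref_tryget_live() */
        }
        return false;
    }

    static void sq_model_put(struct sq_model *sq)
    {
        atomic_fetch_sub(&sq->refs, 1);
    }

    static void sq_model_destroy(struct sq_model *sq)
    {
        atomic_store(&sq->dying, true);     /* kill_and_confirm */
        sq_model_put(sq);                   /* drop the base reference */
        while (atomic_load(&sq->refs) > 0)  /* wait_for_completion(), busily */
            ;
    }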
sq 835 drivers/nvme/target/core.c req->ns = nvmet_find_namespace(req->sq->ctrl, cmd->rw.nsid);
sq 858 drivers/nvme/target/core.c struct nvmet_sq *sq, const struct nvmet_fabrics_ops *ops)
sq 864 drivers/nvme/target/core.c req->sq = sq;
sq 895 drivers/nvme/target/core.c if (unlikely(!req->sq->ctrl))
sq 898 drivers/nvme/target/core.c else if (likely(req->sq->qid != 0))
sq 902 drivers/nvme/target/core.c else if (req->sq->ctrl->subsys->type == NVME_NQN_DISC)
sq 910 drivers/nvme/target/core.c if (unlikely(!percpu_ref_tryget_live(&sq->ref))) {
sq 915 drivers/nvme/target/core.c if (sq->ctrl)
sq 916 drivers/nvme/target/core.c sq->ctrl->cmd_seen = true;
sq 928 drivers/nvme/target/core.c percpu_ref_put(&req->sq->ref);
sq 949 drivers/nvme/target/core.c if (req->sq->ctrl && req->ns)
sq 950 drivers/nvme/target/core.c p2p_dev = radix_tree_lookup(&req->sq->ctrl->p2p_ns_map,
sq 954 drivers/nvme/target/core.c if (req->sq->qid && p2p_dev) {
sq 1131 drivers/nvme/target/core.c if (unlikely(!(req->sq->ctrl->cc & NVME_CC_ENABLE))) {
sq 1133 drivers/nvme/target/core.c cmd->common.opcode, req->sq->qid);
sq 1137 drivers/nvme/target/core.c if (unlikely(!(req->sq->ctrl->csts & NVME_CSTS_RDY))) {
sq 1139 drivers/nvme/target/core.c cmd->common.opcode, req->sq->qid);
sq 145 drivers/nvme/target/discovery.c struct nvmet_ctrl *ctrl = req->sq->ctrl;
sq 163 drivers/nvme/target/discovery.c struct nvmet_ctrl *ctrl = req->sq->ctrl;
sq 232 drivers/nvme/target/discovery.c struct nvmet_ctrl *ctrl = req->sq->ctrl;
sq 320 drivers/nvme/target/discovery.c if (unlikely(!(req->sq->ctrl->csts & NVME_CSTS_RDY))) {
sq 24 drivers/nvme/target/fabrics-cmd.c nvmet_update_cc(req->sq->ctrl, val);
sq 37 drivers/nvme/target/fabrics-cmd.c struct nvmet_ctrl *ctrl = req->sq->ctrl;
sq 110 drivers/nvme/target/fabrics-cmd.c old = cmpxchg(&req->sq->ctrl, NULL, ctrl);
sq 125 drivers/nvme/target/fabrics-cmd.c nvmet_sq_setup(ctrl, req->sq, qid, sqsize + 1);
sq 128 drivers/nvme/target/fabrics-cmd.c req->sq->sqhd_disabled = true;
sq 133 drivers/nvme/target/fabrics-cmd.c ret = ctrl->ops->install_queue(req->sq);
sq 144 drivers/nvme/target/fabrics-cmd.c req->sq->ctrl = NULL;
sq 339 drivers/nvme/target/io-cmd-bdev.c req->sq->qid);
sq 391 drivers/nvme/target/io-cmd-file.c cmd->common.opcode, req->sq->qid);
sq 96 drivers/nvme/target/loop.c container_of(req->sq, struct nvme_loop_queue, nvme_sq);
sq 290 drivers/nvme/target/nvmet.h struct nvmet_sq *sq;
sq 351 drivers/nvme/target/nvmet.h clear_bit(bn, &req->sq->ctrl->aen_masked);
sq 376 drivers/nvme/target/nvmet.h struct nvmet_sq *sq, const struct nvmet_fabrics_ops *ops);
sq 387 drivers/nvme/target/nvmet.h void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq, u16 qid,
sq 389 drivers/nvme/target/nvmet.h void nvmet_sq_destroy(struct nvmet_sq *sq);
sq 390 drivers/nvme/target/nvmet.h int nvmet_sq_init(struct nvmet_sq *sq);
sq 1684 drivers/nvme/target/tcp.c static u16 nvmet_tcp_install_queue(struct nvmet_sq *sq)
sq 1687 drivers/nvme/target/tcp.c container_of(sq, struct nvmet_tcp_queue, nvme_sq);
sq 1689 drivers/nvme/target/tcp.c if (sq->qid == 0) {
sq 1694 drivers/nvme/target/tcp.c queue->nr_cmds = sq->size * 2;
sq 46 drivers/nvme/target/trace.h return req->sq->ctrl;
sq 55 drivers/nvme/target/trace.h if ((init && req->sq->qid) || (!init && req->cq->qid)) {
sq 85 drivers/nvme/target/trace.h __entry->qid = req->sq->qid;
sq 264 drivers/remoteproc/qcom_sysmon.c static void sysmon_ind_cb(struct qmi_handle *qmi, struct sockaddr_qrtr *sq,
sq 287 drivers/s390/block/scm_blk.c struct scm_queue *sq = hctx->driver_data;
sq 291 drivers/s390/block/scm_blk.c spin_lock(&sq->lock);
sq 293 drivers/s390/block/scm_blk.c spin_unlock(&sq->lock);
sq 297 drivers/s390/block/scm_blk.c scmrq = sq->scmrq;
sq 302 drivers/s390/block/scm_blk.c spin_unlock(&sq->lock);
sq 306 drivers/s390/block/scm_blk.c sq->scmrq = scmrq;
sq 317 drivers/s390/block/scm_blk.c sq->scmrq = NULL;
sq 318 drivers/s390/block/scm_blk.c spin_unlock(&sq->lock);
sq 325 drivers/s390/block/scm_blk.c sq->scmrq = NULL;
sq 327 drivers/s390/block/scm_blk.c spin_unlock(&sq->lock);
sq 888 drivers/scsi/bnx2fc/bnx2fc_tgt.c if (tgt->sq) {
sq 890 drivers/scsi/bnx2fc/bnx2fc_tgt.c tgt->sq, tgt->sq_dma);
sq 891 drivers/scsi/bnx2fc/bnx2fc_tgt.c tgt->sq = NULL;
sq 198 drivers/scsi/cxlflash/common.h struct sisl_ioarcb sq[NUM_SQ_ENTRY]; /* 16K SQ */
sq 1925 drivers/scsi/cxlflash/main.c memset(&hwq->sq, 0, sizeof(hwq->sq));
sq 1926 drivers/scsi/cxlflash/main.c hwq->hsq_start = &hwq->sq[0];
sq 1927 drivers/scsi/cxlflash/main.c hwq->hsq_end = &hwq->sq[NUM_SQ_ENTRY - 1];
sq 217 drivers/scsi/qedf/qedf.h struct fcoe_wqe *sq;
sq 120 drivers/scsi/qedf/qedf_els.c sqe = &fcport->sq[sqe_idx];
sq 717 drivers/scsi/qedf/qedf_els.c sqe = &fcport->sq[sqe_idx];
sq 913 drivers/scsi/qedf/qedf_io.c sqe = &fcport->sq[sqe_idx];
sq 1920 drivers/scsi/qedf/qedf_io.c sqe = &fcport->sq[sqe_idx];
sq 2206 drivers/scsi/qedf/qedf_io.c sqe = &fcport->sq[sqe_idx];
sq 2337 drivers/scsi/qedf/qedf_io.c sqe = &fcport->sq[sqe_idx];
sq 1191 drivers/scsi/qedf/qedf_main.c fcport->sq = dma_alloc_coherent(&qedf->pdev->dev, fcport->sq_mem_size,
sq 1193 drivers/scsi/qedf/qedf_main.c if (!fcport->sq) {
sq 1224 drivers/scsi/qedf/qedf_main.c dma_free_coherent(&qedf->pdev->dev, fcport->sq_mem_size, fcport->sq,
sq 1235 drivers/scsi/qedf/qedf_main.c if (fcport->sq)
sq 1237 drivers/scsi/qedf/qedf_main.c fcport->sq, fcport->sq_dma);
sq 1089 drivers/scsi/qedi/qedi_fw.c task_params.sqe = &ep->sq[sq_idx];
sq 1163 drivers/scsi/qedi/qedi_fw.c task_params.sqe = &ep->sq[sq_idx];
sq 1534 drivers/scsi/qedi/qedi_fw.c task_params.sqe = &ep->sq[sq_idx];
sq 1674 drivers/scsi/qedi/qedi_fw.c task_params.sqe = &ep->sq[sq_idx];
sq 1791 drivers/scsi/qedi/qedi_fw.c task_params.sqe = &ep->sq[sq_idx];
sq 2153 drivers/scsi/qedi/qedi_fw.c task_params.sqe = &ep->sq[sq_idx];
sq 2208 drivers/scsi/qedi/qedi_fw.c task_params.sqe = &ep->sq[sq_idx];
sq 85 drivers/scsi/qedi/qedi_iscsi.h struct iscsi_wqe *sq;
sq 1706 drivers/scsi/qedi/qedi_main.c ep->sq = dma_alloc_coherent(&qedi->pdev->dev, ep->sq_mem_size,
sq 1708 drivers/scsi/qedi/qedi_main.c if (!ep->sq) {
sq 1739 drivers/scsi/qedi/qedi_main.c dma_free_coherent(&qedi->pdev->dev, ep->sq_mem_size, ep->sq,
sq 1750 drivers/scsi/qedi/qedi_main.c if (ep->sq)
sq 1751 drivers/scsi/qedi/qedi_main.c dma_free_coherent(&qedi->pdev->dev, ep->sq_mem_size, ep->sq,
sq 377 drivers/slimbus/qcom-ngd-ctrl.c struct sockaddr_qrtr *sq,
sq 18 drivers/soc/qcom/qmi_interface.c struct sockaddr_qrtr *sq);
sq 167 drivers/soc/qcom/qmi_interface.c struct sockaddr_qrtr sq;
sq 177 drivers/soc/qcom/qmi_interface.c sq.sq_family = qmi->sq.sq_family;
sq 178 drivers/soc/qcom/qmi_interface.c sq.sq_node = qmi->sq.sq_node;
sq 179 drivers/soc/qcom/qmi_interface.c sq.sq_port = QRTR_PORT_CTRL;
sq 181 drivers/soc/qcom/qmi_interface.c msg.msg_name = &sq;
sq 182 drivers/soc/qcom/qmi_interface.c msg.msg_namelen = sizeof(sq);
sq 230 drivers/soc/qcom/qmi_interface.c struct sockaddr_qrtr sq;
sq 239 drivers/soc/qcom/qmi_interface.c pkt.server.node = cpu_to_le32(qmi->sq.sq_node);
sq 240 drivers/soc/qcom/qmi_interface.c pkt.server.port = cpu_to_le32(qmi->sq.sq_port);
sq 242 drivers/soc/qcom/qmi_interface.c sq.sq_family = qmi->sq.sq_family;
sq 243 drivers/soc/qcom/qmi_interface.c sq.sq_node = qmi->sq.sq_node;
sq 244 drivers/soc/qcom/qmi_interface.c sq.sq_port = QRTR_PORT_CTRL;
sq 246 drivers/soc/qcom/qmi_interface.c msg.msg_name = &sq;
sq 247 drivers/soc/qcom/qmi_interface.c msg.msg_namelen = sizeof(sq);
sq 389 drivers/soc/qcom/qmi_interface.c static void qmi_invoke_handler(struct qmi_handle *qmi, struct sockaddr_qrtr *sq,
sq 417 drivers/soc/qcom/qmi_interface.c handler->fn(qmi, sq, txn, dest);
sq 441 drivers/soc/qcom/qmi_interface.c struct sockaddr_qrtr sq;
sq 445 drivers/soc/qcom/qmi_interface.c sock = qmi_sock_create(qmi, &sq);
sq 461 drivers/soc/qcom/qmi_interface.c qmi->sq = sq;
sq 472 drivers/soc/qcom/qmi_interface.c struct sockaddr_qrtr *sq,
sq 509 drivers/soc/qcom/qmi_interface.c qmi_invoke_handler(qmi, sq, txn, buf, len);
sq 518 drivers/soc/qcom/qmi_interface.c qmi_invoke_handler(qmi, sq, &tmp_txn, buf, len);
sq 526 drivers/soc/qcom/qmi_interface.c struct sockaddr_qrtr sq;
sq 527 drivers/soc/qcom/qmi_interface.c struct msghdr msg = { .msg_name = &sq, .msg_namelen = sizeof(sq) };
sq 557 drivers/soc/qcom/qmi_interface.c if (sq.sq_node == qmi->sq.sq_node &&
sq 558 drivers/soc/qcom/qmi_interface.c sq.sq_port == QRTR_PORT_CTRL) {
sq 561 drivers/soc/qcom/qmi_interface.c ops->msg_handler(qmi, &sq, qmi->recv_buf, msglen);
sq 563 drivers/soc/qcom/qmi_interface.c qmi_handle_message(qmi, &sq, qmi->recv_buf, msglen);
sq 583 drivers/soc/qcom/qmi_interface.c struct sockaddr_qrtr *sq)
sq 593 drivers/soc/qcom/qmi_interface.c ret = kernel_getsockname(sock, (struct sockaddr *)sq);
sq 656 drivers/soc/qcom/qmi_interface.c qmi->sock = qmi_sock_create(qmi, &qmi->sq);
sq 733 drivers/soc/qcom/qmi_interface.c struct sockaddr_qrtr *sq, struct qmi_txn *txn,
sq 752 drivers/soc/qcom/qmi_interface.c if (sq) {
sq 753 drivers/soc/qcom/qmi_interface.c msghdr.msg_name = sq;
sq 754 drivers/soc/qcom/qmi_interface.c msghdr.msg_namelen = sizeof(*sq);
sq 784 drivers/soc/qcom/qmi_interface.c ssize_t qmi_send_request(struct qmi_handle *qmi, struct sockaddr_qrtr *sq,
sq 788 drivers/soc/qcom/qmi_interface.c return qmi_send_message(qmi, sq, txn, QMI_REQUEST, msg_id, len, ei,
sq 805 drivers/soc/qcom/qmi_interface.c ssize_t qmi_send_response(struct qmi_handle *qmi, struct sockaddr_qrtr *sq,
sq 809 drivers/soc/qcom/qmi_interface.c return qmi_send_message(qmi, sq, txn, QMI_RESPONSE, msg_id, len, ei,
sq 825 drivers/soc/qcom/qmi_interface.c ssize_t qmi_send_indication(struct qmi_handle *qmi, struct sockaddr_qrtr *sq,
sq 837 drivers/soc/qcom/qmi_interface.c rval = qmi_send_message(qmi, sq, &txn, QMI_INDICATION, msg_id, len, ei,
sq 131 drivers/staging/ks7010/ks_hostif.c ap->sq = ap_info->sq;
sq 182 drivers/staging/ks7010/ks_hostif.c ap->channel, ap->rssi, ap->sq, ap->capability,
sq 269 drivers/staging/ks7010/ks_hostif.c ap->sq = ap_info->sq;
sq 305 drivers/staging/ks7010/ks_hostif.h u8 sq; /* +07 */
sq 320 drivers/staging/ks7010/ks_hostif.h u8 sq; /* +07 */
sq 205 drivers/staging/ks7010/ks_wlan.h u8 sq;
sq 1103 drivers/staging/ks7010/ks_wlan_net.c qual[i].qual = priv->aplist.ap[i].sq;
sq 1229 drivers/staging/ks7010/ks_wlan_net.c iwe.u.qual.qual = ap->sq;
sq 107 drivers/staging/rtl8188eu/os_dep/ioctl_linux.c u8 ss, sq;
sq 305 drivers/staging/rtl8188eu/os_dep/ioctl_linux.c sq = padapter->recvpriv.signal_qual;
sq 308 drivers/staging/rtl8188eu/os_dep/ioctl_linux.c sq = pnetwork->network.PhyInfo.SignalQuality;
sq 312 drivers/staging/rtl8188eu/os_dep/ioctl_linux.c iwe.u.qual.qual = (u8)sq; /* signal quality */
sq 1575 drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c u8 sq;
sq 1578 drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c sq = 100;
sq 1580 drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c sq = pcck_buf->sq_rpt;
sq 1583 drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c sq = 0;
sq 1585 drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c sq = 100;
sq 1587 drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c sq = ((64-sq) * 100) / 44;
sq 1589 drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c pstats->SignalQuality = sq;
sq 1590 drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c precord_stats->SignalQuality = sq;
sq 1591 drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c pstats->RxMIMOSignalQuality[0] = sq;
sq 1592 drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c precord_stats->RxMIMOSignalQuality[0] = sq;
sq 4091 drivers/staging/rtl8192u/r8192U_core.c u8 sq;
sq 4174 drivers/staging/rtl8192u/r8192U_core.c sq = 100;
sq 4176 drivers/staging/rtl8192u/r8192U_core.c sq = pcck_buf->sq_rpt;
sq 4179 drivers/staging/rtl8192u/r8192U_core.c sq = 0;
sq 4181 drivers/staging/rtl8192u/r8192U_core.c sq = 100;
sq 4183 drivers/staging/rtl8192u/r8192U_core.c sq = ((64 - sq) * 100) / 44;
sq 4185 drivers/staging/rtl8192u/r8192U_core.c pstats->SignalQuality = precord_stats->SignalQuality = sq;
sq 4187 drivers/staging/rtl8192u/r8192U_core.c precord_stats->RxMIMOSignalQuality[0] = sq;
sq 727 drivers/staging/rtl8712/rtl8712_recv.c u8 sq;
sq 808 drivers/staging/rtl8712/rtl8712_recv.c sq = 100;
sq 810 drivers/staging/rtl8712/rtl8712_recv.c sq = pcck_buf->sq_rpt;
sq 812 drivers/staging/rtl8712/rtl8712_recv.c sq = 0;
sq 814 drivers/staging/rtl8712/rtl8712_recv.c sq = 100;
sq 816 drivers/staging/rtl8712/rtl8712_recv.c sq = ((64 - sq) * 100) / 44;
sq 818 drivers/staging/rtl8712/rtl8712_recv.c prframe->u.hdr.attrib.signal_qual = sq;
sq 819 drivers/staging/rtl8712/rtl8712_recv.c prframe->u.hdr.attrib.rx_mimo_signal_qual[0] = sq;
sq 95 drivers/staging/rtl8723bs/os_dep/ioctl_linux.c u8 ss, sq;
sq 327 drivers/staging/rtl8723bs/os_dep/ioctl_linux.c sq = padapter->recvpriv.signal_qual;
sq 330 drivers/staging/rtl8723bs/os_dep/ioctl_linux.c sq = pnetwork->network.PhyInfo.SignalQuality;
sq 350 drivers/staging/rtl8723bs/os_dep/ioctl_linux.c iwe.u.qual.qual = (u8)sq; /* signal quality */
sq 39 drivers/staging/vt6656/dpc.c u8 *rx_rate, *sq, *sq_3;
sq 116 drivers/staging/vt6656/dpc.c sq = sq_3;
sq 118 drivers/staging/vt6656/dpc.c sq = skb_data + 8 + pay_load_with_padding + 8;
sq 119 drivers/staging/vt6656/dpc.c sq_3 = sq;
sq 104 fs/io_uring.c struct io_uring sq, cq;
sq 759 fs/io_uring.c return smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
sq 2627 fs/io_uring.c if (ctx->cached_sq_head != READ_ONCE(rings->sq.head)) {
sq 2633 fs/io_uring.c smp_store_release(&rings->sq.head, ctx->cached_sq_head);
sq 2661 fs/io_uring.c if (head == smp_load_acquire(&rings->sq.tail))
sq 3662 fs/io_uring.c if (READ_ONCE(ctx->rings->sq.tail) - ctx->cached_sq_head !=
sq 3944 fs/io_uring.c p->sq_off.head = offsetof(struct io_rings, sq.head);
sq 3945 fs/io_uring.c p->sq_off.tail = offsetof(struct io_rings, sq.tail);
sq 238 include/linux/dmar.h sq : 2, /* 80 - 81 */
sq 263 include/linux/dmar.h dst->sq = src->sq;
sq 595 include/linux/mlx5/qp.h struct mlx5_core_qp *sq);
sq 597 include/linux/mlx5/qp.h struct mlx5_core_qp *sq);
sq 149 include/linux/soc/qcom/qmi.h void (*msg_handler)(struct qmi_handle *qmi, struct sockaddr_qrtr *sq,
sq 194 include/linux/soc/qcom/qmi.h void (*fn)(struct qmi_handle *qmi, struct sockaddr_qrtr *sq,
sq 219 include/linux/soc/qcom/qmi.h struct sockaddr_qrtr sq;
sq 249 include/linux/soc/qcom/qmi.h ssize_t qmi_send_request(struct qmi_handle *qmi, struct sockaddr_qrtr *sq,
sq 252 include/linux/soc/qcom/qmi.h ssize_t qmi_send_response(struct qmi_handle *qmi, struct sockaddr_qrtr *sq,
sq 255 include/linux/soc/qcom/qmi.h ssize_t qmi_send_indication(struct qmi_handle *qmi, struct sockaddr_qrtr *sq,
sq 1692 kernel/rcu/tree.c struct swait_queue_head *sq;
sq 1733 kernel/rcu/tree.c sq = rcu_nocb_gp_get(rnp);
sq 1735 kernel/rcu/tree.c rcu_nocb_gp_cleanup(sq);
sq 432 kernel/rcu/tree.h static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq);
sq 1614 kernel/rcu/tree_plugin.h static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq)
sq 1616 kernel/rcu/tree_plugin.h swake_up_all(sq);
sq 2485 kernel/rcu/tree_plugin.h static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq)
sq 932 net/qrtr/qrtr.c struct sockaddr_qrtr *sq;
sq 959 net/qrtr/qrtr.c sq = (struct sockaddr_qrtr *)&ifr.ifr_addr;
sq 960 net/qrtr/qrtr.c *sq = ipc->us;
sq 336 samples/qmi/qmi_sample_client.c static void ping_pong_cb(struct qmi_handle *qmi, struct sockaddr_qrtr *sq,
sq 455 samples/qmi/qmi_sample_client.c struct sockaddr_qrtr *sq;
sq 470 samples/qmi/qmi_sample_client.c sq = dev_get_platdata(&pdev->dev);
sq 471 samples/qmi/qmi_sample_client.c ret = kernel_connect(sample->qmi.sock, (struct sockaddr *)sq,
sq 472 samples/qmi/qmi_sample_client.c sizeof(*sq), 0);
sq 478 samples/qmi/qmi_sample_client.c snprintf(path, sizeof(path), "%d:%d", sq->sq_node, sq->sq_port);
sq 539 samples/qmi/qmi_sample_client.c struct sockaddr_qrtr sq = { AF_QIPCRTR, service->node, service->port };
sq 546 samples/qmi/qmi_sample_client.c ret = platform_device_add_data(pdev, &sq, sizeof(sq));
sq 415 sound/oss/dmasound/dmasound_core.c static int sq_allocate_buffers(struct sound_queue *sq, int num, int size)
sq 419 sound/oss/dmasound/dmasound_core.c if (sq->buffers)
sq 421 sound/oss/dmasound/dmasound_core.c sq->numBufs = num;
sq 422 sound/oss/dmasound/dmasound_core.c sq->bufSize = size;
sq 423 sound/oss/dmasound/dmasound_core.c sq->buffers = kmalloc_array (num, sizeof(char *), GFP_KERNEL);
sq 424 sound/oss/dmasound/dmasound_core.c if (!sq->buffers)
sq 427 sound/oss/dmasound/dmasound_core.c sq->buffers[i] = dmasound.mach.dma_alloc(size, GFP_KERNEL);
sq 428 sound/oss/dmasound/dmasound_core.c if (!sq->buffers[i]) {
sq 430 sound/oss/dmasound/dmasound_core.c dmasound.mach.dma_free(sq->buffers[i], size);
sq 431 sound/oss/dmasound/dmasound_core.c kfree(sq->buffers);
sq 432 sound/oss/dmasound/dmasound_core.c sq->buffers = NULL;
sq 439 sound/oss/dmasound/dmasound_core.c static void sq_release_buffers(struct sound_queue *sq)
sq 443 sound/oss/dmasound/dmasound_core.c if (sq->buffers) {
sq 444 sound/oss/dmasound/dmasound_core.c for (i = 0; i < sq->numBufs; i++)
sq 445 sound/oss/dmasound/dmasound_core.c dmasound.mach.dma_free(sq->buffers[i], sq->bufSize);
sq 446 sound/oss/dmasound/dmasound_core.c kfree(sq->buffers);
sq 447 sound/oss/dmasound/dmasound_core.c sq->buffers = NULL;
sq 452 sound/oss/dmasound/dmasound_core.c static int sq_setup(struct sound_queue *sq)
sq 457 sound/oss/dmasound/dmasound_core.c if (sq->locked) { /* are we already set? - and not changeable */
sq 463 sound/oss/dmasound/dmasound_core.c sq->locked = 1 ; /* don't think we have a race prob. here _check_ */
sq 483 sound/oss/dmasound/dmasound_core.c if (sq->user_frags <= 0) {
sq 484 sound/oss/dmasound/dmasound_core.c sq->max_count = sq->numBufs ;
sq 485 sound/oss/dmasound/dmasound_core.c sq->max_active = sq->numBufs ;
sq 486 sound/oss/dmasound/dmasound_core.c sq->block_size = sq->bufSize;
sq 488 sound/oss/dmasound/dmasound_core.c sq->user_frags = sq->numBufs ;
sq 489 sound/oss/dmasound/dmasound_core.c sq->user_frag_size = sq->bufSize ;
sq 490 sound/oss/dmasound/dmasound_core.c sq->user_frag_size *=
sq 492 sound/oss/dmasound/dmasound_core.c sq->user_frag_size /=
sq 496 sound/oss/dmasound/dmasound_core.c sq->block_size = sq->user_frag_size ;
sq 497 sound/oss/dmasound/dmasound_core.c sq->block_size *=
sq 499 sound/oss/dmasound/dmasound_core.c sq->block_size /=
sq 502 sound/oss/dmasound/dmasound_core.c sq->block_size *= dmasound.hard.speed ;
sq 503 sound/oss/dmasound/dmasound_core.c sq->block_size /= dmasound.soft.speed ;
sq 507 sound/oss/dmasound/dmasound_core.c sq->block_size += (hard_frame - 1) ;
sq 508 sound/oss/dmasound/dmasound_core.c sq->block_size &= ~(hard_frame - 1) ; /* make sure we are aligned */
sq 510 sound/oss/dmasound/dmasound_core.c if ( sq->block_size <= 0 || sq->block_size > sq->bufSize) {
sq 512 sound/oss/dmasound/dmasound_core.c printk("dmasound_core: invalid frag size (user set %d)\n", sq->user_frag_size) ;
sq 514 sound/oss/dmasound/dmasound_core.c sq->block_size = sq->bufSize ;
sq 516 sound/oss/dmasound/dmasound_core.c if ( sq->user_frags <= sq->numBufs ) {
sq 517 sound/oss/dmasound/dmasound_core.c sq->max_count = sq->user_frags ;
sq 519 sound/oss/dmasound/dmasound_core.c sq->max_active = (sq->max_active <= sq->max_count) ?
sq 520 sound/oss/dmasound/dmasound_core.c sq->max_active : sq->max_count ;
sq 523 sound/oss/dmasound/dmasound_core.c printk("dmasound_core: invalid frag count (user set %d)\n", sq->user_frags) ;
sq 525 sound/oss/dmasound/dmasound_core.c sq->max_count =
sq 526 sound/oss/dmasound/dmasound_core.c sq->max_active = sq->numBufs ;
sq 529 sound/oss/dmasound/dmasound_core.c sq->front = sq->count = sq->rear_size = 0;
sq 530 sound/oss/dmasound/dmasound_core.c sq->syncing = 0;
sq 531 sound/oss/dmasound/dmasound_core.c sq->active = 0;
sq 533 sound/oss/dmasound/dmasound_core.c if (sq == &write_sq) {
sq 534 sound/oss/dmasound/dmasound_core.c sq->rear = -1;
sq 692 sound/oss/dmasound/dmasound_core.c static inline void sq_init_waitqueue(struct sound_queue *sq)
sq 694 sound/oss/dmasound/dmasound_core.c init_waitqueue_head(&sq->action_queue);
sq 695 sound/oss/dmasound/dmasound_core.c init_waitqueue_head(&sq->open_queue);
sq 696 sound/oss/dmasound/dmasound_core.c init_waitqueue_head(&sq->sync_queue);
sq 697 sound/oss/dmasound/dmasound_core.c sq->busy = 0;
sq 701 sound/oss/dmasound/dmasound_core.c static inline void sq_wake_up(struct sound_queue *sq, struct file *file,
sq 705 sound/oss/dmasound/dmasound_core.c sq->busy = 0; /* CHECK: IS THIS OK??? */
sq 706 sound/oss/dmasound/dmasound_core.c WAKE_UP(sq->open_queue);
sq 711 sound/oss/dmasound/dmasound_core.c static int sq_open2(struct sound_queue *sq, struct file *file, fmode_t mode,
sq 717 sound/oss/dmasound/dmasound_core.c if (sq->busy) {
sq 723 sound/oss/dmasound/dmasound_core.c if (wait_event_interruptible(sq->open_queue, !sq->busy))
sq 733 sound/oss/dmasound/dmasound_core.c sq->busy = 1; /* Let's play spot-the-race-condition */
sq 740 sound/oss/dmasound/dmasound_core.c if (( rc = sq_allocate_buffers(sq, numbufs, bufsize))) {
sq 742 sound/oss/dmasound/dmasound_core.c sq_wake_up(sq, file, mode);
sq 744 sound/oss/dmasound/dmasound_core.c sq->busy = 0 ;
sq 749 sound/oss/dmasound/dmasound_core.c sq->non_blocking = file->f_flags & O_NONBLOCK;
sq 957 sound/oss/dmasound/dmasound_core.c static int set_queue_frags(struct sound_queue *sq, int bufs, int size)
sq 959 sound/oss/dmasound/dmasound_core.c if (sq->locked) {
sq 969 sound/oss/dmasound/dmasound_core.c if (size > sq->bufSize)
sq 974 sound/oss/dmasound/dmasound_core.c if (bufs > sq->numBufs) /* the user is allowed say "don't care" with 0x7fff */
sq 975 sound/oss/dmasound/dmasound_core.c bufs = sq->numBufs ;
sq 982 sound/oss/dmasound/dmasound_core.c sq->user_frags =
sq 983 sound/oss/dmasound/dmasound_core.c sq->max_active = bufs ;
sq 984 sound/oss/dmasound/dmasound_core.c sq->user_frag_size = size ;
sq 46 tools/io_uring/liburing.h struct io_uring_sq sq;
sq 70 tools/io_uring/queue.c struct io_uring_sq *sq = &ring->sq;
sq 71 tools/io_uring/queue.c const unsigned mask = *sq->kring_mask;
sq 81 tools/io_uring/queue.c if (*sq->khead != *sq->ktail) {
sq 82 tools/io_uring/queue.c submitted = *sq->kring_entries;
sq 86 tools/io_uring/queue.c if (sq->sqe_head == sq->sqe_tail)
sq 93 tools/io_uring/queue.c ktail = ktail_next = *sq->ktail;
sq 94 tools/io_uring/queue.c to_submit = sq->sqe_tail - sq->sqe_head;
sq 99 tools/io_uring/queue.c sq->array[ktail & mask] = sq->sqe_head & mask;
sq 102 tools/io_uring/queue.c sq->sqe_head++;
sq 109 tools/io_uring/queue.c if (*sq->ktail != ktail) {
sq 117 tools/io_uring/queue.c *sq->ktail = ktail;
sq 143 tools/io_uring/queue.c struct io_uring_sq *sq = &ring->sq;
sq 144 tools/io_uring/queue.c unsigned next = sq->sqe_tail + 1;
sq 150 tools/io_uring/queue.c if (next - sq->sqe_head > *sq->kring_entries)
sq 153 tools/io_uring/queue.c sqe = &sq->sqes[sq->sqe_tail & *sq->kring_mask];
sq 154 tools/io_uring/queue.c sq->sqe_tail = next;
sq 11 tools/io_uring/setup.c struct io_uring_sq *sq, struct io_uring_cq *cq)
sq 17 tools/io_uring/setup.c sq->ring_sz = p->sq_off.array + p->sq_entries * sizeof(unsigned);
sq 18 tools/io_uring/setup.c ptr = mmap(0, sq->ring_sz, PROT_READ | PROT_WRITE,
sq 22 tools/io_uring/setup.c sq->khead = ptr + p->sq_off.head;
sq 23 tools/io_uring/setup.c sq->ktail = ptr + p->sq_off.tail;
sq 24 tools/io_uring/setup.c sq->kring_mask = ptr + p->sq_off.ring_mask;
sq 25 tools/io_uring/setup.c sq->kring_entries = ptr + p->sq_off.ring_entries;
sq 26 tools/io_uring/setup.c sq->kflags = ptr + p->sq_off.flags;
sq 27 tools/io_uring/setup.c sq->kdropped = ptr + p->sq_off.dropped;
sq 28 tools/io_uring/setup.c sq->array = ptr + p->sq_off.array;
sq 31 tools/io_uring/setup.c sq->sqes = mmap(0, size, PROT_READ | PROT_WRITE,
sq 34 tools/io_uring/setup.c if (sq->sqes == MAP_FAILED) {
sq 37 tools/io_uring/setup.c munmap(sq->khead, sq->ring_sz);
sq 46 tools/io_uring/setup.c munmap(sq->sqes, p->sq_entries * sizeof(struct io_uring_sqe));
sq 69 tools/io_uring/setup.c ret = io_uring_mmap(fd, p, &ring->sq, &ring->cq);
sq 100 tools/io_uring/setup.c struct io_uring_sq *sq = &ring->sq;
sq 103 tools/io_uring/setup.c munmap(sq->sqes, *sq->kring_entries * sizeof(struct io_uring_sqe));
sq 104 tools/io_uring/setup.c munmap(sq->khead, sq->ring_sz);
sq 5228 tools/lib/traceevent/event-parse.c struct trace_seq sq;
sq 5236 tools/lib/traceevent/event-parse.c trace_seq_init(&sq);
sq 5264 tools/lib/traceevent/event-parse.c trace_seq_printf(&sq, "%c%c%c",
sq 5274 tools/lib/traceevent/event-parse.c trace_seq_printf(&sq, "%x", pc);
sq 5276 tools/lib/traceevent/event-parse.c trace_seq_printf(&sq, ".");
sq 5280 tools/lib/traceevent/event-parse.c trace_seq_printf(&sq, ".");
sq 5282 tools/lib/traceevent/event-parse.c trace_seq_printf(&sq, "%d", migrate_disable);
sq 5287 tools/lib/traceevent/event-parse.c trace_seq_printf(&sq, ".");
sq 5289 tools/lib/traceevent/event-parse.c trace_seq_printf(&sq, "%d", lock_depth);
sq 5292 tools/lib/traceevent/event-parse.c if (sq.state == TRACE_SEQ__MEM_ALLOC_FAILED) {
sq 5297 tools/lib/traceevent/event-parse.c trace_seq_terminate(&sq);
sq 5298 tools/lib/traceevent/event-parse.c trace_seq_puts(s, sq.buffer);
sq 5299 tools/lib/traceevent/event-parse.c trace_seq_destroy(&sq);