fq 97 block/blk-flush.c struct blk_flush_queue *fq, unsigned int flags);
fq 157 block/blk-flush.c struct blk_flush_queue *fq,
fq 161 block/blk-flush.c struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
fq 178 block/blk-flush.c fq->flush_pending_since = jiffies;
fq 183 block/blk-flush.c list_move_tail(&rq->flush.list, &fq->flush_data_in_flight);
fq 204 block/blk-flush.c blk_kick_flush(q, fq, cmd_flags);
fq 213 block/blk-flush.c struct blk_flush_queue *fq = blk_get_flush_queue(q, flush_rq->mq_ctx);
fq 217 block/blk-flush.c spin_lock_irqsave(&fq->mq_flush_lock, flags);
fq 220 block/blk-flush.c fq->rq_status = error;
fq 221 block/blk-flush.c spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
fq 225 block/blk-flush.c if (fq->rq_status != BLK_STS_OK)
fq 226 block/blk-flush.c error = fq->rq_status;
fq 230 block/blk-flush.c blk_mq_tag_set_rq(hctx, flush_rq->tag, fq->orig_rq);
fq 237 block/blk-flush.c running = &fq->flush_queue[fq->flush_running_idx];
fq 238 block/blk-flush.c BUG_ON(fq->flush_pending_idx == fq->flush_running_idx);
fq 241 block/blk-flush.c fq->flush_running_idx ^= 1;
fq 248 block/blk-flush.c blk_flush_complete_seq(rq, fq, seq, error);
fq 251 block/blk-flush.c fq->flush_queue_delayed = 0;
fq 252 block/blk-flush.c spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
fq 268 block/blk-flush.c static void blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
fq 271 block/blk-flush.c struct list_head *pending = &fq->flush_queue[fq->flush_pending_idx];
fq 274 block/blk-flush.c struct request *flush_rq = fq->flush_rq;
fq 277 block/blk-flush.c if (fq->flush_pending_idx != fq->flush_running_idx || list_empty(pending))
fq 286 block/blk-flush.c if (!list_empty(&fq->flush_data_in_flight) && q->elevator &&
fq 288 block/blk-flush.c fq->flush_pending_since + FLUSH_PENDING_TIMEOUT))
fq 295 block/blk-flush.c fq->flush_pending_idx ^= 1;
fq 311 block/blk-flush.c fq->orig_rq = first_rq;
fq 333 block/blk-flush.c struct blk_flush_queue *fq = blk_get_flush_queue(q, ctx);
fq 344 block/blk-flush.c spin_lock_irqsave(&fq->mq_flush_lock, flags);
fq 345 block/blk-flush.c blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error);
fq 346 block/blk-flush.c spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
fq 365 block/blk-flush.c struct blk_flush_queue *fq = blk_get_flush_queue(q, rq->mq_ctx);
fq 417 block/blk-flush.c spin_lock_irq(&fq->mq_flush_lock);
fq 418 block/blk-flush.c blk_flush_complete_seq(rq, fq, REQ_FSEQ_ACTIONS & ~policy, 0);
fq 419 block/blk-flush.c spin_unlock_irq(&fq->mq_flush_lock);
fq 478 block/blk-flush.c struct blk_flush_queue *fq;
fq 481 block/blk-flush.c fq = kzalloc_node(sizeof(*fq), flags, node);
fq 482 block/blk-flush.c if (!fq)
fq 485 block/blk-flush.c spin_lock_init(&fq->mq_flush_lock);
fq 488 block/blk-flush.c fq->flush_rq = kzalloc_node(rq_sz, flags, node);
fq 489 block/blk-flush.c if (!fq->flush_rq)
fq 492 block/blk-flush.c INIT_LIST_HEAD(&fq->flush_queue[0]);
fq 493 block/blk-flush.c INIT_LIST_HEAD(&fq->flush_queue[1]);
fq 494 block/blk-flush.c INIT_LIST_HEAD(&fq->flush_data_in_flight);
fq 496 block/blk-flush.c lockdep_register_key(&fq->key);
fq 497 block/blk-flush.c lockdep_set_class(&fq->mq_flush_lock, &fq->key);
fq 499 block/blk-flush.c return fq;
fq 502 block/blk-flush.c kfree(fq);
fq 507 block/blk-flush.c void blk_free_flush_queue(struct blk_flush_queue *fq)
fq 510 block/blk-flush.c if (!fq)
fq 513 block/blk-flush.c lockdep_unregister_key(&fq->key);
fq 514 block/blk-flush.c kfree(fq->flush_rq);
fq 515 block/blk-flush.c kfree(fq);
fq 43 block/blk-mq-sysfs.c blk_free_flush_queue(hctx->fq);
fq 2283 block/blk-mq.c set->ops->exit_request(set, hctx->fq->flush_rq, hctx_idx);
fq 2337 block/blk-mq.c if (blk_mq_init_request(set, hctx->fq->flush_rq, hctx_idx,
fq 2395 block/blk-mq.c hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size,
fq 2397 block/blk-mq.c if (!hctx->fq)
fq 44 block/blk.h return blk_mq_map_queue(q, REQ_OP_FLUSH, ctx)->fq;
fq 55 block/blk.h return hctx->fq->flush_rq == req;
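The blk_alloc_flush_queue()/blk_free_flush_queue() entries above (lines 478-515) show a two-stage allocation with error unwinding: allocate the queue, then its preallocated flush request, back out the first allocation if the second fails, and free in reverse order. A minimal userspace sketch of that shape, with illustrative names only (this is not the kernel code):

#include <stdlib.h>

struct flush_queue {
	void *flush_rq;            /* stands in for the preallocated flush request */
};

static struct flush_queue *flush_queue_alloc(size_t rq_sz)
{
	struct flush_queue *fq = calloc(1, sizeof(*fq));

	if (!fq)
		return NULL;
	fq->flush_rq = calloc(1, rq_sz);   /* second allocation may fail... */
	if (!fq->flush_rq) {
		free(fq);                  /* ...so unwind the first */
		return NULL;
	}
	return fq;
}

static void flush_queue_free(struct flush_queue *fq)
{
	if (!fq)
		return;                    /* mirrors the NULL check at line 510 */
	free(fq->flush_rq);                /* reverse order of allocation */
	free(fq);
}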
fq 141 drivers/crypto/caam/qi.c static void caam_fq_ern_cb(struct qman_portal *qm, struct qman_fq *fq,
fq 222 drivers/crypto/caam/qi.c static int empty_retired_fq(struct device *qidev, struct qman_fq *fq)
fq 226 drivers/crypto/caam/qi.c ret = qman_volatile_dequeue(fq, QMAN_VOLATILE_FLAG_WAIT_INT |
fq 231 drivers/crypto/caam/qi.c dev_err(qidev, "Volatile dequeue fail for FQ: %u\n", fq->fqid);
fq 240 drivers/crypto/caam/qi.c } while (fq->flags & QMAN_FQ_STATE_NE);
fq 245 drivers/crypto/caam/qi.c static int kill_fq(struct device *qidev, struct qman_fq *fq)
fq 250 drivers/crypto/caam/qi.c ret = qman_retire_fq(fq, &flags);
fq 264 drivers/crypto/caam/qi.c } while (fq->state != qman_fq_state_retired);
fq 266 drivers/crypto/caam/qi.c WARN_ON(fq->flags & QMAN_FQ_STATE_BLOCKOOS);
fq 267 drivers/crypto/caam/qi.c WARN_ON(fq->flags & QMAN_FQ_STATE_ORL);
fq 271 drivers/crypto/caam/qi.c if (fq->flags & QMAN_FQ_STATE_NE) {
fq 272 drivers/crypto/caam/qi.c ret = empty_retired_fq(qidev, fq);
fq 275 drivers/crypto/caam/qi.c fq->fqid);
fq 280 drivers/crypto/caam/qi.c ret = qman_oos_fq(fq);
fq 282 drivers/crypto/caam/qi.c dev_err(qidev, "OOS of FQID: %u failed\n", fq->fqid);
fq 284 drivers/crypto/caam/qi.c qman_destroy_fq(fq);
fq 285 drivers/crypto/caam/qi.c kfree(fq);
fq 290 drivers/crypto/caam/qi.c static int empty_caam_fq(struct qman_fq *fq)
fq 297 drivers/crypto/caam/qi.c ret = qman_query_fq_np(fq, &np);
fq 607 drivers/crypto/caam/qi.c struct qman_fq *fq;
fq 610 drivers/crypto/caam/qi.c fq = kzalloc(sizeof(*fq), GFP_KERNEL | GFP_DMA);
fq 611 drivers/crypto/caam/qi.c if (!fq)
fq 614 drivers/crypto/caam/qi.c fq->cb.dqrr = caam_rsp_fq_dqrr_cb;
fq 617 drivers/crypto/caam/qi.c QMAN_FQ_FLAG_DYNAMIC_FQID, fq);
fq 620 drivers/crypto/caam/qi.c kfree(fq);
fq 636 drivers/crypto/caam/qi.c ret = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &opts);
fq 639 drivers/crypto/caam/qi.c kfree(fq);
fq 643 drivers/crypto/caam/qi.c per_cpu(pcpu_qipriv.rsp_fq, cpu) = fq;
fq 645 drivers/crypto/caam/qi.c dev_dbg(qidev, "Allocated response FQ %u for CPU %u", fq->fqid, cpu);
fq 49 drivers/iommu/iova.c iovad->fq = NULL;
fq 59 drivers/iommu/iova.c return !!iovad->fq;
fq 72 drivers/iommu/iova.c free_percpu(iovad->fq);
fq 74 drivers/iommu/iova.c iovad->fq = NULL;
fq 96 drivers/iommu/iova.c struct iova_fq *fq;
fq 98 drivers/iommu/iova.c fq = per_cpu_ptr(queue, cpu);
fq 99 drivers/iommu/iova.c fq->head = 0;
fq 100 drivers/iommu/iova.c fq->tail = 0;
fq 102 drivers/iommu/iova.c spin_lock_init(&fq->lock);
fq 107 drivers/iommu/iova.c iovad->fq = queue;
fq 459 drivers/iommu/iova.c #define fq_ring_for_each(i, fq) \
fq 460 drivers/iommu/iova.c for ((i) = (fq)->head; (i) != (fq)->tail; (i) = ((i) + 1) % IOVA_FQ_SIZE)
fq 462 drivers/iommu/iova.c static inline bool fq_full(struct iova_fq *fq)
fq 464 drivers/iommu/iova.c assert_spin_locked(&fq->lock);
fq 465 drivers/iommu/iova.c return (((fq->tail + 1) % IOVA_FQ_SIZE) == fq->head);
fq 468 drivers/iommu/iova.c static inline unsigned fq_ring_add(struct iova_fq *fq)
fq 470 drivers/iommu/iova.c unsigned idx = fq->tail;
fq 472 drivers/iommu/iova.c assert_spin_locked(&fq->lock);
fq 474 drivers/iommu/iova.c fq->tail = (idx + 1) % IOVA_FQ_SIZE;
fq 479 drivers/iommu/iova.c static void fq_ring_free(struct iova_domain *iovad, struct iova_fq *fq)
fq 484 drivers/iommu/iova.c assert_spin_locked(&fq->lock);
fq 486 drivers/iommu/iova.c fq_ring_for_each(idx, fq) {
fq 488 drivers/iommu/iova.c if (fq->entries[idx].counter >= counter)
fq 492 drivers/iommu/iova.c iovad->entry_dtor(fq->entries[idx].data);
fq 495 drivers/iommu/iova.c fq->entries[idx].iova_pfn,
fq 496 drivers/iommu/iova.c fq->entries[idx].pages);
fq 498 drivers/iommu/iova.c fq->head = (fq->head + 1) % IOVA_FQ_SIZE;
fq 522 drivers/iommu/iova.c struct iova_fq *fq = per_cpu_ptr(iovad->fq, cpu);
fq 525 drivers/iommu/iova.c fq_ring_for_each(idx, fq)
fq 526 drivers/iommu/iova.c iovad->entry_dtor(fq->entries[idx].data);
fq 540 drivers/iommu/iova.c struct iova_fq *fq;
fq 542 drivers/iommu/iova.c fq = per_cpu_ptr(iovad->fq, cpu);
fq 543 drivers/iommu/iova.c spin_lock_irqsave(&fq->lock, flags);
fq 544 drivers/iommu/iova.c fq_ring_free(iovad, fq);
fq 545 drivers/iommu/iova.c spin_unlock_irqrestore(&fq->lock, flags);
fq 553 drivers/iommu/iova.c struct iova_fq *fq = raw_cpu_ptr(iovad->fq);
fq 557 drivers/iommu/iova.c spin_lock_irqsave(&fq->lock, flags);
fq 564 drivers/iommu/iova.c fq_ring_free(iovad, fq);
fq 566 drivers/iommu/iova.c if (fq_full(fq)) {
fq 568 drivers/iommu/iova.c fq_ring_free(iovad, fq);
fq 571 drivers/iommu/iova.c idx = fq_ring_add(fq);
fq 573 drivers/iommu/iova.c fq->entries[idx].iova_pfn = pfn;
fq 574 drivers/iommu/iova.c fq->entries[idx].pages = pages;
fq 575 drivers/iommu/iova.c fq->entries[idx].data = data;
fq 576 drivers/iommu/iova.c fq->entries[idx].counter = atomic64_read(&iovad->fq_flush_start_cnt);
fq 578 drivers/iommu/iova.c spin_unlock_irqrestore(&fq->lock, flags);
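fq_full(), fq_ring_add() and fq_ring_for_each() (lines 459-474 above) implement a ring that declares itself full one slot early, so head == tail always means empty. A standalone model of just that arithmetic (IOVA_FQ_SIZE shrunk and the entries simplified for the demo):

#include <stdbool.h>
#include <stdio.h>

#define IOVA_FQ_SIZE 8

struct iova_fq {
	unsigned head, tail;
	unsigned long entries[IOVA_FQ_SIZE];
};

static bool fq_full(struct iova_fq *fq)
{
	/* advancing tail would collide with head: one slot stays open */
	return ((fq->tail + 1) % IOVA_FQ_SIZE) == fq->head;
}

static unsigned fq_ring_add(struct iova_fq *fq)
{
	unsigned idx = fq->tail;

	fq->tail = (idx + 1) % IOVA_FQ_SIZE;
	return idx;
}

int main(void)
{
	struct iova_fq fq = { 0 };
	unsigned i;

	while (!fq_full(&fq))                        /* fills SIZE - 1 slots */
		fq.entries[fq_ring_add(&fq)] = 0x1000; /* fake deferred pfn */

	/* the equivalent of fq_ring_for_each(i, fq) */
	for (i = fq.head; i != fq.tail; i = (i + 1) % IOVA_FQ_SIZE)
		printf("slot %u: %#lx\n", i, fq.entries[i]);
	return 0;
}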
fq 618 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c static inline void dpaa_assign_wq(struct dpaa_fq *fq, int idx)
fq 620 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c switch (fq->fq_type) {
fq 623 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c fq->wq = 1;
fq 627 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c fq->wq = 5;
fq 631 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c fq->wq = 6;
fq 637 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c fq->wq = 6;
fq 641 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c fq->wq = 2;
fq 645 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c fq->wq = 1;
fq 649 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c fq->wq = 0;
fq 658 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c fq->fq_type, fq->fqid);
fq 864 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c struct dpaa_fq *fq,
fq 867 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c fq->fq_base = *template;
fq 868 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c fq->net_dev = priv->net_dev;
fq 870 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c fq->flags = QMAN_FQ_FLAG_NO_ENQUEUE;
fq 871 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c fq->channel = priv->channel;
fq 875 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c struct dpaa_fq *fq,
fq 879 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c fq->fq_base = *template;
fq 880 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c fq->net_dev = priv->net_dev;
fq 883 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c fq->flags = QMAN_FQ_FLAG_TO_DCPORTAL;
fq 884 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c fq->channel = (u16)fman_port_get_qman_channel_id(port);
fq 886 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c fq->flags = QMAN_FQ_FLAG_NO_MODIFY;
fq 897 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c struct dpaa_fq *fq;
fq 907 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c list_for_each_entry(fq, &priv->dpaa_fq_list, list) {
fq 908 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c switch (fq->fq_type) {
fq 910 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c dpaa_setup_ingress(priv, fq, &fq_cbs->rx_defq);
fq 913 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c dpaa_setup_ingress(priv, fq, &fq_cbs->rx_errq);
fq 918 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c dpaa_setup_ingress(priv, fq, &fq_cbs->rx_defq);
fq 919 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c fq->channel = channels[portal_cnt++ % num_portals];
fq 922 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c dpaa_setup_egress(priv, fq, tx_port,
fq 928 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c priv->egress_fqs[egress_cnt++] = &fq->fq_base;
fq 931 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c priv->conf_fqs[conf_cnt++] = &fq->fq_base;
fq 934 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c dpaa_setup_ingress(priv, fq, &fq_cbs->tx_defq);
fq 937 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c dpaa_setup_ingress(priv, fq, &fq_cbs->tx_errq);
fq 948 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c list_for_each_entry(fq, &priv->dpaa_fq_list, list) {
fq 949 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c if (fq->fq_type != FQ_TYPE_TX)
fq 951 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c priv->egress_fqs[egress_cnt++] = &fq->fq_base;
fq 976 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c struct qman_fq *fq;
fq 993 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c fq = &dpaa_fq->fq_base;
fq 1099 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c err = qman_init_fq(fq, QMAN_INITFQ_FLAG_SCHED, &initfq);
fq 1102 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c qman_fq_fqid(fq), err);
fq 1103 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c qman_destroy_fq(fq);
fq 1108 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c dpaa_fq->fqid = qman_fq_fqid(fq);
fq 1113 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c static int dpaa_fq_free_entry(struct device *dev, struct qman_fq *fq)
fq 1121 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c dpaa_fq = container_of(fq, struct dpaa_fq, fq_base);
fq 1125 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c err = qman_retire_fq(fq, NULL);
fq 1128 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c qman_fq_fqid(fq), err);
fq 1130 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c error = qman_oos_fq(fq);
fq 1133 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c qman_fq_fqid(fq), error);
fq 1139 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c qman_destroy_fq(fq);
fq 2229 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c struct qman_fq *fq,
fq 2232 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c struct dpaa_fq *dpaa_fq = container_of(fq, struct dpaa_fq, fq_base);
fq 2250 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c dpaa_rx_error(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);
fq 2256 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c struct qman_fq *fq,
fq 2277 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c net_dev = ((struct dpaa_fq *)fq)->net_dev;
fq 2284 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c trace_dpaa_rx_fd(net_dev, fq, &dq->fd);
fq 2376 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c struct qman_fq *fq,
fq 2383 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c net_dev = ((struct dpaa_fq *)fq)->net_dev;
fq 2391 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c dpaa_tx_error(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);
fq 2397 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c struct qman_fq *fq,
fq 2404 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c net_dev = ((struct dpaa_fq *)fq)->net_dev;
fq 2408 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c trace_dpaa_tx_conf_fd(net_dev, fq, &dq->fd);
fq 2415 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c dpaa_tx_conf(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);
fq 2421 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c struct qman_fq *fq,
fq 2430 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c net_dev = ((struct dpaa_fq *)fq)->net_dev;
fq 59 drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c struct dpaa_fq *fq;
fq 66 drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c list_for_each_entry_safe(fq, tmp, &priv->dpaa_fq_list, list) {
fq 67 drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c switch (fq->fq_type) {
fq 93 drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c if (prev && (abs(fq->fqid - prev->fqid) != 1 ||
fq 104 drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c if (prev && abs(fq->fqid - prev->fqid) == 1 &&
fq 106 drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c last_fqid = fq->fqid;
fq 108 drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c first_fqid = fq->fqid;
fq 109 drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c last_fqid = fq->fqid;
fq 112 drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c prev = fq;
fq 58 drivers/net/ethernet/freescale/dpaa/dpaa_eth_trace.h struct qman_fq *fq,
fq 62 drivers/net/ethernet/freescale/dpaa/dpaa_eth_trace.h TP_ARGS(netdev, fq, fd),
fq 80 drivers/net/ethernet/freescale/dpaa/dpaa_eth_trace.h __entry->fqid = fq->fqid;
fq 104 drivers/net/ethernet/freescale/dpaa/dpaa_eth_trace.h struct qman_fq *fq,
fq 107 drivers/net/ethernet/freescale/dpaa/dpaa_eth_trace.h TP_ARGS(netdev, fq, fd)
fq 114 drivers/net/ethernet/freescale/dpaa/dpaa_eth_trace.h struct qman_fq *fq,
fq 117 drivers/net/ethernet/freescale/dpaa/dpaa_eth_trace.h TP_ARGS(netdev, fq, fd)
fq 124 drivers/net/ethernet/freescale/dpaa/dpaa_eth_trace.h struct qman_fq *fq,
fq 127 drivers/net/ethernet/freescale/dpaa/dpaa_eth_trace.h TP_ARGS(netdev, fq, fd)
fq 64 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c static char *fq_type_to_str(struct dpaa2_eth_fq *fq)
fq 66 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c switch (fq->type) {
fq 79 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c struct dpaa2_eth_fq *fq;
fq 88 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c fq = &priv->fq[i];
fq 89 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c err = dpaa2_io_query_fq_count(NULL, fq->fqid, &fcnt, &bcnt);
fq 94 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c fq->fqid,
fq 95 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c fq->target_cpu,
fq 96 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c fq_type_to_str(fq),
fq 97 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-debugfs.c fq->stats.frames,
fq 246 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c struct dpaa2_eth_fq *fq;
fq 265 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c fq = &priv->fq[queue_id];
fq 267 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c err = priv->enqueue(priv, fq, fd, 0);
fq 355 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c struct dpaa2_eth_fq *fq)
fq 385 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c xdp_act = run_xdp(priv, ch, fq, (struct dpaa2_fd *)fd, vaddr);
fq 433 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c skb_record_rx_queue(skb, fq->flowid);
fq 458 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c struct dpaa2_eth_fq *fq = NULL;
fq 476 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c fq = (struct dpaa2_eth_fq *)(uintptr_t)dpaa2_dq_fqd_ctx(dq);
fq 478 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c fq->consume(priv, ch, fd, fq);
fq 485 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c fq->stats.frames += cleaned;
fq 491 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c *src = fq;
fq 673 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c struct dpaa2_eth_fq *fq,
fq 719 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c fq->dq_frames++;
fq 720 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c fq->dq_bytes += fd_len;
fq 755 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c struct dpaa2_eth_fq *fq;
fq 830 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c fq = &priv->fq[queue_mapping];
fq 840 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c err = priv->enqueue(priv, fq, &fd, prio);
fq 848 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c free_tx_fd(priv, fq, &fd, false);
fq 868 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c struct dpaa2_eth_fq *fq)
fq 884 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c free_tx_fd(priv, fq, fd, true);
fq 1117 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c struct dpaa2_eth_fq *fq, *txc_fq = NULL;
fq 1138 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c store_cleaned = consume_frames(ch, &fq);
fq 1141 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c if (fq->type == DPAA2_RX_FQ) {
fq 1146 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c txc_fq = fq;
fq 1223 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c if (priv->fq[i].type != DPAA2_RX_FQ)
fq 1227 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c priv->fq[i].flowid, &td);
fq 1337 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c struct dpaa2_eth_fq *fq;
fq 1342 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c fq = &priv->fq[i];
fq 1343 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c err = dpaa2_io_query_fq_count(NULL, fq->fqid, &fcnt, &bcnt);
fq 1855 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c struct dpaa2_eth_fq *fq;
fq 1901 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c fq = &priv->fq[smp_processor_id() % dpaa2_eth_queue_count(priv)];
fq 1903 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c err = priv->enqueue(priv, fq, &fd, 0);
fq 1949 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c struct dpaa2_eth_fq *fq;
fq 1960 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c fq = &priv->fq[i % num_queues];
fq 1963 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c cpumask_set_cpu(fq->target_cpu, &xps_mask);
fq 2292 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c struct dpaa2_eth_fq *fq;
fq 2303 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c fq = &priv->fq[i];
fq 2304 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c switch (fq->type) {
fq 2306 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c fq->target_cpu = rx_cpu;
fq 2312 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c fq->target_cpu = txc_cpu;
fq 2318 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c dev_err(dev, "Unknown FQ type: %d\n", fq->type);
fq 2320 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c fq->channel = get_affine_channel(priv, fq->target_cpu);
fq 2335 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c priv->fq[priv->num_fqs].type = DPAA2_TX_CONF_FQ;
fq 2336 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c priv->fq[priv->num_fqs].consume = dpaa2_eth_tx_conf;
fq 2337 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c priv->fq[priv->num_fqs++].flowid = (u16)i;
fq 2341 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c priv->fq[priv->num_fqs].type = DPAA2_RX_FQ;
fq 2342 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c priv->fq[priv->num_fqs].consume = dpaa2_eth_rx;
fq 2343 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c priv->fq[priv->num_fqs++].flowid = (u16)i;
fq 2500 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c struct dpaa2_eth_fq *fq,
fq 2503 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c return dpaa2_io_service_enqueue_qd(fq->channel->dpio,
fq 2505 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c fq->tx_qdbin, fd);
fq 2509 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c struct dpaa2_eth_fq *fq,
fq 2512 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c return dpaa2_io_service_enqueue_fq(fq->channel->dpio,
fq 2513 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c fq->tx_fqid[prio], fd);
fq 2555 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c struct dpaa2_eth_fq *fq;
fq 2567 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c fq = &priv->fq[i];
fq 2568 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c if (fq->type != DPAA2_TX_CONF_FQ)
fq 2572 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c DPNI_QUEUE_TX, j, fq->flowid,
fq 2577 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c fq->tx_fqid[j] = qid.fqid;
fq 2578 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c if (fq->tx_fqid[j] == 0)
fq 2681 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c struct dpaa2_eth_fq *fq)
fq 2689 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c DPNI_QUEUE_RX, 0, fq->flowid, &queue, &qid);
fq 2695 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c fq->fqid = qid.fqid;
fq 2697 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c queue.destination.id = fq->channel->dpcon_id;
fq 2700 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c queue.user_context = (u64)(uintptr_t)fq;
fq 2702 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c DPNI_QUEUE_RX, 0, fq->flowid,
fq 2711 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c err = xdp_rxq_info_reg(&fq->channel->xdp_rxq, priv->net_dev,
fq 2712 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c fq->flowid);
fq 2718 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c err = xdp_rxq_info_reg_mem_model(&fq->channel->xdp_rxq,
fq 2729 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c struct dpaa2_eth_fq *fq)
fq 2738 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c DPNI_QUEUE_TX, i, fq->flowid,
fq 2744 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c fq->tx_fqid[i] = qid.fqid;
fq 2748 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c fq->tx_qdbin = qid.qdbin;
fq 2751 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid,
fq 2758 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c fq->fqid = qid.fqid;
fq 2760 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c queue.destination.id = fq->channel->dpcon_id;
fq 2763 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c queue.user_context = (u64)(uintptr_t)fq;
fq 2765 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid,
fq 3138 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c switch (priv->fq[i].type) {
fq 3140 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c err = setup_rx_flow(priv, &priv->fq[i]);
fq 3143 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c err = setup_tx_flow(priv, &priv->fq[i]);
fq 3146 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c dev_err(dev, "Invalid FQ type %d\n", priv->fq[i].type);
fq 317 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h struct dpaa2_eth_fq *fq);
fq 362 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h struct dpaa2_eth_fq fq[DPAA2_ETH_MAX_QUEUES];
fq 364 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h struct dpaa2_eth_fq *fq,
fq 245 drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c err = dpaa2_io_query_fq_count(NULL, priv->fq[j].fqid,
fq 252 drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c if (priv->fq[j].type == DPAA2_TX_CONF_FQ) {
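The dpaa2-eth.c entries at lines 476, 2700 and 2763 show the standard trick of parking a struct pointer in the queue's 64-bit user context when the flow is set up, then recovering it from the dequeue descriptor. A userspace sketch of just that round-trip (dq_fqd_ctx() and the names below are stand-ins, not the real dpaa2_dq_fqd_ctx()):

#include <assert.h>
#include <stdint.h>

struct eth_fq { int flowid; };

static uint64_t stored_ctx;                      /* models queue.user_context */

static uint64_t dq_fqd_ctx(void) { return stored_ctx; }

int main(void)
{
	struct eth_fq fq = { .flowid = 3 };

	/* setup path: program the pointer into the queue context */
	stored_ctx = (uint64_t)(uintptr_t)&fq;

	/* dequeue path: the hardware hands the context back verbatim */
	struct eth_fq *back = (struct eth_fq *)(uintptr_t)dq_fqd_ctx();
	assert(back == &fq && back->flowid == 3);
	return 0;
}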
fq 3535 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c u8 di0, dq0, ei, eq, fi, fq;
fq 3553 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c wlc_lcnphy_get_radio_loft(pi, &ei, &eq, &fi, &fq);
fq 3558 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c wlc_lcnphy_get_radio_loft(pi, &ei, &eq, &fi, &fq);
fq 3560 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c cc.im = (u16) fq;
fq 268 drivers/soc/fsl/qbman/qman.c struct qm_mcc_fq fq;
fq 962 drivers/soc/fsl/qbman/qman.c static inline void fq_set(struct qman_fq *fq, u32 mask)
fq 964 drivers/soc/fsl/qbman/qman.c fq->flags |= mask;
fq 967 drivers/soc/fsl/qbman/qman.c static inline void fq_clear(struct qman_fq *fq, u32 mask)
fq 969 drivers/soc/fsl/qbman/qman.c fq->flags &= ~mask;
fq 972 drivers/soc/fsl/qbman/qman.c static inline int fq_isset(struct qman_fq *fq, u32 mask)
fq 974 drivers/soc/fsl/qbman/qman.c return fq->flags & mask;
fq 977 drivers/soc/fsl/qbman/qman.c static inline int fq_isclear(struct qman_fq *fq, u32 mask)
fq 979 drivers/soc/fsl/qbman/qman.c return !(fq->flags & mask);
fq 1126 drivers/soc/fsl/qbman/qman.c struct qman_fq *fq;
fq 1132 drivers/soc/fsl/qbman/qman.c fq = fq_table[idx];
fq 1133 drivers/soc/fsl/qbman/qman.c DPAA_ASSERT(!fq || idx == fq->idx);
fq 1135 drivers/soc/fsl/qbman/qman.c return fq;
fq 1156 drivers/soc/fsl/qbman/qman.c static u32 fq_to_tag(struct qman_fq *fq)
fq 1159 drivers/soc/fsl/qbman/qman.c return fq->idx;
fq 1161 drivers/soc/fsl/qbman/qman.c return (u32)fq;
fq 1430 drivers/soc/fsl/qbman/qman.c static inline void fq_state_change(struct qman_portal *p, struct qman_fq *fq,
fq 1435 drivers/soc/fsl/qbman/qman.c DPAA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_ORL));
fq 1436 drivers/soc/fsl/qbman/qman.c fq_clear(fq, QMAN_FQ_STATE_ORL);
fq 1439 drivers/soc/fsl/qbman/qman.c DPAA_ASSERT(fq->state == qman_fq_state_parked ||
fq 1440 drivers/soc/fsl/qbman/qman.c fq->state == qman_fq_state_sched);
fq 1441 drivers/soc/fsl/qbman/qman.c DPAA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_CHANGING));
fq 1442 drivers/soc/fsl/qbman/qman.c fq_clear(fq, QMAN_FQ_STATE_CHANGING);
fq 1443 drivers/soc/fsl/qbman/qman.c if (msg->fq.fqs & QM_MR_FQS_NOTEMPTY)
fq 1444 drivers/soc/fsl/qbman/qman.c fq_set(fq, QMAN_FQ_STATE_NE);
fq 1445 drivers/soc/fsl/qbman/qman.c if (msg->fq.fqs & QM_MR_FQS_ORLPRESENT)
fq 1446 drivers/soc/fsl/qbman/qman.c fq_set(fq, QMAN_FQ_STATE_ORL);
fq 1447 drivers/soc/fsl/qbman/qman.c fq->state = qman_fq_state_retired;
fq 1450 drivers/soc/fsl/qbman/qman.c DPAA_ASSERT(fq->state == qman_fq_state_sched);
fq 1451 drivers/soc/fsl/qbman/qman.c DPAA_ASSERT(fq_isclear(fq, QMAN_FQ_STATE_CHANGING));
fq 1452 drivers/soc/fsl/qbman/qman.c fq->state = qman_fq_state_parked;
fq 1493 drivers/soc/fsl/qbman/qman.c struct qman_fq *fq;
fq 1514 drivers/soc/fsl/qbman/qman.c fq = fqid_to_fq(qm_fqid_get(&msg->fq));
fq 1515 drivers/soc/fsl/qbman/qman.c if (WARN_ON(!fq))
fq 1517 drivers/soc/fsl/qbman/qman.c fq_state_change(p, fq, msg, verb);
fq 1518 drivers/soc/fsl/qbman/qman.c if (fq->cb.fqs)
fq 1519 drivers/soc/fsl/qbman/qman.c fq->cb.fqs(p, fq, msg);
fq 1523 drivers/soc/fsl/qbman/qman.c fq = tag_to_fq(be32_to_cpu(msg->fq.context_b));
fq 1524 drivers/soc/fsl/qbman/qman.c fq_state_change(p, fq, msg, verb);
fq 1525 drivers/soc/fsl/qbman/qman.c if (fq->cb.fqs)
fq 1526 drivers/soc/fsl/qbman/qman.c fq->cb.fqs(p, fq, msg);
fq 1537 drivers/soc/fsl/qbman/qman.c fq = tag_to_fq(be32_to_cpu(msg->ern.tag));
fq 1538 drivers/soc/fsl/qbman/qman.c fq->cb.ern(p, fq, msg);
fq 1576 drivers/soc/fsl/qbman/qman.c static noinline void clear_vdqcr(struct qman_portal *p, struct qman_fq *fq)
fq 1579 drivers/soc/fsl/qbman/qman.c fq_clear(fq, QMAN_FQ_STATE_VDQCR);
fq 1613 drivers/soc/fsl/qbman/qman.c struct qman_fq *fq;
fq 1629 drivers/soc/fsl/qbman/qman.c fq = p->vdqcr_owned;
fq 1637 drivers/soc/fsl/qbman/qman.c fq_clear(fq, QMAN_FQ_STATE_NE);
fq 1644 drivers/soc/fsl/qbman/qman.c res = fq->cb.dqrr(p, fq, dq);
fq 1649 drivers/soc/fsl/qbman/qman.c clear_vdqcr(p, fq);
fq 1652 drivers/soc/fsl/qbman/qman.c fq = tag_to_fq(be32_to_cpu(dq->context_b));
fq 1654 drivers/soc/fsl/qbman/qman.c res = fq->cb.dqrr(p, fq, dq);
fq 1793 drivers/soc/fsl/qbman/qman.c int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq)
fq 1801 drivers/soc/fsl/qbman/qman.c fq->fqid = fqid;
fq 1802 drivers/soc/fsl/qbman/qman.c fq->flags = flags;
fq 1803 drivers/soc/fsl/qbman/qman.c fq->state = qman_fq_state_oos;
fq 1804 drivers/soc/fsl/qbman/qman.c fq->cgr_groupid = 0;
fq 1812 drivers/soc/fsl/qbman/qman.c fq->idx = fqid * 2;
fq 1814 drivers/soc/fsl/qbman/qman.c fq->idx++;
fq 1816 drivers/soc/fsl/qbman/qman.c WARN_ON(fq_table[fq->idx]);
fq 1817 drivers/soc/fsl/qbman/qman.c fq_table[fq->idx] = fq;
fq 1823 drivers/soc/fsl/qbman/qman.c void qman_destroy_fq(struct qman_fq *fq)
fq 1829 drivers/soc/fsl/qbman/qman.c switch (fq->state) {
fq 1832 drivers/soc/fsl/qbman/qman.c if (fq_isset(fq, QMAN_FQ_FLAG_DYNAMIC_FQID))
fq 1833 drivers/soc/fsl/qbman/qman.c qman_release_fqid(fq->fqid);
fq 1835 drivers/soc/fsl/qbman/qman.c DPAA_ASSERT(fq_table[fq->idx]);
fq 1836 drivers/soc/fsl/qbman/qman.c fq_table[fq->idx] = NULL;
fq 1845 drivers/soc/fsl/qbman/qman.c u32 qman_fq_fqid(struct qman_fq *fq)
fq 1847 drivers/soc/fsl/qbman/qman.c return fq->fqid;
fq 1851 drivers/soc/fsl/qbman/qman.c int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts)
fq 1862 drivers/soc/fsl/qbman/qman.c if (fq->state != qman_fq_state_oos &&
fq 1863 drivers/soc/fsl/qbman/qman.c fq->state != qman_fq_state_parked)
fq 1866 drivers/soc/fsl/qbman/qman.c if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
fq 1876 drivers/soc/fsl/qbman/qman.c if (fq_isset(fq, QMAN_FQ_STATE_CHANGING) ||
fq 1877 drivers/soc/fsl/qbman/qman.c (fq->state != qman_fq_state_oos &&
fq 1878 drivers/soc/fsl/qbman/qman.c fq->state != qman_fq_state_parked)) {
fq 1885 drivers/soc/fsl/qbman/qman.c qm_fqid_set(&mcc->fq, fq->fqid);
fq 1892 drivers/soc/fsl/qbman/qman.c if (fq_isclear(fq, QMAN_FQ_FLAG_TO_DCPORTAL)) {
fq 1896 drivers/soc/fsl/qbman/qman.c mcc->initfq.fqd.context_b = cpu_to_be32(fq_to_tag(fq));
fq 1910 drivers/soc/fsl/qbman/qman.c phys_fq = dma_map_single(p->config->dev, fq,
fq 1911 drivers/soc/fsl/qbman/qman.c sizeof(*fq), DMA_TO_DEVICE);
fq 1948 drivers/soc/fsl/qbman/qman.c fq_set(fq, QMAN_FQ_STATE_CGR_EN);
fq 1950 drivers/soc/fsl/qbman/qman.c fq_clear(fq, QMAN_FQ_STATE_CGR_EN);
fq 1953 drivers/soc/fsl/qbman/qman.c fq->cgr_groupid = opts->fqd.cgid;
fq 1955 drivers/soc/fsl/qbman/qman.c fq->state = (flags & QMAN_INITFQ_FLAG_SCHED) ?
fq 1964 drivers/soc/fsl/qbman/qman.c int qman_schedule_fq(struct qman_fq *fq)
fq 1971 drivers/soc/fsl/qbman/qman.c if (fq->state != qman_fq_state_parked)
fq 1974 drivers/soc/fsl/qbman/qman.c if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
fq 1979 drivers/soc/fsl/qbman/qman.c if (fq_isset(fq, QMAN_FQ_STATE_CHANGING) ||
fq 1980 drivers/soc/fsl/qbman/qman.c fq->state != qman_fq_state_parked) {
fq 1985 drivers/soc/fsl/qbman/qman.c qm_fqid_set(&mcc->fq, fq->fqid);
fq 1998 drivers/soc/fsl/qbman/qman.c fq->state = qman_fq_state_sched;
fq 2005 drivers/soc/fsl/qbman/qman.c int qman_retire_fq(struct qman_fq *fq, u32 *flags)
fq 2013 drivers/soc/fsl/qbman/qman.c if (fq->state != qman_fq_state_parked &&
fq 2014 drivers/soc/fsl/qbman/qman.c fq->state != qman_fq_state_sched)
fq 2017 drivers/soc/fsl/qbman/qman.c if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
fq 2021 drivers/soc/fsl/qbman/qman.c if (fq_isset(fq, QMAN_FQ_STATE_CHANGING) ||
fq 2022 drivers/soc/fsl/qbman/qman.c fq->state == qman_fq_state_retired ||
fq 2023 drivers/soc/fsl/qbman/qman.c fq->state == qman_fq_state_oos) {
fq 2028 drivers/soc/fsl/qbman/qman.c qm_fqid_set(&mcc->fq, fq->fqid);
fq 2051 drivers/soc/fsl/qbman/qman.c fq_set(fq, QMAN_FQ_STATE_NE);
fq 2053 drivers/soc/fsl/qbman/qman.c fq_set(fq, QMAN_FQ_STATE_ORL);
fq 2055 drivers/soc/fsl/qbman/qman.c *flags = fq->flags;
fq 2056 drivers/soc/fsl/qbman/qman.c fq->state = qman_fq_state_retired;
fq 2057 drivers/soc/fsl/qbman/qman.c if (fq->cb.fqs) {
fq 2070 drivers/soc/fsl/qbman/qman.c msg.fq.fqs = mcr->alterfq.fqs;
fq 2071 drivers/soc/fsl/qbman/qman.c qm_fqid_set(&msg.fq, fq->fqid);
fq 2072 drivers/soc/fsl/qbman/qman.c msg.fq.context_b = cpu_to_be32(fq_to_tag(fq));
fq 2073 drivers/soc/fsl/qbman/qman.c fq->cb.fqs(p, fq, &msg);
fq 2077 drivers/soc/fsl/qbman/qman.c fq_set(fq, QMAN_FQ_STATE_CHANGING);
fq 2087 drivers/soc/fsl/qbman/qman.c int qman_oos_fq(struct qman_fq *fq)
fq 2094 drivers/soc/fsl/qbman/qman.c if (fq->state != qman_fq_state_retired)
fq 2097 drivers/soc/fsl/qbman/qman.c if (fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY))
fq 2101 drivers/soc/fsl/qbman/qman.c if (fq_isset(fq, QMAN_FQ_STATE_BLOCKOOS) ||
fq 2102 drivers/soc/fsl/qbman/qman.c fq->state != qman_fq_state_retired) {
fq 2107 drivers/soc/fsl/qbman/qman.c qm_fqid_set(&mcc->fq, fq->fqid);
fq 2118 drivers/soc/fsl/qbman/qman.c fq->state = qman_fq_state_oos;
fq 2125 drivers/soc/fsl/qbman/qman.c int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd)
fq 2133 drivers/soc/fsl/qbman/qman.c qm_fqid_set(&mcc->fq, fq->fqid);
fq 2150 drivers/soc/fsl/qbman/qman.c int qman_query_fq_np(struct qman_fq *fq, struct qm_mcr_queryfq_np *np)
fq 2158 drivers/soc/fsl/qbman/qman.c qm_fqid_set(&mcc->fq, fq->fqid);
fq 2221 drivers/soc/fsl/qbman/qman.c static int set_p_vdqcr(struct qman_portal *p, struct qman_fq *fq, u32 vdqcr)
fq 2229 drivers/soc/fsl/qbman/qman.c if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
fq 2232 drivers/soc/fsl/qbman/qman.c fq_set(fq, QMAN_FQ_STATE_VDQCR);
fq 2233 drivers/soc/fsl/qbman/qman.c p->vdqcr_owned = fq;
fq 2241 drivers/soc/fsl/qbman/qman.c static int set_vdqcr(struct qman_portal **p, struct qman_fq *fq, u32 vdqcr)
fq 2246 drivers/soc/fsl/qbman/qman.c ret = set_p_vdqcr(*p, fq, vdqcr);
fq 2251 drivers/soc/fsl/qbman/qman.c static int wait_vdqcr_start(struct qman_portal **p, struct qman_fq *fq,
fq 2258 drivers/soc/fsl/qbman/qman.c !set_vdqcr(p, fq, vdqcr));
fq 2260 drivers/soc/fsl/qbman/qman.c wait_event(affine_queue, !set_vdqcr(p, fq, vdqcr));
fq 2264 drivers/soc/fsl/qbman/qman.c int qman_volatile_dequeue(struct qman_fq *fq, u32 flags, u32 vdqcr)
fq 2269 drivers/soc/fsl/qbman/qman.c if (fq->state != qman_fq_state_parked &&
fq 2270 drivers/soc/fsl/qbman/qman.c fq->state != qman_fq_state_retired)
fq 2274 drivers/soc/fsl/qbman/qman.c if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
fq 2276 drivers/soc/fsl/qbman/qman.c vdqcr = (vdqcr & ~QM_VDQCR_FQID_MASK) | fq->fqid;
fq 2278 drivers/soc/fsl/qbman/qman.c ret = wait_vdqcr_start(&p, fq, vdqcr, flags);
fq 2280 drivers/soc/fsl/qbman/qman.c ret = set_vdqcr(&p, fq, vdqcr);
fq 2293 drivers/soc/fsl/qbman/qman.c !fq_isset(fq, QMAN_FQ_STATE_VDQCR));
fq 2296 drivers/soc/fsl/qbman/qman.c !fq_isset(fq, QMAN_FQ_STATE_VDQCR));
fq 2310 drivers/soc/fsl/qbman/qman.c int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd)
fq 2340 drivers/soc/fsl/qbman/qman.c qm_fqid_set(eq, fq->fqid);
fq 2341 drivers/soc/fsl/qbman/qman.c eq->tag = cpu_to_be32(fq_to_tag(fq));
fq 2630 drivers/soc/fsl/qbman/qman.c qm_fqid_set(&mcc->fq, fqid);
fq 2645 drivers/soc/fsl/qbman/qman.c qm_fqid_set(&mcc->fq, fqid);
fq 2676 drivers/soc/fsl/qbman/qman.c qm_fqid_set(&mcc->fq, fqid);
fq 2777 drivers/soc/fsl/qbman/qman.c qm_fqid_set(&mcc->fq, fqid);
fq 2797 drivers/soc/fsl/qbman/qman.c qm_fqid_set(&mcc->fq, fqid);
fq 2894 drivers/soc/fsl/qbman/qman.c struct qman_fq fq = {
fq 2902 drivers/soc/fsl/qbman/qman.c err = qman_query_fq_np(&fq, &np);
fq 2912 drivers/soc/fsl/qbman/qman.c err = qman_query_fq(&fq, &fqd);
fq 2917 drivers/soc/fsl/qbman/qman.c err = qman_shutdown_fq(fq.fqid);
fq 2927 drivers/soc/fsl/qbman/qman.c fq.fqid++;
fq 2952 drivers/soc/fsl/qbman/qman.c struct qman_fq fq = {
fq 2960 drivers/soc/fsl/qbman/qman.c err = qman_query_fq_np(&fq, &np);
fq 2970 drivers/soc/fsl/qbman/qman.c err = qman_query_fq(&fq, &fqd);
fq 2976 drivers/soc/fsl/qbman/qman.c cgrid, fq.fqid);
fq 2981 drivers/soc/fsl/qbman/qman.c fq.fqid++;
fq 216 drivers/soc/fsl/qbman/qman_priv.h int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd);
fq 105 drivers/soc/fsl/qbman/qman_test_api.c static int do_enqueues(struct qman_fq *fq)
fq 111 drivers/soc/fsl/qbman/qman_test_api.c if (qman_enqueue(fq, &fd)) {
fq 125 drivers/soc/fsl/qbman/qman_test_api.c struct qman_fq *fq = &fq_base;
fq 132 drivers/soc/fsl/qbman/qman_test_api.c err = qman_create_fq(0, FQ_FLAGS, fq);
fq 137 drivers/soc/fsl/qbman/qman_test_api.c err = qman_init_fq(fq, QMAN_INITFQ_FLAG_LOCAL, NULL);
fq 143 drivers/soc/fsl/qbman/qman_test_api.c err = do_enqueues(fq);
fq 148 drivers/soc/fsl/qbman/qman_test_api.c err = qman_volatile_dequeue(fq, VDQCR_FLAGS, frmcnt);
fq 153 drivers/soc/fsl/qbman/qman_test_api.c err = do_enqueues(fq);
fq 158 drivers/soc/fsl/qbman/qman_test_api.c err = qman_volatile_dequeue(fq, VDQCR_FLAGS, frmcnt);
fq 166 drivers/soc/fsl/qbman/qman_test_api.c err = qman_volatile_dequeue(fq, VDQCR_FLAGS, frmcnt);
fq 172 drivers/soc/fsl/qbman/qman_test_api.c err = do_enqueues(fq);
fq 176 drivers/soc/fsl/qbman/qman_test_api.c err = qman_schedule_fq(fq);
fq 184 drivers/soc/fsl/qbman/qman_test_api.c err = qman_retire_fq(fq, &flags);
fq 195 drivers/soc/fsl/qbman/qman_test_api.c err = qman_oos_fq(fq);
fq 200 drivers/soc/fsl/qbman/qman_test_api.c qman_destroy_fq(fq);
fq 210 drivers/soc/fsl/qbman/qman_test_api.c struct qman_fq *fq,
fq 225 drivers/soc/fsl/qbman/qman_test_api.c static void cb_ern(struct qman_portal *p, struct qman_fq *fq,
fq 232 drivers/soc/fsl/qbman/qman_test_api.c static void cb_fqs(struct qman_portal *p, struct qman_fq *fq,
fq 277 drivers/soc/fsl/qbman/qman_test_stash.c struct qman_fq *fq,
fq 280 drivers/soc/fsl/qbman/qman_test_stash.c struct hp_handler *handler = (struct hp_handler *)fq;
fq 295 drivers/soc/fsl/qbman/qman_test_stash.c struct qman_fq *fq,
fq 298 drivers/soc/fsl/qbman/qman_test_stash.c struct hp_handler *handler = (struct hp_handler *)fq;
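The qman_test_api.c entries above (lines 132-200) walk a frame queue through its whole state machine: OOS -> parked -> scheduled -> retired -> OOS. A condensed sketch of that call order, kernel context assumed (FQ_FLAGS and VDQCR_FLAGS are the test's private macros, and the FD setup plus the async-retirement handling are trimmed):

static int fq_lifecycle_demo(struct qman_fq *fq, const struct qm_fd *fd,
			     u32 frmcnt)
{
	u32 flags;
	int err;

	err = qman_create_fq(0, FQ_FLAGS, fq);   /* software state only; OOS */
	if (err)
		return err;
	err = qman_init_fq(fq, QMAN_INITFQ_FLAG_LOCAL, NULL); /* OOS -> parked */
	if (err)
		return err;
	err = qman_enqueue(fq, fd);              /* enqueue works while parked */
	if (err)
		return err;
	err = qman_volatile_dequeue(fq, VDQCR_FLAGS, frmcnt); /* drain parked FQ */
	if (err)
		return err;
	err = qman_schedule_fq(fq);              /* parked -> scheduled */
	if (err)
		return err;
	err = qman_retire_fq(fq, &flags);        /* may complete asynchronously */
	if (err < 0)
		return err;
	err = qman_oos_fq(fq);                   /* retired -> out of service */
	if (err)
		return err;
	qman_destroy_fq(fq);                     /* legal again once OOS */
	return 0;
}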
fq 31 include/linux/blk-mq.h struct blk_flush_queue *fq;
fq 540 include/linux/blkdev.h struct blk_flush_queue *fq;
fq 77 include/linux/iova.h struct iova_fq __percpu *fq; /* Flush Queue */
fq 87 include/net/fq.h typedef void fq_skb_free_t(struct fq *,
fq 93 include/net/fq.h typedef bool fq_skb_filter_t(struct fq *,
fq 14 include/net/fq_impl.h static void fq_adjust_removal(struct fq *fq,
fq 23 include/net/fq_impl.h fq->backlog--;
fq 24 include/net/fq_impl.h fq->memory_usage -= skb->truesize;
fq 27 include/net/fq_impl.h static void fq_rejigger_backlog(struct fq *fq, struct fq_flow *flow)
fq 36 include/net/fq_impl.h list_for_each_entry_continue(i, &fq->backlogs, backlogchain)
fq 45 include/net/fq_impl.h static struct sk_buff *fq_flow_dequeue(struct fq *fq,
fq 50 include/net/fq_impl.h lockdep_assert_held(&fq->lock);
fq 56 include/net/fq_impl.h fq_adjust_removal(fq, flow, skb);
fq 57 include/net/fq_impl.h fq_rejigger_backlog(fq, flow);
fq 62 include/net/fq_impl.h static struct sk_buff *fq_tin_dequeue(struct fq *fq,
fq 70 include/net/fq_impl.h lockdep_assert_held(&fq->lock);
fq 83 include/net/fq_impl.h flow->deficit += fq->quantum;
fq 89 include/net/fq_impl.h skb = dequeue_func(fq, tin, flow);
fq 109 include/net/fq_impl.h static u32 fq_flow_idx(struct fq *fq, struct sk_buff *skb)
fq 111 include/net/fq_impl.h u32 hash = skb_get_hash_perturb(skb, &fq->perturbation);
fq 113 include/net/fq_impl.h return reciprocal_scale(hash, fq->flows_cnt);
fq 116 include/net/fq_impl.h static struct fq_flow *fq_flow_classify(struct fq *fq,
fq 123 include/net/fq_impl.h lockdep_assert_held(&fq->lock);
fq 125 include/net/fq_impl.h flow = &fq->flows[idx];
fq 127 include/net/fq_impl.h flow = get_default_func(fq, tin, idx, skb);
fq 129 include/net/fq_impl.h fq->collisions++;
fq 138 include/net/fq_impl.h static void fq_recalc_backlog(struct fq *fq,
fq 145 include/net/fq_impl.h list_add_tail(&flow->backlogchain, &fq->backlogs);
fq 148 include/net/fq_impl.h list_for_each_entry_continue_reverse(i, &fq->backlogs,
fq 156 include/net/fq_impl.h static void fq_tin_enqueue(struct fq *fq,
fq 165 include/net/fq_impl.h lockdep_assert_held(&fq->lock);
fq 167 include/net/fq_impl.h flow = fq_flow_classify(fq, tin, idx, skb, get_default_func);
fq 173 include/net/fq_impl.h fq->memory_usage += skb->truesize;
fq 174 include/net/fq_impl.h fq->backlog++;
fq 176 include/net/fq_impl.h fq_recalc_backlog(fq, tin, flow);
fq 179 include/net/fq_impl.h flow->deficit = fq->quantum;
fq 185 include/net/fq_impl.h oom = (fq->memory_usage > fq->memory_limit);
fq 186 include/net/fq_impl.h while (fq->backlog > fq->limit || oom) {
fq 187 include/net/fq_impl.h flow = list_first_entry_or_null(&fq->backlogs,
fq 193 include/net/fq_impl.h skb = fq_flow_dequeue(fq, flow);
fq 197 include/net/fq_impl.h free_func(fq, flow->tin, flow, skb);
fq 200 include/net/fq_impl.h fq->overlimit++;
fq 202 include/net/fq_impl.h fq->overmemory++;
fq 203 include/net/fq_impl.h oom = (fq->memory_usage > fq->memory_limit);
fq 208 include/net/fq_impl.h static void fq_flow_filter(struct fq *fq,
fq 217 include/net/fq_impl.h lockdep_assert_held(&fq->lock);
fq 220 include/net/fq_impl.h if (!filter_func(fq, tin, flow, skb, filter_data))
fq 224 include/net/fq_impl.h fq_adjust_removal(fq, flow, skb);
fq 225 include/net/fq_impl.h free_func(fq, tin, flow, skb);
fq 228 include/net/fq_impl.h fq_rejigger_backlog(fq, flow);
fq 231 include/net/fq_impl.h static void fq_tin_filter(struct fq *fq,
fq 239 include/net/fq_impl.h lockdep_assert_held(&fq->lock);
fq 242 include/net/fq_impl.h fq_flow_filter(fq, flow, filter_func, filter_data, free_func);
fq 244 include/net/fq_impl.h fq_flow_filter(fq, flow, filter_func, filter_data, free_func);
fq 247 include/net/fq_impl.h static void fq_flow_reset(struct fq *fq,
fq 253 include/net/fq_impl.h while ((skb = fq_flow_dequeue(fq, flow)))
fq 254 include/net/fq_impl.h free_func(fq, flow->tin, flow, skb);
fq 267 include/net/fq_impl.h static void fq_tin_reset(struct fq *fq,
fq 283 include/net/fq_impl.h fq_flow_reset(fq, flow, free_func);
fq 303 include/net/fq_impl.h static int fq_init(struct fq *fq, int flows_cnt)
fq 307 include/net/fq_impl.h memset(fq, 0, sizeof(fq[0]));
fq 308 include/net/fq_impl.h INIT_LIST_HEAD(&fq->backlogs);
fq 309 include/net/fq_impl.h spin_lock_init(&fq->lock);
fq 310 include/net/fq_impl.h fq->flows_cnt = max_t(u32, flows_cnt, 1);
fq 311 include/net/fq_impl.h get_random_bytes(&fq->perturbation, sizeof(fq->perturbation));
fq 312 include/net/fq_impl.h fq->quantum = 300;
fq 313 include/net/fq_impl.h fq->limit = 8192;
fq 314 include/net/fq_impl.h fq->memory_limit = 16 << 20; /* 16 MBytes */
fq 316 include/net/fq_impl.h fq->flows = kvcalloc(fq->flows_cnt, sizeof(fq->flows[0]), GFP_KERNEL);
fq 317 include/net/fq_impl.h if (!fq->flows)
fq 320 include/net/fq_impl.h for (i = 0; i < fq->flows_cnt; i++)
fq 321 include/net/fq_impl.h fq_flow_init(&fq->flows[i]);
fq 326 include/net/fq_impl.h static void fq_reset(struct fq *fq,
fq 331 include/net/fq_impl.h for (i = 0; i < fq->flows_cnt; i++)
fq 332 include/net/fq_impl.h fq_flow_reset(fq, &fq->flows[i], free_func);
fq 334 include/net/fq_impl.h kvfree(fq->flows);
fq 335 include/net/fq_impl.h fq->flows = NULL;
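fq_init() (lines 303-321 above) seeds the fair-queue defaults (quantum 300 bytes, limit 8192 packets, 16 MB memory cap); everything else is driven through caller-supplied callbacks invoked under fq->lock, which is what the lockdep_assert_held() entries enforce. A sketch of that consumer contract, kernel context assumed and with illustrative my_* names (mac80211's real versions appear later in this listing):

struct my_txq {
	struct fq_tin tin;
	struct fq_flow def_flow;   /* fallback storage when hash buckets collide */
};

static void my_skb_free(struct fq *fq, struct fq_tin *tin,
			struct fq_flow *flow, struct sk_buff *skb)
{
	dev_kfree_skb(skb);        /* called for overlimit/overmemory drops */
}

static struct fq_flow *my_default_flow(struct fq *fq, struct fq_tin *tin,
				       int idx, struct sk_buff *skb)
{
	return &container_of(tin, struct my_txq, tin)->def_flow;
}

static void my_enqueue(struct fq *fq, struct my_txq *txq, struct sk_buff *skb)
{
	u32 idx = fq_flow_idx(fq, skb);   /* perturbed hash -> flow bucket */

	spin_lock_bh(&fq->lock);          /* the core asserts this is held */
	fq_tin_enqueue(fq, &txq->tin, idx, skb, my_skb_free, my_default_flow);
	spin_unlock_bh(&fq->lock);
}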
fq 33 include/net/ipv6_frag.h struct frag_queue *fq = container_of(q, struct frag_queue, q);
fq 37 include/net/ipv6_frag.h fq->ecn = 0;
fq 48 include/net/ipv6_frag.h const struct inet_frag_queue *fq = data;
fq 50 include/net/ipv6_frag.h return jhash2((const u32 *)&fq->key.v6,
fq 58 include/net/ipv6_frag.h const struct inet_frag_queue *fq = ptr;
fq 60 include/net/ipv6_frag.h return !!memcmp(&fq->key, key, sizeof(*key));
fq 64 include/net/ipv6_frag.h ip6frag_expire_frag_queue(struct net *net, struct frag_queue *fq)
fq 70 include/net/ipv6_frag.h if (fq->q.fqdir->dead)
fq 72 include/net/ipv6_frag.h spin_lock(&fq->q.lock);
fq 74 include/net/ipv6_frag.h if (fq->q.flags & INET_FRAG_COMPLETE)
fq 77 include/net/ipv6_frag.h inet_frag_kill(&fq->q);
fq 79 include/net/ipv6_frag.h dev = dev_get_by_index_rcu(net, fq->iif);
fq 87 include/net/ipv6_frag.h if (!(fq->q.flags & INET_FRAG_FIRST_IN))
fq 94 include/net/ipv6_frag.h head = inet_frag_pull_head(&fq->q);
fq 99 include/net/ipv6_frag.h spin_unlock(&fq->q.lock);
fq 106 include/net/ipv6_frag.h spin_unlock(&fq->q.lock);
fq 109 include/net/ipv6_frag.h inet_frag_put(&fq->q);
fq 45 include/net/xdp_sock.h struct xsk_queue *fq;
fq 299 include/soc/fsl/qman.h } __packed fq; /* FQRN/FQRNI/FQRL/FQPN */
fq 33 net/ieee802154/6lowpan/reassembly.c static int lowpan_frag_reasm(struct lowpan_frag_queue *fq, struct sk_buff *skb,
fq 47 net/ieee802154/6lowpan/reassembly.c struct frag_queue *fq;
fq 49 net/ieee802154/6lowpan/reassembly.c fq = container_of(frag, struct frag_queue, q);
fq 51 net/ieee802154/6lowpan/reassembly.c spin_lock(&fq->q.lock);
fq 53 net/ieee802154/6lowpan/reassembly.c if (fq->q.flags & INET_FRAG_COMPLETE)
fq 56 net/ieee802154/6lowpan/reassembly.c inet_frag_kill(&fq->q);
fq 58 net/ieee802154/6lowpan/reassembly.c spin_unlock(&fq->q.lock);
fq 59 net/ieee802154/6lowpan/reassembly.c inet_frag_put(&fq->q);
fq 84 net/ieee802154/6lowpan/reassembly.c static int lowpan_frag_queue(struct lowpan_frag_queue *fq,
fq 97 net/ieee802154/6lowpan/reassembly.c if (fq->q.flags & INET_FRAG_COMPLETE)
fq 108 net/ieee802154/6lowpan/reassembly.c if (end < fq->q.len ||
fq 109 net/ieee802154/6lowpan/reassembly.c ((fq->q.flags & INET_FRAG_LAST_IN) && end != fq->q.len))
fq 111 net/ieee802154/6lowpan/reassembly.c fq->q.flags |= INET_FRAG_LAST_IN;
fq 112 net/ieee802154/6lowpan/reassembly.c fq->q.len = end;
fq 114 net/ieee802154/6lowpan/reassembly.c if (end > fq->q.len) {
fq 116 net/ieee802154/6lowpan/reassembly.c if (fq->q.flags & INET_FRAG_LAST_IN)
fq 118 net/ieee802154/6lowpan/reassembly.c fq->q.len = end;
fq 127 net/ieee802154/6lowpan/reassembly.c prev_tail = fq->q.fragments_tail;
fq 128 net/ieee802154/6lowpan/reassembly.c err = inet_frag_queue_insert(&fq->q, skb, offset, end);
fq 132 net/ieee802154/6lowpan/reassembly.c fq->q.stamp = skb->tstamp;
fq 134 net/ieee802154/6lowpan/reassembly.c fq->q.flags |= INET_FRAG_FIRST_IN;
fq 136 net/ieee802154/6lowpan/reassembly.c fq->q.meat += skb->len;
fq 137 net/ieee802154/6lowpan/reassembly.c add_frag_mem_limit(fq->q.fqdir, skb->truesize);
fq 139 net/ieee802154/6lowpan/reassembly.c if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
fq 140 net/ieee802154/6lowpan/reassembly.c fq->q.meat == fq->q.len) {
fq 145 net/ieee802154/6lowpan/reassembly.c res = lowpan_frag_reasm(fq, skb, prev_tail, ldev);
fq 163 net/ieee802154/6lowpan/reassembly.c static int lowpan_frag_reasm(struct lowpan_frag_queue *fq, struct sk_buff *skb,
fq 168 net/ieee802154/6lowpan/reassembly.c inet_frag_kill(&fq->q);
fq 170 net/ieee802154/6lowpan/reassembly.c reasm_data = inet_frag_reasm_prepare(&fq->q, skb, prev_tail);
fq 173 net/ieee802154/6lowpan/reassembly.c inet_frag_reasm_finish(&fq->q, skb, reasm_data, false);
fq 176 net/ieee802154/6lowpan/reassembly.c skb->tstamp = fq->q.stamp;
fq 177 net/ieee802154/6lowpan/reassembly.c fq->q.rb_fragments = RB_ROOT;
fq 178 net/ieee802154/6lowpan/reassembly.c fq->q.fragments_tail = NULL;
fq 179 net/ieee802154/6lowpan/reassembly.c fq->q.last_run_head = NULL;
fq 278 net/ieee802154/6lowpan/reassembly.c struct lowpan_frag_queue *fq;
fq 302 net/ieee802154/6lowpan/reassembly.c fq = fq_find(net, cb, &hdr.source, &hdr.dest);
fq 303 net/ieee802154/6lowpan/reassembly.c if (fq != NULL) {
fq 306 net/ieee802154/6lowpan/reassembly.c spin_lock(&fq->q.lock);
fq 307 net/ieee802154/6lowpan/reassembly.c ret = lowpan_frag_queue(fq, skb, frag_type);
fq 308 net/ieee802154/6lowpan/reassembly.c spin_unlock(&fq->q.lock);
fq 310 net/ieee802154/6lowpan/reassembly.c inet_frag_put(&fq->q);
fq 493 net/ieee802154/6lowpan/reassembly.c const struct inet_frag_queue *fq = data;
fq 495 net/ieee802154/6lowpan/reassembly.c return jhash2((const u32 *)&fq->key,
fq 502 net/ieee802154/6lowpan/reassembly.c const struct inet_frag_queue *fq = ptr;
fq 504 net/ieee802154/6lowpan/reassembly.c return !!memcmp(&fq->key, key, sizeof(*key));
fq 130 net/ipv4/inet_fragment.c struct inet_frag_queue *fq = ptr;
fq 133 net/ipv4/inet_fragment.c count = del_timer_sync(&fq->timer) ? 1 : 0;
fq 135 net/ipv4/inet_fragment.c spin_lock_bh(&fq->lock);
fq 136 net/ipv4/inet_fragment.c if (!(fq->flags & INET_FRAG_COMPLETE)) {
fq 137 net/ipv4/inet_fragment.c fq->flags |= INET_FRAG_COMPLETE;
fq 139 net/ipv4/inet_fragment.c } else if (fq->flags & INET_FRAG_HASH_DEAD) {
fq 142 net/ipv4/inet_fragment.c spin_unlock_bh(&fq->lock);
fq 144 net/ipv4/inet_fragment.c if (refcount_sub_and_test(count, &fq->refcnt))
fq 145 net/ipv4/inet_fragment.c inet_frag_destroy(fq);
fq 194 net/ipv4/inet_fragment.c void inet_frag_kill(struct inet_frag_queue *fq)
fq 196 net/ipv4/inet_fragment.c if (del_timer(&fq->timer))
fq 197 net/ipv4/inet_fragment.c refcount_dec(&fq->refcnt);
fq 199 net/ipv4/inet_fragment.c if (!(fq->flags & INET_FRAG_COMPLETE)) {
fq 200 net/ipv4/inet_fragment.c struct fqdir *fqdir = fq->fqdir;
fq 202 net/ipv4/inet_fragment.c fq->flags |= INET_FRAG_COMPLETE;
fq 210 net/ipv4/inet_fragment.c rhashtable_remove_fast(&fqdir->rhashtable, &fq->node,
fq 212 net/ipv4/inet_fragment.c refcount_dec(&fq->refcnt);
fq 214 net/ipv4/inet_fragment.c fq->flags |= INET_FRAG_HASH_DEAD;
fq 324 net/ipv4/inet_fragment.c struct inet_frag_queue *fq = NULL, *prev;
fq 333 net/ipv4/inet_fragment.c fq = inet_frag_create(fqdir, key, &prev);
fq 335 net/ipv4/inet_fragment.c fq = prev;
fq 336 net/ipv4/inet_fragment.c if (!refcount_inc_not_zero(&fq->refcnt))
fq 337 net/ipv4/inet_fragment.c fq = NULL;
fq 340 net/ipv4/inet_fragment.c return fq;
fq 715 net/ipv4/ip_fragment.c const struct inet_frag_queue *fq = data;
fq 717 net/ipv4/ip_fragment.c return jhash2((const u32 *)&fq->key.v4,
fq 724 net/ipv4/ip_fragment.c const struct inet_frag_queue *fq = ptr;
fq 726 net/ipv4/ip_fragment.c return !!memcmp(&fq->key, key, sizeof(*key));
fq 130 net/ipv6/netfilter/nf_conntrack_reasm.c static int nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *skb,
fq 141 net/ipv6/netfilter/nf_conntrack_reasm.c struct frag_queue *fq;
fq 143 net/ipv6/netfilter/nf_conntrack_reasm.c fq = container_of(frag, struct frag_queue, q);
fq 145 net/ipv6/netfilter/nf_conntrack_reasm.c ip6frag_expire_frag_queue(fq->q.fqdir->net, fq);
fq 169 net/ipv6/netfilter/nf_conntrack_reasm.c static int nf_ct_frag6_queue(struct frag_queue *fq, struct sk_buff *skb,
fq 178 net/ipv6/netfilter/nf_conntrack_reasm.c if (fq->q.flags & INET_FRAG_COMPLETE) {
fq 208 net/ipv6/netfilter/nf_conntrack_reasm.c if (end < fq->q.len ||
fq 209 net/ipv6/netfilter/nf_conntrack_reasm.c ((fq->q.flags & INET_FRAG_LAST_IN) && end != fq->q.len)) {
fq 213 net/ipv6/netfilter/nf_conntrack_reasm.c fq->q.flags |= INET_FRAG_LAST_IN;
fq 214 net/ipv6/netfilter/nf_conntrack_reasm.c fq->q.len = end;
fq 224 net/ipv6/netfilter/nf_conntrack_reasm.c inet_frag_kill(&fq->q);
fq 227 net/ipv6/netfilter/nf_conntrack_reasm.c if (end > fq->q.len) {
fq 229 net/ipv6/netfilter/nf_conntrack_reasm.c if (fq->q.flags & INET_FRAG_LAST_IN) {
fq 233 net/ipv6/netfilter/nf_conntrack_reasm.c fq->q.len = end;
fq 255 net/ipv6/netfilter/nf_conntrack_reasm.c prev = fq->q.fragments_tail;
fq 256 net/ipv6/netfilter/nf_conntrack_reasm.c err = inet_frag_queue_insert(&fq->q, skb, offset, end);
fq 267 net/ipv6/netfilter/nf_conntrack_reasm.c fq->iif = dev->ifindex;
fq 269 net/ipv6/netfilter/nf_conntrack_reasm.c fq->q.stamp = skb->tstamp;
fq 270 net/ipv6/netfilter/nf_conntrack_reasm.c fq->q.meat += skb->len;
fq 271 net/ipv6/netfilter/nf_conntrack_reasm.c fq->ecn |= ecn;
fq 272 net/ipv6/netfilter/nf_conntrack_reasm.c if (payload_len > fq->q.max_size)
fq 273 net/ipv6/netfilter/nf_conntrack_reasm.c fq->q.max_size = payload_len;
fq 274 net/ipv6/netfilter/nf_conntrack_reasm.c add_frag_mem_limit(fq->q.fqdir, skb->truesize);
fq 280 net/ipv6/netfilter/nf_conntrack_reasm.c fq->nhoffset = nhoff;
fq 281 net/ipv6/netfilter/nf_conntrack_reasm.c fq->q.flags |= INET_FRAG_FIRST_IN;
fq 284 net/ipv6/netfilter/nf_conntrack_reasm.c if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
fq 285 net/ipv6/netfilter/nf_conntrack_reasm.c fq->q.meat == fq->q.len) {
fq 289 net/ipv6/netfilter/nf_conntrack_reasm.c err = nf_ct_frag6_reasm(fq, skb, prev, dev);
fq 302 net/ipv6/netfilter/nf_conntrack_reasm.c inet_frag_kill(&fq->q);
fq 315 net/ipv6/netfilter/nf_conntrack_reasm.c static int nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *skb,
fq 322 net/ipv6/netfilter/nf_conntrack_reasm.c inet_frag_kill(&fq->q);
fq 324 net/ipv6/netfilter/nf_conntrack_reasm.c ecn = ip_frag_ecn_table[fq->ecn];
fq 328 net/ipv6/netfilter/nf_conntrack_reasm.c reasm_data = inet_frag_reasm_prepare(&fq->q, skb, prev_tail);
fq 333 net/ipv6/netfilter/nf_conntrack_reasm.c sizeof(struct ipv6hdr) + fq->q.len -
fq 343 net/ipv6/netfilter/nf_conntrack_reasm.c skb_network_header(skb)[fq->nhoffset] = skb_transport_header(skb)[0];
fq 351 net/ipv6/netfilter/nf_conntrack_reasm.c inet_frag_reasm_finish(&fq->q, skb, reasm_data, false);
fq 357 net/ipv6/netfilter/nf_conntrack_reasm.c IP6CB(skb)->frag_max_size = sizeof(struct ipv6hdr) + fq->q.max_size;
fq 365 net/ipv6/netfilter/nf_conntrack_reasm.c fq->q.rb_fragments = RB_ROOT;
fq 366 net/ipv6/netfilter/nf_conntrack_reasm.c fq->q.fragments_tail = NULL;
fq 367 net/ipv6/netfilter/nf_conntrack_reasm.c fq->q.last_run_head = NULL;
fq 372 net/ipv6/netfilter/nf_conntrack_reasm.c inet_frag_kill(&fq->q);
fq 444 net/ipv6/netfilter/nf_conntrack_reasm.c struct frag_queue *fq;
fq 465 net/ipv6/netfilter/nf_conntrack_reasm.c fq = fq_find(net, fhdr->identification, user, hdr,
fq 467 net/ipv6/netfilter/nf_conntrack_reasm.c if (fq == NULL) {
fq 472 net/ipv6/netfilter/nf_conntrack_reasm.c spin_lock_bh(&fq->q.lock);
fq 474 net/ipv6/netfilter/nf_conntrack_reasm.c ret = nf_ct_frag6_queue(fq, skb, fhdr, nhoff);
fq 480 net/ipv6/netfilter/nf_conntrack_reasm.c spin_unlock_bh(&fq->q.lock);
fq 481 net/ipv6/netfilter/nf_conntrack_reasm.c inet_frag_put(&fq->q);
fq 68 net/ipv6/reassembly.c static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *skb,
fq 74 net/ipv6/reassembly.c struct frag_queue *fq;
fq 76 net/ipv6/reassembly.c fq = container_of(frag, struct frag_queue, q);
fq 78 net/ipv6/reassembly.c ip6frag_expire_frag_queue(fq->q.fqdir->net, fq);
fq 104 net/ipv6/reassembly.c static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
fq 115 net/ipv6/reassembly.c if (fq->q.flags & INET_FRAG_COMPLETE)
fq 145 net/ipv6/reassembly.c if (end < fq->q.len ||
fq 146 net/ipv6/reassembly.c ((fq->q.flags & INET_FRAG_LAST_IN) && end != fq->q.len))
fq 148 net/ipv6/reassembly.c fq->q.flags |= INET_FRAG_LAST_IN;
fq 149 net/ipv6/reassembly.c fq->q.len = end;
fq 161 net/ipv6/reassembly.c if (end > fq->q.len) {
fq 163 net/ipv6/reassembly.c if (fq->q.flags & INET_FRAG_LAST_IN)
fq 165 net/ipv6/reassembly.c fq->q.len = end;
fq 186 net/ipv6/reassembly.c prev_tail = fq->q.fragments_tail;
fq 187 net/ipv6/reassembly.c err = inet_frag_queue_insert(&fq->q, skb, offset, end);
fq 192 net/ipv6/reassembly.c fq->iif = dev->ifindex;
fq 194 net/ipv6/reassembly.c fq->q.stamp = skb->tstamp;
fq 195 net/ipv6/reassembly.c fq->q.meat += skb->len;
fq 196 net/ipv6/reassembly.c fq->ecn |= ecn;
fq 197 net/ipv6/reassembly.c add_frag_mem_limit(fq->q.fqdir, skb->truesize);
fq 200 net/ipv6/reassembly.c if (fragsize > fq->q.max_size)
fq 201 net/ipv6/reassembly.c fq->q.max_size = fragsize;
fq 207 net/ipv6/reassembly.c fq->nhoffset = nhoff;
fq 208 net/ipv6/reassembly.c fq->q.flags |= INET_FRAG_FIRST_IN;
fq 211 net/ipv6/reassembly.c if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
fq 212 net/ipv6/reassembly.c fq->q.meat == fq->q.len) {
fq 216 net/ipv6/reassembly.c err = ip6_frag_reasm(fq, skb, prev_tail, dev);
fq 233 net/ipv6/reassembly.c inet_frag_kill(&fq->q);
fq 248 net/ipv6/reassembly.c static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *skb,
fq 251 net/ipv6/reassembly.c struct net *net = fq->q.fqdir->net;
fq 257 net/ipv6/reassembly.c inet_frag_kill(&fq->q);
fq 259 net/ipv6/reassembly.c ecn = ip_frag_ecn_table[fq->ecn];
fq 263 net/ipv6/reassembly.c reasm_data = inet_frag_reasm_prepare(&fq->q, skb, prev_tail);
fq 268 net/ipv6/reassembly.c sizeof(struct ipv6hdr) + fq->q.len -
fq 275 net/ipv6/reassembly.c nhoff = fq->nhoffset;
fq 285 net/ipv6/reassembly.c inet_frag_reasm_finish(&fq->q, skb, reasm_data, true);
fq 292 net/ipv6/reassembly.c IP6CB(skb)->frag_max_size = fq->q.max_size;
fq 301 net/ipv6/reassembly.c fq->q.rb_fragments = RB_ROOT;
fq 302 net/ipv6/reassembly.c fq->q.fragments_tail = NULL;
fq 303 net/ipv6/reassembly.c fq->q.last_run_head = NULL;
fq 315 net/ipv6/reassembly.c inet_frag_kill(&fq->q);
fq 322 net/ipv6/reassembly.c struct frag_queue *fq;
fq 355 net/ipv6/reassembly.c fq = fq_find(net, fhdr->identification, hdr, iif);
fq 356 net/ipv6/reassembly.c if (fq) {
fq 360 net/ipv6/reassembly.c spin_lock(&fq->q.lock);
fq 362 net/ipv6/reassembly.c fq->iif = iif;
fq 363 net/ipv6/reassembly.c ret = ip6_frag_queue(fq, skb, fhdr, IP6CB(skb)->nhoff,
fq 366 net/ipv6/reassembly.c spin_unlock(&fq->q.lock);
fq 367 net/ipv6/reassembly.c inet_frag_put(&fq->q);
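The identical completion test shows up in 6lowpan (line 139), nf_conntrack_reasm (line 284) and ipv6 reassembly (line 211): a queue reassembles only when both end fragments have arrived and the accumulated bytes ("meat") exactly cover the declared total length. A standalone model of that check (flag values illustrative, not the kernel's):

#include <assert.h>
#include <stdbool.h>

#define FRAG_FIRST_IN 0x1
#define FRAG_LAST_IN  0x2

struct frag_q { unsigned flags, meat, len; };

static bool frag_complete(const struct frag_q *q)
{
	/* exactly first|last seen, and no byte missing or duplicated */
	return q->flags == (FRAG_FIRST_IN | FRAG_LAST_IN) && q->meat == q->len;
}

int main(void)
{
	struct frag_q q = { .flags = FRAG_FIRST_IN | FRAG_LAST_IN,
			    .meat = 1280, .len = 1280 };

	assert(frag_complete(&q));
	q.meat -= 8;              /* a hole in the middle: not complete */
	assert(!frag_complete(&q));
	return 0;
}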
net/ipv6/reassembly.c if (fragsize > fq->q.max_size) fq 201 net/ipv6/reassembly.c fq->q.max_size = fragsize; fq 207 net/ipv6/reassembly.c fq->nhoffset = nhoff; fq 208 net/ipv6/reassembly.c fq->q.flags |= INET_FRAG_FIRST_IN; fq 211 net/ipv6/reassembly.c if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) && fq 212 net/ipv6/reassembly.c fq->q.meat == fq->q.len) { fq 216 net/ipv6/reassembly.c err = ip6_frag_reasm(fq, skb, prev_tail, dev); fq 233 net/ipv6/reassembly.c inet_frag_kill(&fq->q); fq 248 net/ipv6/reassembly.c static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *skb, fq 251 net/ipv6/reassembly.c struct net *net = fq->q.fqdir->net; fq 257 net/ipv6/reassembly.c inet_frag_kill(&fq->q); fq 259 net/ipv6/reassembly.c ecn = ip_frag_ecn_table[fq->ecn]; fq 263 net/ipv6/reassembly.c reasm_data = inet_frag_reasm_prepare(&fq->q, skb, prev_tail); fq 268 net/ipv6/reassembly.c sizeof(struct ipv6hdr) + fq->q.len - fq 275 net/ipv6/reassembly.c nhoff = fq->nhoffset; fq 285 net/ipv6/reassembly.c inet_frag_reasm_finish(&fq->q, skb, reasm_data, true); fq 292 net/ipv6/reassembly.c IP6CB(skb)->frag_max_size = fq->q.max_size; fq 301 net/ipv6/reassembly.c fq->q.rb_fragments = RB_ROOT; fq 302 net/ipv6/reassembly.c fq->q.fragments_tail = NULL; fq 303 net/ipv6/reassembly.c fq->q.last_run_head = NULL; fq 315 net/ipv6/reassembly.c inet_frag_kill(&fq->q); fq 322 net/ipv6/reassembly.c struct frag_queue *fq; fq 355 net/ipv6/reassembly.c fq = fq_find(net, fhdr->identification, hdr, iif); fq 356 net/ipv6/reassembly.c if (fq) { fq 360 net/ipv6/reassembly.c spin_lock(&fq->q.lock); fq 362 net/ipv6/reassembly.c fq->iif = iif; fq 363 net/ipv6/reassembly.c ret = ip6_frag_queue(fq, skb, fhdr, IP6CB(skb)->nhoff, fq 366 net/ipv6/reassembly.c spin_unlock(&fq->q.lock); fq 367 net/ipv6/reassembly.c inet_frag_put(&fq->q); fq 194 net/mac80211/agg-tx.c struct fq *fq; fq 202 net/mac80211/agg-tx.c fq = &sdata->local->fq; fq 205 net/mac80211/agg-tx.c spin_lock_bh(&fq->lock); fq 207 net/mac80211/agg-tx.c spin_unlock_bh(&fq->lock); fq 3903 net/mac80211/cfg.c spin_lock_bh(&local->fq.lock); fq 3921 net/mac80211/cfg.c txqstats->backlog_packets = local->fq.backlog; fq 3922 net/mac80211/cfg.c txqstats->backlog_bytes = local->fq.memory_usage; fq 3923 net/mac80211/cfg.c txqstats->overlimit = local->fq.overlimit; fq 3924 net/mac80211/cfg.c txqstats->overmemory = local->fq.overmemory; fq 3925 net/mac80211/cfg.c txqstats->collisions = local->fq.collisions; fq 3926 net/mac80211/cfg.c txqstats->max_flows = local->fq.flows_cnt; fq 3931 net/mac80211/cfg.c spin_unlock_bh(&local->fq.lock); fq 79 net/mac80211/debugfs.c struct fq *fq = &local->fq; fq 83 net/mac80211/debugfs.c spin_lock_bh(&local->fq.lock); fq 97 net/mac80211/debugfs.c fq->flows_cnt, fq 98 net/mac80211/debugfs.c fq->backlog, fq 99 net/mac80211/debugfs.c fq->overmemory, fq 100 net/mac80211/debugfs.c fq->overlimit, fq 101 net/mac80211/debugfs.c fq->collisions, fq 102 net/mac80211/debugfs.c fq->memory_usage, fq 103 net/mac80211/debugfs.c fq->memory_limit, fq 104 net/mac80211/debugfs.c fq->limit, fq 105 net/mac80211/debugfs.c fq->quantum); fq 108 net/mac80211/debugfs.c spin_unlock_bh(&local->fq.lock); fq 134 net/mac80211/debugfs.c if (sscanf(buf, "fq_limit %u", &local->fq.limit) == 1) fq 136 net/mac80211/debugfs.c else if (sscanf(buf, "fq_memory_limit %u", &local->fq.memory_limit) == 1) fq 138 net/mac80211/debugfs.c else if (sscanf(buf, "fq_quantum %u", &local->fq.quantum) == 1) fq 498 net/mac80211/debugfs_netdev.c spin_lock_bh(&local->fq.lock); fq 517 
fq 151 net/mac80211/debugfs_sta.c spin_lock_bh(&local->fq.lock);
fq 188 net/mac80211/debugfs_sta.c spin_unlock_bh(&local->fq.lock);
fq 1135 net/mac80211/ieee80211_i.h struct fq fq;
fq 2153 net/mac80211/sta_info.c spin_lock_bh(&local->fq.lock);
fq 2161 net/mac80211/sta_info.c spin_unlock_bh(&local->fq.lock);
fq 1310 net/mac80211/tx.c struct fq *fq;
fq 1315 net/mac80211/tx.c fq = &local->fq;
fq 1320 net/mac80211/tx.c flow = &fq->flows[cvars - local->cvars];
fq 1322 net/mac80211/tx.c return fq_flow_dequeue(fq, flow);
fq 1339 net/mac80211/tx.c static struct sk_buff *fq_tin_dequeue_func(struct fq *fq,
fq 1349 net/mac80211/tx.c local = container_of(fq, struct ieee80211_local, fq);
fq 1364 net/mac80211/tx.c cvars = &local->cvars[flow - fq->flows];
fq 1377 net/mac80211/tx.c static void fq_skb_free_func(struct fq *fq,
fq 1384 net/mac80211/tx.c local = container_of(fq, struct ieee80211_local, fq);
fq 1388 net/mac80211/tx.c static struct fq_flow *fq_flow_get_default_func(struct fq *fq,
fq 1403 net/mac80211/tx.c struct fq *fq = &local->fq;
fq 1405 net/mac80211/tx.c u32 flow_idx = fq_flow_idx(fq, skb);
fq 1409 net/mac80211/tx.c spin_lock_bh(&fq->lock);
fq 1410 net/mac80211/tx.c fq_tin_enqueue(fq, tin, flow_idx, skb,
fq 1413 net/mac80211/tx.c spin_unlock_bh(&fq->lock);
fq 1416 net/mac80211/tx.c static bool fq_vlan_filter_func(struct fq *fq, struct fq_tin *tin,
fq 1428 net/mac80211/tx.c struct fq *fq = &local->fq;
fq 1444 net/mac80211/tx.c spin_lock_bh(&fq->lock);
fq 1445 net/mac80211/tx.c fq_tin_filter(fq, tin, fq_vlan_filter_func, &sdata->vif,
fq 1447 net/mac80211/tx.c spin_unlock_bh(&fq->lock);
fq 1495 net/mac80211/tx.c struct fq *fq = &local->fq;
fq 1498 net/mac80211/tx.c spin_lock_bh(&fq->lock);
fq 1499 net/mac80211/tx.c fq_tin_reset(fq, tin, fq_skb_free_func);
fq 1501 net/mac80211/tx.c spin_unlock_bh(&fq->lock);
fq 1511 net/mac80211/tx.c local->fq.limit = local->hw.wiphy->txq_limit;
fq 1513 net/mac80211/tx.c local->hw.wiphy->txq_limit = local->fq.limit;
fq 1516 net/mac80211/tx.c local->fq.memory_limit = local->hw.wiphy->txq_memory_limit;
fq 1518 net/mac80211/tx.c local->hw.wiphy->txq_memory_limit = local->fq.memory_limit;
fq 1521 net/mac80211/tx.c local->fq.quantum = local->hw.wiphy->txq_quantum;
fq 1523 net/mac80211/tx.c local->hw.wiphy->txq_quantum = local->fq.quantum;
fq 1528 net/mac80211/tx.c struct fq *fq = &local->fq;
fq 1537 net/mac80211/tx.c ret = fq_init(fq, 4096);
fq 1556 net/mac80211/tx.c fq->memory_limit = 4 << 20; /* 4 Mbytes */
fq 1563 net/mac80211/tx.c local->cvars = kcalloc(fq->flows_cnt, sizeof(local->cvars[0]),
fq 1566 net/mac80211/tx.c spin_lock_bh(&fq->lock);
fq 1567 net/mac80211/tx.c fq_reset(fq, fq_skb_free_func);
fq 1568 net/mac80211/tx.c spin_unlock_bh(&fq->lock);
fq 1572 net/mac80211/tx.c for (i = 0; i < fq->flows_cnt; i++)
fq 1582 net/mac80211/tx.c struct fq *fq = &local->fq;
fq 1590 net/mac80211/tx.c spin_lock_bh(&fq->lock);
fq 1591 net/mac80211/tx.c fq_reset(fq, fq_skb_free_func);
fq 1592 net/mac80211/tx.c spin_unlock_bh(&fq->lock);
fq 3232 net/mac80211/tx.c struct fq *fq = &local->fq;
fq 3273 net/mac80211/tx.c flow_idx = fq_flow_idx(fq, skb);
fq 3275 net/mac80211/tx.c spin_lock_bh(&fq->lock);
fq 3282 net/mac80211/tx.c flow = fq_flow_classify(fq, tin, flow_idx, skb,
fq 3346 net/mac80211/tx.c fq->memory_usage += head->truesize - orig_truesize;
fq 3351 net/mac80211/tx.c fq_recalc_backlog(fq, tin, flow);
fq 3354 net/mac80211/tx.c spin_unlock_bh(&fq->lock);
fq 3558 net/mac80211/tx.c struct fq *fq = &local->fq;
fq 3568 net/mac80211/tx.c spin_lock_bh(&fq->lock);
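The tx.c hits at 1403-1413 outline mac80211's enqueue path: the flow index is hashed from the skb outside the lock, and fq_tin_enqueue() then runs under fq->lock, passing the fq_skb_free_func/fq_flow_get_default_func callbacks that also appear above. A sketch of that shape (the function name is hypothetical, the tin is assumed to be resolved by the caller, and the exact fq_tin_enqueue() argument list depends on the include/net/fq_impl.h revision these entries come from):

	static void txq_enqueue_sketch(struct ieee80211_local *local,
				       struct fq_tin *tin, struct sk_buff *skb)
	{
		struct fq *fq = &local->fq;
		u32 flow_idx = fq_flow_idx(fq, skb);	/* hash skb to a flow */

		spin_lock_bh(&fq->lock);
		fq_tin_enqueue(fq, tin, flow_idx, skb,
			       fq_skb_free_func, fq_flow_get_default_func);
		spin_unlock_bh(&fq->lock);
	}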
fq 3584 net/mac80211/tx.c skb = fq_tin_dequeue(fq, tin, fq_tin_dequeue_func);
fq 3588 net/mac80211/tx.c spin_unlock_bh(&fq->lock);
fq 3653 net/mac80211/tx.c spin_lock_bh(&fq->lock);
fq 3655 net/mac80211/tx.c spin_unlock_bh(&fq->lock);
fq 3698 net/mac80211/tx.c spin_unlock_bh(&fq->lock);
fq 244 net/mac80211/util.c struct fq *fq = &local->fq;
fq 251 net/mac80211/util.c spin_lock(&fq->lock);
fq 277 net/mac80211/util.c spin_unlock(&fq->lock);
fq 279 net/mac80211/util.c spin_lock(&fq->lock);
fq 292 net/mac80211/util.c spin_unlock(&fq->lock);
fq 298 net/mac80211/util.c spin_unlock(&fq->lock);
fq 491 net/mac80211/util.c spin_lock(&local->fq.lock);
fq 493 net/mac80211/util.c spin_unlock(&local->fq.lock);
fq 237 net/xdp/xdp_umem.c if (umem->fq) {
fq 238 net/xdp/xdp_umem.c xskq_destroy(umem->fq);
fq 239 net/xdp/xdp_umem.c umem->fq = NULL;
fq 462 net/xdp/xdp_umem.c return umem->fq && umem->cq;
fq 37 net/xdp/xsk.c READ_ONCE(xs->umem->fq);
fq 42 net/xdp/xsk.c return xskq_has_addrs(umem->fq, cnt);
fq 48 net/xdp/xsk.c return xskq_peek_addr(umem->fq, addr, umem);
fq 54 net/xdp/xsk.c xskq_discard_addr(umem->fq);
fq 63 net/xdp/xsk.c umem->fq->ring->flags |= XDP_RING_NEED_WAKEUP;
fq 90 net/xdp/xsk.c umem->fq->ring->flags &= ~XDP_RING_NEED_WAKEUP;
fq 150 net/xdp/xsk.c if (!xskq_peek_addr(xs->umem->fq, &addr, xs->umem) ||
fq 171 net/xdp/xsk.c xskq_discard_addr(xs->umem->fq);
fq 238 net/xdp/xsk.c if (!xskq_peek_addr(xs->umem->fq, &addr, xs->umem) ||
fq 253 net/xdp/xsk.c xskq_discard_addr(xs->umem->fq);
fq 692 net/xdp/xsk.c xskq_set_umem(xs->umem->fq, xs->umem->size,
fq 820 net/xdp/xsk.c q = (optname == XDP_UMEM_FILL_RING) ? &xs->umem->fq :
fq 988 net/xdp/xsk.c q = READ_ONCE(umem->fq);
fq 70 net/xdp/xsk_diag.c if (!err && umem->fq)
fq 71 net/xdp/xsk_diag.c err = xsk_diag_put_ring(umem->fq, XDP_DIAG_UMEM_FILL_RING, nlskb);
fq 81 samples/bpf/xdpsock_user.c struct xsk_ring_prod fq;
fq 300 samples/bpf/xdpsock_user.c ret = xsk_umem__create(&umem->umem, buffer, size, &umem->fq, &umem->cq,
fq 337 samples/bpf/xdpsock_user.c ret = xsk_ring_prod__reserve(&xsk->umem->fq,
fq 343 samples/bpf/xdpsock_user.c *xsk_ring_prod__fill_addr(&xsk->umem->fq, idx++) =
fq 345 samples/bpf/xdpsock_user.c xsk_ring_prod__submit(&xsk->umem->fq,
fq 509 samples/bpf/xdpsock_user.c ret = xsk_ring_prod__reserve(&umem->fq, rcvd, &idx_fq);
fq 513 samples/bpf/xdpsock_user.c if (xsk_ring_prod__needs_wakeup(&umem->fq))
fq 515 samples/bpf/xdpsock_user.c ret = xsk_ring_prod__reserve(&umem->fq, rcvd, &idx_fq);
fq 519 samples/bpf/xdpsock_user.c *xsk_ring_prod__fill_addr(&umem->fq, idx_fq++) =
fq 522 samples/bpf/xdpsock_user.c xsk_ring_prod__submit(&xsk->umem->fq, rcvd);
fq 556 samples/bpf/xdpsock_user.c if (xsk_ring_prod__needs_wakeup(&xsk->umem->fq))
fq 561 samples/bpf/xdpsock_user.c ret = xsk_ring_prod__reserve(&xsk->umem->fq, rcvd, &idx_fq);
fq 565 samples/bpf/xdpsock_user.c if (xsk_ring_prod__needs_wakeup(&xsk->umem->fq))
fq 567 samples/bpf/xdpsock_user.c ret = xsk_ring_prod__reserve(&xsk->umem->fq, rcvd, &idx_fq);
fq 579 samples/bpf/xdpsock_user.c *xsk_ring_prod__fill_addr(&xsk->umem->fq, idx_fq++) = orig;
fq 582 samples/bpf/xdpsock_user.c xsk_ring_prod__submit(&xsk->umem->fq, rcvd);
fq 671 samples/bpf/xdpsock_user.c if (xsk_ring_prod__needs_wakeup(&xsk->umem->fq))
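The samples/bpf/xdpsock_user.c hits trace the userspace side of the AF_XDP fill queue: reserve slots on the producer ring, write UMEM frame addresses into them, then submit so the kernel can land received packets in those frames. A condensed sketch using libbpf's xsk.h producer helpers (refill_fq and the addrs array are hypothetical; error handling and the need-wakeup kick via poll()/recvfrom() are elided):

	#include <bpf/xsk.h>

	static void refill_fq(struct xsk_ring_prod *fq, const __u64 *addrs,
			      unsigned int n)
	{
		__u32 idx;
		unsigned int i;

		/* Spin until n slots are free; the real sample also kicks
		 * the kernel when xsk_ring_prod__needs_wakeup() is set. */
		while (xsk_ring_prod__reserve(fq, n, &idx) != n)
			;

		for (i = 0; i < n; i++)	/* hand frames back to the kernel */
			*xsk_ring_prod__fill_addr(fq, idx++) = addrs[i];

		xsk_ring_prod__submit(fq, n);	/* publish the producer index */
	}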