q 152 arch/alpha/include/asm/atomic.h ATOMIC64_OP(op, op##q) \
q 153 arch/alpha/include/asm/atomic.h ATOMIC64_OP_RETURN(op, op##q) \
q 154 arch/alpha/include/asm/atomic.h ATOMIC64_FETCH_OP(op, op##q)
q 227 arch/alpha/include/asm/core_wildfire.h #define WILDFIRE_QBB(q) ((~((long)(q)) & WILDFIRE_QBB_MASK) << 36)
q 230 arch/alpha/include/asm/core_wildfire.h #define WILDFIRE_QBB_IO(q) (WILDFIRE_BASE | WILDFIRE_QBB(q))
q 231 arch/alpha/include/asm/core_wildfire.h #define WILDFIRE_QBB_HOSE(q,h) (WILDFIRE_QBB_IO(q) | WILDFIRE_HOSE(h))
q 233 arch/alpha/include/asm/core_wildfire.h #define WILDFIRE_MEM(q,h) (WILDFIRE_QBB_HOSE(q,h) | 0x000000000UL)
q 234 arch/alpha/include/asm/core_wildfire.h #define WILDFIRE_CONF(q,h) (WILDFIRE_QBB_HOSE(q,h) | 0x1FE000000UL)
q 235 arch/alpha/include/asm/core_wildfire.h #define WILDFIRE_IO(q,h) (WILDFIRE_QBB_HOSE(q,h) | 0x1FF000000UL)
q 237 arch/alpha/include/asm/core_wildfire.h #define WILDFIRE_qsd(q) \
q 238 arch/alpha/include/asm/core_wildfire.h ((wildfire_qsd *)(WILDFIRE_QBB_IO(q)|WILDFIRE_QSD_ENTITY_SLOW|(((1UL<<13)-1)<<23)))
q 243 arch/alpha/include/asm/core_wildfire.h #define WILDFIRE_qsa(q) \
q 244 arch/alpha/include/asm/core_wildfire.h ((wildfire_qsa *)(WILDFIRE_QBB_IO(q)|WILDFIRE_QSA_ENTITY|(((1UL<<13)-1)<<23)))
q 246 arch/alpha/include/asm/core_wildfire.h #define WILDFIRE_iop(q) \
q 247 arch/alpha/include/asm/core_wildfire.h ((wildfire_iop *)(WILDFIRE_QBB_IO(q)|WILDFIRE_IOP_ENTITY|(((1UL<<13)-1)<<23)))
q 249 arch/alpha/include/asm/core_wildfire.h #define WILDFIRE_gp(q) \
q 250 arch/alpha/include/asm/core_wildfire.h ((wildfire_gp *)(WILDFIRE_QBB_IO(q)|WILDFIRE_GP_ENTITY|(((1UL<<13)-1)<<23)))
q 252 arch/alpha/include/asm/core_wildfire.h #define WILDFIRE_pca(q,pca) \
q 253 arch/alpha/include/asm/core_wildfire.h ((wildfire_pca *)(WILDFIRE_QBB_IO(q)|WILDFIRE_PCA_ENTITY(pca)|(((1UL<<13)-1)<<23)))
q 255 arch/alpha/include/asm/core_wildfire.h #define WILDFIRE_ne(q,pca) \
q 256 arch/alpha/include/asm/core_wildfire.h ((wildfire_ne *)(WILDFIRE_QBB_IO(q)|WILDFIRE_PCA_ENTITY(pca)|(((1UL<<13)-1)<<23)|(1UL<<16)))
q 258 arch/alpha/include/asm/core_wildfire.h #define WILDFIRE_fe(q,pca) \
q 259 arch/alpha/include/asm/core_wildfire.h ((wildfire_fe *)(WILDFIRE_QBB_IO(q)|WILDFIRE_PCA_ENTITY(pca)|(((1UL<<13)-1)<<23)|(3UL<<15)))
q 261 arch/alpha/include/asm/core_wildfire.h #define WILDFIRE_pci(q,h) \
q 262 arch/alpha/include/asm/core_wildfire.h ((wildfire_pci *)(WILDFIRE_QBB_IO(q)|WILDFIRE_PCA_ENTITY(((h)&6)>>1)|((((h)&1)|2)<<16)|(((1UL<<13)-1)<<23)))
q 59 arch/alpha/kernel/core_marvel.c unsigned long q;
q 62 arch/alpha/kernel/core_marvel.c q = ev7csr->csr;
q 65 arch/alpha/kernel/core_marvel.c return q;
q 69 arch/alpha/kernel/core_marvel.c write_ev7_csr(int pe, unsigned long offset, unsigned long q)
q 74 arch/alpha/kernel/core_marvel.c ev7csr->csr = q;
q 21 arch/alpha/math-emu/sfp-util.h #define udiv_qrnnd(q, r, n1, n0, d) \
q 23 arch/alpha/math-emu/sfp-util.h (q) = __udiv_qrnnd (&__r, (n1), (n0), (d)); \
q 1352 arch/arm/nwfpe/softfloat.c bits32 q;
q 1395 arch/arm/nwfpe/softfloat.c q = ( bSig <= aSig );
q 1396 arch/arm/nwfpe/softfloat.c if ( q ) aSig -= bSig;
q 1400 arch/arm/nwfpe/softfloat.c q = tmp;
q 1401 arch/arm/nwfpe/softfloat.c q >>= 32 - expDiff;
q 1403 arch/arm/nwfpe/softfloat.c aSig = ( ( aSig>>1 )<<( expDiff - 1 ) ) - bSig * q;
q 1424 arch/arm/nwfpe/softfloat.c q = q64>>( 64 - expDiff );
q 1426 arch/arm/nwfpe/softfloat.c aSig = ( ( aSig64>>33 )<<( expDiff - 1 ) ) - bSig * q;
q 1430 arch/arm/nwfpe/softfloat.c ++q;
q 1434 arch/arm/nwfpe/softfloat.c if ( ( sigMean < 0 ) || ( ( sigMean == 0 ) && ( q & 1 ) ) ) {
q 2250 arch/arm/nwfpe/softfloat.c bits64 q, alternateASig;
q 2288 arch/arm/nwfpe/softfloat.c q = ( bSig <= aSig );
q 2289 arch/arm/nwfpe/softfloat.c if ( q ) aSig -= bSig;
q 2292 arch/arm/nwfpe/softfloat.c q = estimateDiv128To64( aSig, 0, bSig );
q 2293 arch/arm/nwfpe/softfloat.c q = ( 2 < q ) ? q - 2 : 0;
q 2294 arch/arm/nwfpe/softfloat.c aSig = - ( ( bSig>>2 ) * q );
q 2299 arch/arm/nwfpe/softfloat.c q = estimateDiv128To64( aSig, 0, bSig );
q 2300 arch/arm/nwfpe/softfloat.c q = ( 2 < q ) ? q - 2 : 0;
q 2301 arch/arm/nwfpe/softfloat.c q >>= 64 - expDiff;
q 2303 arch/arm/nwfpe/softfloat.c aSig = ( ( aSig>>1 )<<( expDiff - 1 ) ) - bSig * q;
q 2311 arch/arm/nwfpe/softfloat.c ++q;
q 2315 arch/arm/nwfpe/softfloat.c if ( ( sigMean < 0 ) || ( ( sigMean == 0 ) && ( q & 1 ) ) ) {
q 3074 arch/arm/nwfpe/softfloat.c bits64 q, term0, term1, alternateASig0, alternateASig1;
q 3118 arch/arm/nwfpe/softfloat.c q = ( bSig <= aSig0 );
q 3119 arch/arm/nwfpe/softfloat.c if ( q ) aSig0 -= bSig;
q 3122 arch/arm/nwfpe/softfloat.c q = estimateDiv128To64( aSig0, aSig1, bSig );
q 3123 arch/arm/nwfpe/softfloat.c q = ( 2 < q ) ? q - 2 : 0;
q 3124 arch/arm/nwfpe/softfloat.c mul64To128( bSig, q, &term0, &term1 );
q 3131 arch/arm/nwfpe/softfloat.c q = estimateDiv128To64( aSig0, aSig1, bSig );
q 3132 arch/arm/nwfpe/softfloat.c q = ( 2 < q ) ? q - 2 : 0;
q 3133 arch/arm/nwfpe/softfloat.c q >>= 64 - expDiff;
q 3134 arch/arm/nwfpe/softfloat.c mul64To128( bSig, q<<( 64 - expDiff ), &term0, &term1 );
q 3138 arch/arm/nwfpe/softfloat.c ++q;
q 3149 arch/arm/nwfpe/softfloat.c && ( q & 1 ) )
q 41 arch/arm64/kernel/module-plts.c u64 p, q;
q 53 arch/arm64/kernel/module-plts.c q = ALIGN_DOWN((u64)b, SZ_4K);
q 59 arch/arm64/kernel/module-plts.c if (a->adrp == b->adrp && p == q)
q 63 arch/arm64/kernel/module-plts.c (q + aarch64_insn_adrp_get_offset(le32_to_cpu(b->adrp)));
q 406 arch/ia64/kernel/acpi.c struct node_memblk_s *p, *q, *pend;
q 433 arch/ia64/kernel/acpi.c for (q = pend - 1; q >= p; q--)
q 434 arch/ia64/kernel/acpi.c *(q + 1) = *q;
q 977 arch/ia64/kernel/efi.c void *efi_map_start, *efi_map_end, *p, *q;
q 1003 arch/ia64/kernel/efi.c for (q = p + efi_desc_size; q < efi_map_end;
q 1004 arch/ia64/kernel/efi.c q += efi_desc_size) {
q 1005 arch/ia64/kernel/efi.c check_md = q;
q 1054 arch/ia64/kernel/efi.c void *efi_map_start, *efi_map_end, *p, *q;
q 1082 arch/ia64/kernel/efi.c for (q = p + efi_desc_size; q < efi_map_end;
q 1083 arch/ia64/kernel/efi.c q += efi_desc_size) {
q 1084 arch/ia64/kernel/efi.c check_md = q;
q 474 arch/m68k/mac/iop.c struct iop_msg *msg, *q;
q 491 arch/m68k/mac/iop.c if (!(q = iop_send_queue[iop_num][chan])) {
q 494 arch/m68k/mac/iop.c while (q->next) q = q->next;
q 495 arch/m68k/mac/iop.c q->next = msg;
q 159 arch/mips/bmips/setup.c const struct bmips_quirk *q;
q 177 arch/mips/bmips/setup.c for (q = bmips_quirk_list; q->quirk_fn; q++) {
q 179 arch/mips/bmips/setup.c q->compatible)) {
q 180 arch/mips/bmips/setup.c q->quirk_fn();
q 453 arch/mips/include/asm/io.h BUILDIO_MEM(q, u64)
q 455 arch/mips/include/asm/io.h __BUILD_MEMORY_PFX(__raw_, q, u64, 0)
q 456 arch/mips/include/asm/io.h __BUILD_MEMORY_PFX(__mem_, q, u64, 0)
q 471 arch/mips/include/asm/io.h BUILDIO_IOPORT(q, u64)
q 478 arch/mips/include/asm/io.h __BUILDIO(q, u64)
q 577 arch/mips/include/asm/io.h BUILDSTRING(q, u64)
q 224 arch/mips/include/asm/octeon/cvmx-cmd-queue.h int q = (queue_id >> 4) & 0xf;
q 226 arch/mips/include/asm/octeon/cvmx-cmd-queue.h return unit * 256 + core * 16 + q;
q 636 arch/mips/include/asm/sibyte/bcm1480_regs.h #define A_BCM1480_PMI_INT(q) (A_BCM1480_PMI_INT_0 + ((q>>8)<<8))
q 639 arch/mips/include/asm/sibyte/bcm1480_regs.h #define A_BCM1480_PMO_INT(q) (A_BCM1480_PMO_INT_0 + ((q>>8)<<8))
q 14 arch/mips/math-emu/sp_sqrt.c int ix, s, q, m, t, i;
q 74 arch/mips/math-emu/sp_sqrt.c q = 0; /* q = sqrt(x) */
q 82 arch/mips/math-emu/sp_sqrt.c q += r;
q 92 arch/mips/math-emu/sp_sqrt.c q += 2;
q 95 arch/mips/math-emu/sp_sqrt.c q += (q & 1);
q 99 arch/mips/math-emu/sp_sqrt.c ix = (q >> 1) + 0x3f000000;
q 128 arch/nds32/include/asm/sfp-machine.h #define udiv_qrnnd(q, r, n1, n0, d) \
q 156 arch/nds32/include/asm/sfp-machine.h (q) = (UWtype) __q1 * __ll_B | __q0; \
q 209 arch/parisc/include/asm/io.h static inline void writeq(unsigned long long q, volatile void __iomem *addr)
q 211 arch/parisc/include/asm/io.h __raw_writeq((__u64 __force) cpu_to_le64(q), addr);
q 230 arch/parisc/include/asm/io.h #define writeq_relaxed(q, addr) writeq(q, addr)
q 90 arch/parisc/include/asm/psw.h unsigned int q:1;
q 16 arch/powerpc/boot/xz_config.h uint32_t *q = p;
q 18 arch/powerpc/boot/xz_config.h return swab32(*q);
q 280 arch/powerpc/include/asm/sfp-machine.h #define udiv_qrnnd(q, r, n1, n0, d) \
q 313 arch/powerpc/include/asm/sfp-machine.h (q) = (UWtype) __q1 * __ll_B | __q0; \
q 115 arch/powerpc/include/asm/xive.h extern int xive_native_configure_queue(u32 vp_id, struct xive_q *q, u8 prio,
q 117 arch/powerpc/include/asm/xive.h extern void xive_native_disable_queue(u32 vp_id, struct xive_q *q, u8 prio);
q 417 arch/powerpc/kernel/prom_init.c const char *p, *q;
q 422 arch/powerpc/kernel/prom_init.c for (p = msg; *p != 0; p = q) {
q 423 arch/powerpc/kernel/prom_init.c for (q = p; *q != 0 && *q != '\n'; ++q)
q 425 arch/powerpc/kernel/prom_init.c if (q > p)
q 426 arch/powerpc/kernel/prom_init.c call_prom("write", 3, 1, prom.stdout, p, q - p);
q 427 arch/powerpc/kernel/prom_init.c if (*q == 0)
q 429 arch/powerpc/kernel/prom_init.c ++q;
q 475 arch/powerpc/kernel/prom_init.c const char *p, *q, *s;
q 482 arch/powerpc/kernel/prom_init.c for (p = format; *p != 0; p = q) {
q 483 arch/powerpc/kernel/prom_init.c for (q = p; *q != 0 && *q != '\n' && *q != '%'; ++q)
q 485 arch/powerpc/kernel/prom_init.c if (q > p)
q 486 arch/powerpc/kernel/prom_init.c call_prom("write", 3, 1, prom.stdout, p, q - p);
q 487 arch/powerpc/kernel/prom_init.c if (*q == 0)
q 489 arch/powerpc/kernel/prom_init.c if (*q == '\n') {
q 490 arch/powerpc/kernel/prom_init.c ++q;
q 495 arch/powerpc/kernel/prom_init.c ++q;
q 496 arch/powerpc/kernel/prom_init.c if (*q == 0)
q 498 arch/powerpc/kernel/prom_init.c while (*q == 'l') {
q 499 arch/powerpc/kernel/prom_init.c ++q;
q 502 arch/powerpc/kernel/prom_init.c switch (*q) {
q 504 arch/powerpc/kernel/prom_init.c ++q;
q 509 arch/powerpc/kernel/prom_init.c ++q;
q 525 arch/powerpc/kernel/prom_init.c ++q;
q 541 arch/powerpc/kernel/prom_init.c ++q;
q 645 arch/powerpc/kernel/prom_init.c static void add_string(char **str, const char *q)
q 649 arch/powerpc/kernel/prom_init.c while (*q)
q 650 arch/powerpc/kernel/prom_init.c *p++ = *q++;
q 179 arch/powerpc/kvm/book3s_xive.c struct xive_q *q = &xc->queues[prio];
q 188 arch/powerpc/kvm/book3s_xive.c xc->esc_virq[prio] = irq_create_mapping(NULL, q->esc_irq);
q 249 arch/powerpc/kvm/book3s_xive.c struct xive_q *q = &xc->queues[prio];
q 253 arch/powerpc/kvm/book3s_xive.c if (WARN_ON(q->qpage))
q 272 arch/powerpc/kvm/book3s_xive.c rc = xive_native_configure_queue(xc->vp_id, q, prio, qpage,
q 317 arch/powerpc/kvm/book3s_xive.c struct xive_q *q;
q 329 arch/powerpc/kvm/book3s_xive.c q = &xc->queues[prio];
q 330 arch/powerpc/kvm/book3s_xive.c atomic_inc(&q->pending_count);
q 336 arch/powerpc/kvm/book3s_xive.c struct xive_q *q;
q 344 arch/powerpc/kvm/book3s_xive.c q = &xc->queues[prio];
q 345 arch/powerpc/kvm/book3s_xive.c if (WARN_ON(!q->qpage))
q 349 arch/powerpc/kvm/book3s_xive.c max = (q->msk + 1) - XIVE_Q_GAP;
q 350 arch/powerpc/kvm/book3s_xive.c return atomic_add_unless(&q->count, 1, max) ? 0 : -EBUSY;
q 1191 arch/powerpc/kvm/book3s_xive.c struct xive_q *q = &xc->queues[i];
q 1193 arch/powerpc/kvm/book3s_xive.c xive_native_disable_queue(xc->vp_id, q, i);
q 1194 arch/powerpc/kvm/book3s_xive.c if (q->qpage) {
q 1195 arch/powerpc/kvm/book3s_xive.c free_pages((unsigned long)q->qpage,
q 1197 arch/powerpc/kvm/book3s_xive.c q->qpage = NULL;
q 1300 arch/powerpc/kvm/book3s_xive.c struct xive_q *q = &xc->queues[i];
q 1316 arch/powerpc/kvm/book3s_xive.c q, i, NULL, 0, true);
q 1423 arch/powerpc/kvm/book3s_xive.c static void xive_pre_save_queue(struct kvmppc_xive *xive, struct xive_q *q)
q 1425 arch/powerpc/kvm/book3s_xive.c u32 idx = q->idx;
q 1426 arch/powerpc/kvm/book3s_xive.c u32 toggle = q->toggle;
q 1430 arch/powerpc/kvm/book3s_xive.c irq = __xive_read_eq(q->qpage, q->msk, &idx, &toggle);
q 2050 arch/powerpc/kvm/book3s_xive.c struct xive_q *q = &xc->queues[i];
q 2053 arch/powerpc/kvm/book3s_xive.c if (!q->qpage && !xc->esc_virq[i])
q 2058 arch/powerpc/kvm/book3s_xive.c if (q->qpage) {
q 2059 arch/powerpc/kvm/book3s_xive.c idx = q->idx;
q 2060 arch/powerpc/kvm/book3s_xive.c i0 = be32_to_cpup(q->qpage + idx);
q 2061 arch/powerpc/kvm/book3s_xive.c idx = (idx + 1) & q->msk;
q 2062 arch/powerpc/kvm/book3s_xive.c i1 = be32_to_cpup(q->qpage + idx);
q 2063 arch/powerpc/kvm/book3s_xive.c seq_printf(m, "T=%d %08x %08x...\n", q->toggle,
q 44 arch/powerpc/kvm/book3s_xive_native.c struct xive_q *q = &xc->queues[prio];
q 46 arch/powerpc/kvm/book3s_xive_native.c xive_native_disable_queue(xc->vp_id, q, prio);
q 47 arch/powerpc/kvm/book3s_xive_native.c if (q->qpage) {
q 48 arch/powerpc/kvm/book3s_xive_native.c put_page(virt_to_page(q->qpage));
q 49 arch/powerpc/kvm/book3s_xive_native.c q->qpage = NULL;
q 53 arch/powerpc/kvm/book3s_xive_native.c static int kvmppc_xive_native_configure_queue(u32 vp_id, struct xive_q *q,
q 58 arch/powerpc/kvm/book3s_xive_native.c __be32 *qpage_prev = q->qpage;
q 60 arch/powerpc/kvm/book3s_xive_native.c rc = xive_native_configure_queue(vp_id, q, prio, qpage, order,
q 564 arch/powerpc/kvm/book3s_xive_native.c struct xive_q *q;
q 592 arch/powerpc/kvm/book3s_xive_native.c q = &xc->queues[priority];
q 600 arch/powerpc/kvm/book3s_xive_native.c q->guest_qaddr = 0;
q 601 arch/powerpc/kvm/book3s_xive_native.c q->guest_qshift = 0;
q 603 arch/powerpc/kvm/book3s_xive_native.c rc = kvmppc_xive_native_configure_queue(xc->vp_id, q, priority,
q 662 arch/powerpc/kvm/book3s_xive_native.c q->guest_qaddr = kvm_eq.qaddr;
q 663 arch/powerpc/kvm/book3s_xive_native.c q->guest_qshift = kvm_eq.qshift;
q 670 arch/powerpc/kvm/book3s_xive_native.c rc = kvmppc_xive_native_configure_queue(xc->vp_id, q, priority,
q 705 arch/powerpc/kvm/book3s_xive_native.c struct xive_q *q;
q 737 arch/powerpc/kvm/book3s_xive_native.c q = &xc->queues[priority];
q 741 arch/powerpc/kvm/book3s_xive_native.c if (!q->qpage)
q 753 arch/powerpc/kvm/book3s_xive_native.c kvm_eq.qshift = q->guest_qshift;
q 754 arch/powerpc/kvm/book3s_xive_native.c kvm_eq.qaddr = q->guest_qaddr;
q 893 arch/powerpc/kvm/book3s_xive_native.c struct xive_q *q = &xc->queues[prio];
q 895 arch/powerpc/kvm/book3s_xive_native.c if (!q->qpage)
q 900 arch/powerpc/kvm/book3s_xive_native.c mark_page_dirty(vcpu->kvm, gpa_to_gfn(q->guest_qaddr));
q 120 arch/powerpc/kvm/book3s_xive_template.c struct xive_q *q;
q 140 arch/powerpc/kvm/book3s_xive_template.c q = &xc->queues[prio];
q 141 arch/powerpc/kvm/book3s_xive_template.c idx = q->idx;
q 142 arch/powerpc/kvm/book3s_xive_template.c toggle = q->toggle;
q 150 arch/powerpc/kvm/book3s_xive_template.c qpage = READ_ONCE(q->qpage);
q 157 arch/powerpc/kvm/book3s_xive_template.c hirq = __xive_read_eq(qpage, q->msk, &idx, &toggle);
q 177 arch/powerpc/kvm/book3s_xive_template.c q->idx = idx;
q 178 arch/powerpc/kvm/book3s_xive_template.c q->toggle = toggle;
q 200 arch/powerpc/kvm/book3s_xive_template.c if (atomic_read(&q->pending_count)) {
q 201 arch/powerpc/kvm/book3s_xive_template.c int p = atomic_xchg(&q->pending_count, 0);
q 204 arch/powerpc/kvm/book3s_xive_template.c WARN_ON(p > atomic_read(&q->count));
q 206 arch/powerpc/kvm/book3s_xive_template.c atomic_sub(p, &q->count);
q 224 arch/powerpc/kvm/book3s_xive_template.c q->idx = idx;
q 225 arch/powerpc/kvm/book3s_xive_template.c q->toggle = toggle;
q 380 arch/powerpc/kvm/book3s_xive_template.c struct xive_q *q = &xc->queues[prio];
q 388 arch/powerpc/kvm/book3s_xive_template.c idx = q->idx;
q 389 arch/powerpc/kvm/book3s_xive_template.c toggle = q->toggle;
q 390 arch/powerpc/kvm/book3s_xive_template.c qpage = READ_ONCE(q->qpage);
q 432 arch/powerpc/kvm/book3s_xive_template.c idx = (idx + 1) & q->msk;
q 282 arch/powerpc/kvm/mpic.c static inline void IRQ_setbit(struct irq_queue *q, int n_IRQ)
q 284 arch/powerpc/kvm/mpic.c set_bit(n_IRQ, q->queue);
q 287 arch/powerpc/kvm/mpic.c static inline void IRQ_resetbit(struct irq_queue *q, int n_IRQ)
q 289 arch/powerpc/kvm/mpic.c clear_bit(n_IRQ, q->queue);
q 292 arch/powerpc/kvm/mpic.c static void IRQ_check(struct openpic *opp, struct irq_queue *q)
q 299 arch/powerpc/kvm/mpic.c irq = find_next_bit(q->queue, opp->max_irq, irq + 1);
q 312 arch/powerpc/kvm/mpic.c q->next = next;
q 313 arch/powerpc/kvm/mpic.c q->priority = priority;
q 316 arch/powerpc/kvm/mpic.c static int IRQ_get_next(struct openpic *opp, struct irq_queue *q)
q 319 arch/powerpc/kvm/mpic.c IRQ_check(opp, q);
q 321 arch/powerpc/kvm/mpic.c return q->next;
q 564 arch/powerpc/lib/code-patching.c unsigned int *p, *q;
q 577 arch/powerpc/lib/code-patching.c q = p + 1;
q 578 arch/powerpc/lib/code-patching.c patch_instruction(q, translate_branch(q, p));
q 579 arch/powerpc/lib/code-patching.c check(instr_is_branch_to_addr(q, addr));
q 585 arch/powerpc/lib/code-patching.c q = buf + 0x2000000;
q 586 arch/powerpc/lib/code-patching.c patch_instruction(q, translate_branch(q, p));
q 588 arch/powerpc/lib/code-patching.c check(instr_is_branch_to_addr(q, addr));
q 589 arch/powerpc/lib/code-patching.c check(*q == 0x4a000000);
q 595 arch/powerpc/lib/code-patching.c q = buf + 4;
q 596 arch/powerpc/lib/code-patching.c patch_instruction(q, translate_branch(q, p));
q 598 arch/powerpc/lib/code-patching.c check(instr_is_branch_to_addr(q, addr));
q 599 arch/powerpc/lib/code-patching.c check(*q == 0x49fffffc);
q 605 arch/powerpc/lib/code-patching.c q = buf + 0x1400000;
q 606 arch/powerpc/lib/code-patching.c patch_instruction(q, translate_branch(q, p));
q 608 arch/powerpc/lib/code-patching.c check(instr_is_branch_to_addr(q, addr));
q 614 arch/powerpc/lib/code-patching.c q = buf + 4;
q 615 arch/powerpc/lib/code-patching.c patch_instruction(q, translate_branch(q, p));
q 617 arch/powerpc/lib/code-patching.c check(instr_is_branch_to_addr(q, addr));
q 627 arch/powerpc/lib/code-patching.c q = p + 1;
q 628 arch/powerpc/lib/code-patching.c patch_instruction(q, translate_branch(q, p));
q 629 arch/powerpc/lib/code-patching.c check(instr_is_branch_to_addr(q, addr));
q 635 arch/powerpc/lib/code-patching.c q = buf + 0x8000;
q 636 arch/powerpc/lib/code-patching.c patch_instruction(q, translate_branch(q, p));
q 638 arch/powerpc/lib/code-patching.c check(instr_is_branch_to_addr(q, addr));
q 639 arch/powerpc/lib/code-patching.c check(*q == 0x43ff8000);
q 645 arch/powerpc/lib/code-patching.c q = buf + 4;
q 646 arch/powerpc/lib/code-patching.c patch_instruction(q, translate_branch(q, p));
q 648 arch/powerpc/lib/code-patching.c check(instr_is_branch_to_addr(q, addr));
q 649 arch/powerpc/lib/code-patching.c check(*q == 0x43ff7ffc);
q 655 arch/powerpc/lib/code-patching.c q = buf + 0x5000;
q 656 arch/powerpc/lib/code-patching.c patch_instruction(q, translate_branch(q, p));
q 658 arch/powerpc/lib/code-patching.c check(instr_is_branch_to_addr(q, addr));
q 664 arch/powerpc/lib/code-patching.c q = buf + 4;
q 665 arch/powerpc/lib/code-patching.c patch_instruction(q, translate_branch(q, p));
q 667 arch/powerpc/lib/code-patching.c check(instr_is_branch_to_addr(q, addr));
q 10 arch/powerpc/math-emu/udivmodti4.c _fp_udivmodti4(_FP_W_TYPE q[2], _FP_W_TYPE r[2],
q 190 arch/powerpc/math-emu/udivmodti4.c q[0] = q0; q[1] = q1;
q 46 arch/powerpc/platforms/powermac/bootx_init.c const char *p, *q, *s;
q 51 arch/powerpc/platforms/powermac/bootx_init.c for (p = format; *p != 0; p = q) {
q 52 arch/powerpc/platforms/powermac/bootx_init.c for (q = p; *q != 0 && *q != '\n' && *q != '%'; ++q)
q 54 arch/powerpc/platforms/powermac/bootx_init.c if (q > p)
q 55 arch/powerpc/platforms/powermac/bootx_init.c btext_drawtext(p, q - p);
q 56 arch/powerpc/platforms/powermac/bootx_init.c if (*q == 0)
q 58 arch/powerpc/platforms/powermac/bootx_init.c if (*q == '\n') {
q 59 arch/powerpc/platforms/powermac/bootx_init.c ++q;
q 65 arch/powerpc/platforms/powermac/bootx_init.c ++q;
q 66 arch/powerpc/platforms/powermac/bootx_init.c if (*q == 0)
q 68 arch/powerpc/platforms/powermac/bootx_init.c switch (*q) {
q 70 arch/powerpc/platforms/powermac/bootx_init.c ++q;
q 77 arch/powerpc/platforms/powermac/bootx_init.c ++q;
q 697 arch/powerpc/platforms/ps3/os-area.c static DECLARE_WORK(q, os_area_queue_work_handler);
q 700 arch/powerpc/platforms/ps3/os-area.c schedule_work(&q);
q 81 arch/powerpc/sysdev/xive/common.c static u32 xive_read_eq(struct xive_q *q, bool just_peek)
q 85 arch/powerpc/sysdev/xive/common.c if (!q->qpage)
q 87 arch/powerpc/sysdev/xive/common.c cur = be32_to_cpup(q->qpage + q->idx);
q 90 arch/powerpc/sysdev/xive/common.c if ((cur >> 31) == q->toggle)
q 96 arch/powerpc/sysdev/xive/common.c q->idx = (q->idx + 1) & q->msk;
q 99 arch/powerpc/sysdev/xive/common.c if (q->idx == 0)
q 100 arch/powerpc/sysdev/xive/common.c q->toggle ^= 1;
q 136 arch/powerpc/sysdev/xive/common.c struct xive_q *q;
q 167 arch/powerpc/sysdev/xive/common.c q = &xc->queue[prio];
q 168 arch/powerpc/sysdev/xive/common.c if (atomic_read(&q->pending_count)) {
q 169 arch/powerpc/sysdev/xive/common.c int p = atomic_xchg(&q->pending_count, 0);
q 171 arch/powerpc/sysdev/xive/common.c WARN_ON(p > atomic_read(&q->count));
q 172 arch/powerpc/sysdev/xive/common.c atomic_sub(p, &q->count);
q 224 arch/powerpc/sysdev/xive/common.c static notrace void xive_dump_eq(const char *name, struct xive_q *q)
q 228 arch/powerpc/sysdev/xive/common.c if (!q->qpage)
q 230 arch/powerpc/sysdev/xive/common.c idx = q->idx;
q 231 arch/powerpc/sysdev/xive/common.c i0 = be32_to_cpup(q->qpage + idx);
q 232 arch/powerpc/sysdev/xive/common.c idx = (idx + 1) & q->msk;
q 233 arch/powerpc/sysdev/xive/common.c i1 = be32_to_cpup(q->qpage + idx);
q 235 arch/powerpc/sysdev/xive/common.c q->idx, q->toggle, i0, i1);
q 467 arch/powerpc/sysdev/xive/common.c struct xive_q *q = &xc->queue[xive_irq_priority];
q 475 arch/powerpc/sysdev/xive/common.c max = (q->msk + 1) - 1;
q 476 arch/powerpc/sysdev/xive/common.c return !!atomic_add_unless(&q->count, 1, max);
q 491 arch/powerpc/sysdev/xive/common.c struct xive_q *q = &xc->queue[xive_irq_priority];
q 505 arch/powerpc/sysdev/xive/common.c atomic_inc(&q->pending_count);
q 130 arch/powerpc/sysdev/xive/native.c int xive_native_configure_queue(u32 vp_id, struct xive_q *q, u8 prio,
q 147 arch/powerpc/sysdev/xive/native.c q->msk = order ? ((1u << (order - 2)) - 1) : 0;
q 148 arch/powerpc/sysdev/xive/native.c q->idx = 0;
q 149 arch/powerpc/sysdev/xive/native.c q->toggle = 0;
q 160 arch/powerpc/sysdev/xive/native.c q->eoi_phys = be64_to_cpu(qeoi_page_be);
q 167 arch/powerpc/sysdev/xive/native.c q->esc_irq = be32_to_cpu(esc_irq_be);
q 187 arch/powerpc/sysdev/xive/native.c q->qpage = qpage;
q 194 arch/powerpc/sysdev/xive/native.c static void __xive_native_disable_queue(u32 vp_id, struct xive_q *q, u8 prio)
q 209 arch/powerpc/sysdev/xive/native.c void xive_native_disable_queue(u32 vp_id, struct xive_q *q, u8 prio)
q 211 arch/powerpc/sysdev/xive/native.c __xive_native_disable_queue(vp_id, q, prio);
q 217 arch/powerpc/sysdev/xive/native.c struct xive_q *q = &xc->queue[prio];
q 225 arch/powerpc/sysdev/xive/native.c q, prio, qpage, xive_queue_shift, false);
q 230 arch/powerpc/sysdev/xive/native.c struct xive_q *q = &xc->queue[prio];
q 237 arch/powerpc/sysdev/xive/native.c __xive_native_disable_queue(get_hard_smp_processor_id(cpu), q, prio);
q 239 arch/powerpc/sysdev/xive/native.c free_pages((unsigned long)q->qpage, alloc_order);
q 240 arch/powerpc/sysdev/xive/native.c q->qpage = NULL;
q 460 arch/powerpc/sysdev/xive/spapr.c static int xive_spapr_configure_queue(u32 target, struct xive_q *q, u8 prio,
q 478 arch/powerpc/sysdev/xive/spapr.c q->msk = order ? ((1u << (order - 2)) - 1) : 0;
q 479 arch/powerpc/sysdev/xive/spapr.c q->idx = 0;
q 480 arch/powerpc/sysdev/xive/spapr.c q->toggle = 0;
q 491 arch/powerpc/sysdev/xive/spapr.c q->eoi_phys = esn_page;
q 503 arch/powerpc/sysdev/xive/spapr.c q->qpage = qpage;
q 512 arch/powerpc/sysdev/xive/spapr.c struct xive_q *q = &xc->queue[prio];
q 520 arch/powerpc/sysdev/xive/spapr.c q, prio, qpage, xive_queue_shift);
q 526 arch/powerpc/sysdev/xive/spapr.c struct xive_q *q = &xc->queue[prio];
q 537 arch/powerpc/sysdev/xive/spapr.c free_pages((unsigned long)q->qpage, alloc_order);
q 538 arch/powerpc/sysdev/xive/spapr.c q->qpage = NULL;
q 41 arch/powerpc/xmon/nonstdio.c const char *p = ptr, *q;
q 51 arch/powerpc/xmon/nonstdio.c while (paginating && (q = strchr(p, '\n'))) {
q 52 arch/powerpc/xmon/nonstdio.c rv += udbg_write(p, q - p + 1);
q 53 arch/powerpc/xmon/nonstdio.c p = q + 1;
q 2045 arch/powerpc/xmon/xmon.c char *p, *q;
q 2052 arch/powerpc/xmon/xmon.c q = (char *)buf;
q 2055 arch/powerpc/xmon/xmon.c *(u16 *)q = *(u16 *)p;
q 2058 arch/powerpc/xmon/xmon.c *(u32 *)q = *(u32 *)p;
q 2061 arch/powerpc/xmon/xmon.c *(u64 *)q = *(u64 *)p;
q 2065 arch/powerpc/xmon/xmon.c *q++ = *p++;
q 2082 arch/powerpc/xmon/xmon.c char *p, *q;
q 2095 arch/powerpc/xmon/xmon.c q = (char *) buf;
q 2098 arch/powerpc/xmon/xmon.c *(u16 *)p = *(u16 *)q;
q 2101 arch/powerpc/xmon/xmon.c *(u32 *)p = *(u32 *)q;
q 2104 arch/powerpc/xmon/xmon.c *(u64 *)p = *(u64 *)q;
q 2108 arch/powerpc/xmon/xmon.c *p++ = *q++;
q 274 arch/riscv/include/asm/io.h __io_reads_ins(reads, u64, q, __io_br(), __io_ar(addr))
q 277 arch/riscv/include/asm/io.h __io_reads_ins(ins, u64, q, __io_pbr(), __io_par(addr))
q 280 arch/riscv/include/asm/io.h __io_writes_outs(writes, u64, q, __io_bw(), __io_aw())
q 283 arch/riscv/include/asm/io.h __io_writes_outs(outs, u64, q, __io_pbr(), __io_paw())
q 77 arch/riscv/include/uapi/asm/ptrace.h struct __riscv_q_ext_state q;
q 42 arch/riscv/kernel/signal.c for (i = 0; i < ARRAY_SIZE(sc_fpregs->q.reserved); i++) {
q 45 arch/riscv/kernel/signal.c err = __get_user(value, &sc_fpregs->q.reserved[i]);
q 68 arch/riscv/kernel/signal.c for (i = 0; i < ARRAY_SIZE(sc_fpregs->q.reserved); i++) {
q 69 arch/riscv/kernel/signal.c err = __put_user(0, &sc_fpregs->q.reserved[i]);
q 88 arch/s390/include/asm/scsw.h u32 q:1;
q 26 arch/s390/pci/pci_event.c u32 q : 1; /* event qualifier */
q 88 arch/sh/include/asm/io.h __BUILD_UNCACHED_IO(q, u64)
q 125 arch/sh/include/asm/io.h __BUILD_MEMORY_STRING(__raw_, q, u64)
q 192 arch/sh/include/asm/io.h BUILDIO_IOPORT(q, u64)
q 221 arch/sh/include/asm/io.h __BUILD_IOPORT_STRING(q, u64)
q 138 arch/sh/kernel/traps_64.c unsigned char *p, *q;
q 140 arch/sh/kernel/traps_64.c q = (unsigned char *) &x;
q 141 arch/sh/kernel/traps_64.c q[0] = p[0];
q 142 arch/sh/kernel/traps_64.c q[1] = p[1];
q 154 arch/sh/kernel/traps_64.c unsigned char *p, *q;
q 156 arch/sh/kernel/traps_64.c q = (unsigned char *) &x;
q 159 arch/sh/kernel/traps_64.c p[0] = q[0];
q 160 arch/sh/kernel/traps_64.c p[1] = q[1];
q 14 arch/sh/lib/div64-generic.c uint64_t q = __xdiv64_32(*xp, y);
q 16 arch/sh/lib/div64-generic.c rem = *xp - q * y;
q 17 arch/sh/lib/div64-generic.c *xp = q;
q 32 arch/sh/math-emu/sfp-util.h #define udiv_qrnnd(q, r, n1, n0, d) \
q 65 arch/sh/math-emu/sfp-util.h (q) = (UWtype) __q1 * __ll_B | __q0; \
q 93 arch/sparc/include/asm/io_64.h static inline void __raw_writeq(u64 q, const volatile void __iomem *addr)
q 97 arch/sparc/include/asm/io_64.h : "Jr" (q), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
q 187 arch/sparc/include/asm/io_64.h static inline void writeq(u64 q, volatile void __iomem *addr)
q 191 arch/sparc/include/asm/io_64.h : "Jr" (q), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
q 325 arch/sparc/include/asm/io_64.h static inline void sbus_writeq(u64 q, volatile void __iomem *addr)
q 327 arch/sparc/include/asm/io_64.h __raw_writeq(q, addr);
q 93 arch/sparc/include/asm/upa.h static inline void _upa_writeq(unsigned long q, unsigned long addr)
q 97 arch/sparc/include/asm/upa.h : "r" (q), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
q 979 arch/sparc/kernel/ldc.c void *q;
q 984 arch/sparc/kernel/ldc.c q = (void *) __get_free_pages(GFP_KERNEL, order);
q 985 arch/sparc/kernel/ldc.c if (!q) {
q 991 arch/sparc/kernel/ldc.c memset(q, 0, PAGE_SIZE << order);
q 993 arch/sparc/kernel/ldc.c *base = q;
q 994 arch/sparc/kernel/ldc.c *ra = __pa(q);
q 999 arch/sparc/kernel/ldc.c static void free_queue(unsigned long num_entries, struct ldc_packet *q)
q 1003 arch/sparc/kernel/ldc.c if (!q)
q 1009 arch/sparc/kernel/ldc.c free_pages((unsigned long)q, order);
q 272 arch/sparc/math-emu/math_32.c u64 q[2];
q 164 arch/sparc/math-emu/math_64.c u64 q[2];
q 457 arch/sparc/math-emu/math_64.c case FMOVQ: rd->q[0] = rs2->q[0]; rd->q[1] = rs2->q[1]; break;
q 458 arch/sparc/math-emu/math_64.c case FABSQ: rd->q[0] = rs2->q[0] & 0x7fffffffffffffffUL; rd->q[1] = rs2->q[1]; break;
q 459 arch/sparc/math-emu/math_64.c case FNEGQ: rd->q[0] = rs2->q[0] ^ 0x8000000000000000UL; rd->q[1] = rs2->q[1]; break;
q 77 arch/sparc/math-emu/sfp-util_32.h #define udiv_qrnnd(q, r, n1, n0, d) \
q 102 arch/sparc/math-emu/sfp-util_32.h : "=&r" (q), \
q 76 arch/sparc/math-emu/sfp-util_64.h #define udiv_qrnnd(q, r, n1, n0, d) \
q 108 arch/sparc/math-emu/sfp-util_64.h (q) = (UWtype) (__q1 << 32) | __q0; \
q 523 arch/um/drivers/ubd_kern.c blk_queue_max_discard_sectors(io_req->req->q, 0);
q 524 arch/um/drivers/ubd_kern.c blk_queue_max_write_zeroes_sectors(io_req->req->q, 0);
q 525 arch/um/drivers/ubd_kern.c blk_queue_flag_clear(QUEUE_FLAG_DISCARD, io_req->req->q);
q 26 arch/x86/include/asm/asm.h inst##q##__VA_ARGS__)
q 79 arch/x86/include/asm/div64.h u64 q;
q 81 arch/x86/include/asm/div64.h asm ("mulq %2; divq %3" : "=a" (q)
q 85 arch/x86/include/asm/div64.h return q;
q 20 arch/x86/include/asm/msr.h u64 q;
q 341 arch/x86/include/asm/msr.h int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q);
q 342 arch/x86/include/asm/msr.h int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q);
q 347 arch/x86/include/asm/msr.h int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q);
q 348 arch/x86/include/asm/msr.h int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q);
q 362 arch/x86/include/asm/msr.h static inline int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
q 364 arch/x86/include/asm/msr.h rdmsrl(msr_no, *q);
q 367 arch/x86/include/asm/msr.h static inline int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
q 369 arch/x86/include/asm/msr.h wrmsrl(msr_no, q);
q 391 arch/x86/include/asm/msr.h static inline int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
q 393 arch/x86/include/asm/msr.h return rdmsrl_safe(msr_no, q);
q 395 arch/x86/include/asm/msr.h static inline int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
q 397 arch/x86/include/asm/msr.h return wrmsrl_safe(msr_no, q);
q 626 arch/x86/kernel/cpu/common.c char *p, *q, *s;
q 638 arch/x86/kernel/cpu/common.c p = q = s = &c->x86_model_id[0];
q 646 arch/x86/kernel/cpu/common.c s = q;
q 648 arch/x86/kernel/cpu/common.c *q++ = *p++;
q 13 arch/x86/kernel/pci-iommu_table.c struct iommu_table_entry *q)
q 17 arch/x86/kernel/pci-iommu_table.c if (!q)
q 21 arch/x86/kernel/pci-iommu_table.c if (p->detect == q->depend)
q 31 arch/x86/kernel/pci-iommu_table.c struct iommu_table_entry *p, *q, tmp;
q 35 arch/x86/kernel/pci-iommu_table.c q = find_dependents_of(start, finish, p);
q 39 arch/x86/kernel/pci-iommu_table.c if (q > p) {
q 41 arch/x86/kernel/pci-iommu_table.c memmove(p, q, sizeof(*p));
q 42 arch/x86/kernel/pci-iommu_table.c *q = tmp;
q 53 arch/x86/kernel/pci-iommu_table.c struct iommu_table_entry *p, *q, *x;
q 57 arch/x86/kernel/pci-iommu_table.c q = find_dependents_of(start, finish, p);
q 58 arch/x86/kernel/pci-iommu_table.c x = find_dependents_of(start, finish, q);
q 61 arch/x86/kernel/pci-iommu_table.c p->detect, q->detect);
q 68 arch/x86/kernel/pci-iommu_table.c q = find_dependents_of(p, finish, p);
q 69 arch/x86/kernel/pci-iommu_table.c if (q && q > p) {
q 71 arch/x86/kernel/pci-iommu_table.c p->detect, q->detect);
q 854 arch/x86/kernel/ptrace.c #define R32(l,q) \
q 856 arch/x86/kernel/ptrace.c regs->q = value; break
q 928 arch/x86/kernel/ptrace.c #define R32(l,q) \
q 930 arch/x86/kernel/ptrace.c *val = regs->q; break
q 361 arch/x86/kvm/emulate.c ON64(FOP1E(op##q, rax)) \
q 392 arch/x86/kvm/emulate.c ON64(FOP2E(op##q, rax, rdx)) \
q 401 arch/x86/kvm/emulate.c ON64(FOP2E(op##q, rax, rdx)) \
q 410 arch/x86/kvm/emulate.c ON64(FOP2E(op##q, rax, cl)) \
q 419 arch/x86/kvm/emulate.c ON64(FOP2E(op##q, rdx, rax)) \
q 433 arch/x86/kvm/emulate.c ON64(FOP3E(op##q, rax, rdx, cl)) \
q 1944 arch/x86/kvm/svm.c struct list_head *pos, *q;
q 1956 arch/x86/kvm/svm.c list_for_each_safe(pos, q, head) {
q 1407 arch/x86/kvm/vmx/nested.c int i, q;
q 1414 arch/x86/kvm/vmx/nested.c for (q = 0; q < ARRAY_SIZE(fields); q++) {
q 1415 arch/x86/kvm/vmx/nested.c for (i = 0; i < max_fields[q]; i++) {
q 1416 arch/x86/kvm/vmx/nested.c field = fields[q][i];
q 52 arch/x86/lib/msr-smp.c int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
q 61 arch/x86/lib/msr-smp.c *q = rv.reg.q;
q 83 arch/x86/lib/msr-smp.c int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
q 91 arch/x86/lib/msr-smp.c rv.reg.q = q;
q 210 arch/x86/lib/msr-smp.c int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
q 218 arch/x86/lib/msr-smp.c rv.reg.q = q;
q 226 arch/x86/lib/msr-smp.c int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
q 232 arch/x86/lib/msr-smp.c *q = (u64)high << 32 | low;
q 46 arch/x86/lib/msr.c m->q = val;
q 59 arch/x86/lib/msr.c return wrmsrl_safe(msr, m->q);
q 76 arch/x86/lib/msr.c m1.q |= BIT_64(bit);
q 78 arch/x86/lib/msr.c m1.q &= ~BIT_64(bit);
q 80 arch/x86/lib/msr.c if (m1.q == m.q)
q 22 arch/x86/math-emu/fpu_trig.c unsigned long long st1, unsigned long long q, int n);
q 38 arch/x86/math-emu/fpu_trig.c unsigned long long q;
q 56 arch/x86/math-emu/fpu_trig.c q = significand(&tmp);
q 57 arch/x86/math-emu/fpu_trig.c if (q) {
q 61 arch/x86/math-emu/fpu_trig.c q, exponent(st0_ptr) - exponent(&CONST_PI2));
q 67 arch/x86/math-emu/fpu_trig.c if ((even && !(q & 1)) || (!even && (q & 1))) {
q 78 arch/x86/math-emu/fpu_trig.c || (q > 1)) {
q 82 arch/x86/math-emu/fpu_trig.c significand(&tmp) = q + 1;
q 98 arch/x86/math-emu/fpu_trig.c q++;
q 109 arch/x86/math-emu/fpu_trig.c if (((q > 0)
q 111 arch/x86/math-emu/fpu_trig.c || (q > 1)) {
q 115 arch/x86/math-emu/fpu_trig.c significand(&tmp) = q;
q 138 arch/x86/math-emu/fpu_trig.c q++;
q 148 arch/x86/math-emu/fpu_trig.c return (q & 3) | even;
q 279 arch/x86/math-emu/fpu_trig.c int q;
q 300 arch/x86/math-emu/fpu_trig.c if ((q = trig_arg(st0_ptr, 0)) == -1) {
q 306 arch/x86/math-emu/fpu_trig.c setsign(st0_ptr, (q & 1) ^ (arg_sign != 0));
q 555 arch/x86/math-emu/fpu_trig.c int q;
q 558 arch/x86/math-emu/fpu_trig.c if ((q = trig_arg(st0_ptr, 0)) == -1) {
q 565 arch/x86/math-emu/fpu_trig.c if (q & 2)
q 618 arch/x86/math-emu/fpu_trig.c int q;
q 631 arch/x86/math-emu/fpu_trig.c } else if ((q = trig_arg(st0_ptr, FCOS)) != -1) {
q 634 arch/x86/math-emu/fpu_trig.c if ((q + 1) & 2)
q 747 arch/x86/math-emu/fpu_trig.c unsigned long long st1, unsigned long long q, int n)
q 760 arch/x86/math-emu/fpu_trig.c :"2"(((unsigned *)&st1)[0]), "m"(((unsigned *)&q)[0])
q 765 arch/x86/math-emu/fpu_trig.c :"1"(((unsigned *)&st1)[1]), "m"(((unsigned *)&q)[0])
q 770 arch/x86/math-emu/fpu_trig.c :"1"(((unsigned *)&st1)[0]), "m"(((unsigned *)&q)[1])
q 791 arch/x86/math-emu/fpu_trig.c long long q;
q 823 arch/x86/math-emu/fpu_trig.c q = significand(&tmp);
q 828 arch/x86/math-emu/fpu_trig.c q, expdif);
q 833 arch/x86/math-emu/fpu_trig.c q = 0;
q 854 arch/x86/math-emu/fpu_trig.c && (q & 1))) {
q 857 arch/x86/math-emu/fpu_trig.c q++;
q 862 arch/x86/math-emu/fpu_trig.c if (q & 4)
q 864 arch/x86/math-emu/fpu_trig.c if (q & 2)
q 866 arch/x86/math-emu/fpu_trig.c if (q & 1)
q 1562 arch/x86/platform/uv/tlb_uv.c char *q;
q 1568 arch/x86/platform/uv/tlb_uv.c q = p;
q 1569 arch/x86/platform/uv/tlb_uv.c for (; *p; p = q + strspn(q, WHITESPACE)) {
q 1570 arch/x86/platform/uv/tlb_uv.c q = p + strcspn(p, WHITESPACE);
q 1572 arch/x86/platform/uv/tlb_uv.c if (q == p)
q 1581 arch/x86/platform/uv/tlb_uv.c q = p;
q 1582 arch/x86/platform/uv/tlb_uv.c for (cnt = 0; *p; p = q + strspn(q, WHITESPACE), cnt++) {
q 1583 arch/x86/platform/uv/tlb_uv.c q = p + strcspn(p, WHITESPACE);
q 43 arch/x86/power/cpu.c msr->valid = !rdmsrl_safe(msr->info.msr_no, &msr->info.reg.q);
q 55 arch/x86/power/cpu.c wrmsrl(msr->info.msr_no, msr->info.reg.q);
q 426 arch/x86/power/cpu.c msr_array[i].info.reg.q = 0;
q 179 arch/x86/xen/platform-pci-unplug.c char *p, *q;
q 182 arch/x86/xen/platform-pci-unplug.c for (p = arg; p; p = q) {
q 183 arch/x86/xen/platform-pci-unplug.c q = strchr(p, ',');
q 184 arch/x86/xen/platform-pci-unplug.c if (q) {
q 185 arch/x86/xen/platform-pci-unplug.c l = q - p;
q 186 arch/x86/xen/platform-pci-unplug.c q++;
q 104 arch/xtensa/platforms/iss/simdisk.c static blk_qc_t simdisk_make_request(struct request_queue *q, struct bio *bio)
q 106 arch/xtensa/platforms/iss/simdisk.c struct simdisk *dev = q->queuedata;
q 404 block/bfq-cgroup.c lockdep_assert_held(&bfqg_to_blkg(bfqg)->q->queue_lock);
q 504 block/bfq-cgroup.c static struct blkg_policy_data *bfq_pd_alloc(gfp_t gfp, struct request_queue *q,
q 509 block/bfq-cgroup.c bfqg = kzalloc_node(sizeof(*bfqg), gfp, q->node);
q 527 block/bfq-cgroup.c struct bfq_data *bfqd = blkg->q->elevator->elevator_data;
q 1109 block/bfq-cgroup.c lockdep_assert_held(&blkg->q->queue_lock);
q 380 block/bfq-iosched.c return bic->icq.q->elevator->elevator_data;
q 401 block/bfq-iosched.c struct request_queue *q)
q 407 block/bfq-iosched.c spin_lock_irqsave(&q->queue_lock, flags);
q 408 block/bfq-iosched.c icq = icq_to_bic(ioc_lookup_icq(ioc, q));
q 409 block/bfq-iosched.c spin_unlock_irqrestore(&q->queue_lock, flags);
q 537 block/bfq-iosched.c struct bfq_data *bfqd = data->q->elevator->elevator_data;
q 2117 block/bfq-iosched.c struct request_queue *q)
q 2137 block/bfq-iosched.c static void bfq_activate_request(struct request_queue *q, struct request *rq)
q 2139 block/bfq-iosched.c struct bfq_data *bfqd = q->elevator->elevator_data;
q 2144 block/bfq-iosched.c static void bfq_deactivate_request(struct request_queue *q, struct request *rq)
q 2146 block/bfq-iosched.c struct bfq_data *bfqd = q->elevator->elevator_data;
q 2152 block/bfq-iosched.c static void bfq_remove_request(struct request_queue *q,
q 2170 block/bfq-iosched.c elv_rqhash_del(q, rq);
q 2171 block/bfq-iosched.c if (q->last_merge == rq)
q 2172 block/bfq-iosched.c q->last_merge = NULL;
q 2216 block/bfq-iosched.c struct request_queue *q = hctx->queue;
q 2217 block/bfq-iosched.c struct bfq_data *bfqd = q->elevator->elevator_data;
q 2226 block/bfq-iosched.c struct bfq_io_cq *bic = bfq_bic_lookup(bfqd, current->io_context, q);
q 2237 block/bfq-iosched.c ret = blk_mq_sched_try_merge(q, bio, nr_segs, &free);
q 2246 block/bfq-iosched.c static int bfq_request_merge(struct request_queue *q, struct request **req,
q 2249 block/bfq-iosched.c struct bfq_data *bfqd = q->elevator->elevator_data;
q 2252 block/bfq-iosched.c __rq = bfq_find_rq_fmerge(bfqd, bio, q);
q 2263 block/bfq-iosched.c static void bfq_request_merged(struct request_queue *q, struct request *req,
q 2320 block/bfq-iosched.c static void bfq_requests_merged(struct request_queue *q, struct request *rq,
q 2813 block/bfq-iosched.c static bool bfq_allow_bio_merge(struct request_queue *q, struct request *rq,
q 2816 block/bfq-iosched.c struct bfq_data *bfqd = q->elevator->elevator_data;
q 3238 block/bfq-iosched.c static void bfq_dispatch_remove(struct request_queue *q, struct request *rq)
q 3255 block/bfq-iosched.c bfq_update_peak_rate(q->elevator->elevator_data, rq);
q 3257 block/bfq-iosched.c bfq_remove_request(q, rq);
q 4740 block/bfq-iosched.c static void bfq_update_dispatch_stats(struct request_queue *q,
q 4763 block/bfq-iosched.c spin_lock_irq(&q->queue_lock);
q 4782 block/bfq-iosched.c spin_unlock_irq(&q->queue_lock);
q 4785 block/bfq-iosched.c static inline void bfq_update_dispatch_stats(struct request_queue *q,
q 5457 block/bfq-iosched.c static void bfq_update_insert_stats(struct request_queue *q,
q 5475 block/bfq-iosched.c spin_lock_irq(&q->queue_lock);
q 5479 block/bfq-iosched.c spin_unlock_irq(&q->queue_lock);
q 5482 block/bfq-iosched.c static inline void bfq_update_insert_stats(struct request_queue *q,
q 5491 block/bfq-iosched.c struct request_queue *q = hctx->queue;
q 5492 block/bfq-iosched.c struct bfq_data *bfqd = q->elevator->elevator_data;
q 5498 block/bfq-iosched.c if (blk_mq_sched_try_insert_merge(q, rq)) {
q 5524 block/bfq-iosched.c elv_rqhash_add(q, rq);
q 5525 block/bfq-iosched.c if (!q->last_merge)
q 5526 block/bfq-iosched.c q->last_merge = rq;
q 5539 block/bfq-iosched.c bfq_update_insert_stats(q, bfqq, idle_timer_disabled,
q 5948 block/bfq-iosched.c bfq_remove_request(rq->q, rq);
q 6104 block/bfq-iosched.c struct request_queue *q = rq->q;
q 6106 block/bfq-iosched.c struct bfq_data *bfqd = q->elevator->elevator_data;
q 6423 block/bfq-iosched.c static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
q 6428 block/bfq-iosched.c eq = elevator_alloc(q, e);
q 6432 block/bfq-iosched.c bfqd = kzalloc_node(sizeof(*bfqd), GFP_KERNEL, q->node);
q 6439 block/bfq-iosched.c spin_lock_irq(&q->queue_lock);
q 6440 block/bfq-iosched.c q->elevator = eq;
q 6441 block/bfq-iosched.c spin_unlock_irq(&q->queue_lock);
q 6465 block/bfq-iosched.c bfqd->queue = q;
q 6540 block/bfq-iosched.c bfqd->root_group = bfq_create_group_hierarchy(bfqd, q->node);
q 6546 block/bfq-iosched.c wbt_disable_default(q);
q 203 block/bio-integrity.c struct request_queue *q = bio->bi_disk->queue;
q 237 block/bio-integrity.c buf = kmalloc(len, GFP_NOIO | q->bounce_gfp);
q 701 block/bio.c static bool bio_try_merge_pc_page(struct request_queue *q, struct bio *bio,
q 706 block/bio.c unsigned long mask = queue_segment_boundary(q);
q 712 block/bio.c if (bv->bv_len + len > queue_max_segment_size(q))
q 733 block/bio.c static int __bio_add_pc_page(struct request_queue *q, struct bio *bio,
q 745 block/bio.c if (((bio->bi_iter.bi_size + len) >> 9) > queue_max_hw_sectors(q))
q 749 block/bio.c if (bio_try_merge_pc_page(q, bio, page, len, offset, same_page))
q 757 block/bio.c if (bvec_gap_to_prev(q, bvec, offset))
q 764 block/bio.c if (bio->bi_vcnt >= queue_max_segments(q))
q 776 block/bio.c int bio_add_pc_page(struct request_queue *q, struct bio *bio,
q 780 block/bio.c return __bio_add_pc_page(q, bio, page, len, offset, &same_page);
q 1273 block/bio.c struct bio *bio_copy_user_iov(struct request_queue *q,
q 1331 block/bio.c page = alloc_page(q->bounce_gfp | gfp_mask);
q 1338 block/bio.c if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes) {
q 1390 block/bio.c struct bio *bio_map_user_iov(struct request_queue *q,
q 1419 block/bio.c if (unlikely(offs & queue_dma_alignment(q))) {
q 1431 block/bio.c if (!__bio_add_pc_page(q, bio, page, n, offs,
q 1517 block/bio.c struct bio *bio_map_kern(struct request_queue *q, void *data, unsigned int len,
q 1552 block/bio.c if (bio_add_pc_page(q, bio, page, bytes,
q 1599 block/bio.c struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len,
q 1627 block/bio.c page = alloc_page(q->bounce_gfp | gfp_mask);
q 1634 block/bio.c if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes)
q 1771 block/bio.c void generic_start_io_acct(struct request_queue *q, int op,
q 1781 block/bio.c part_inc_in_flight(q, part, op_is_write(op));
q 1787 block/bio.c void generic_end_io_acct(struct request_queue *q, int req_op,
q 1799 block/bio.c part_dec_in_flight(q, part, op_is_write(req_op));
q 2101 block/bio.c struct request_queue *q = bio->bi_disk->queue;
q 2107 block/bio.c blkg = q->root_blkg;
q 2109 block/bio.c blkg = blkg_lookup_create(css_to_blkcg(css), q);
q 60 block/blk-cgroup.c static bool blkcg_policy_enabled(struct request_queue *q,
q 63 block/blk-cgroup.c return pol && test_bit(pol->plid, q->blkcg_pols);
q 145 block/blk-cgroup.c static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
q 152 block/blk-cgroup.c blkg = kzalloc_node(sizeof(*blkg), gfp_mask, q->node);
q 163 block/blk-cgroup.c blkg->q = q;
q 174 block/blk-cgroup.c if (!blkcg_policy_enabled(q, pol))
q 178 block/blk-cgroup.c pd = pol->pd_alloc_fn(gfp_mask, q, blkcg);
q 195 block/blk-cgroup.c struct request_queue *q, bool update_hint)
q 205 block/blk-cgroup.c blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id);
q 206 block/blk-cgroup.c if (blkg && blkg->q == q) {
q 208 block/blk-cgroup.c lockdep_assert_held(&q->queue_lock);
q 223 block/blk-cgroup.c struct request_queue *q,
q 231 block/blk-cgroup.c lockdep_assert_held(&q->queue_lock);
q 234 block/blk-cgroup.c if (blk_queue_dying(q)) {
q 245 block/blk-cgroup.c wb_congested = wb_congested_get_create(q->backing_dev_info,
q 255 block/blk-cgroup.c new_blkg = blkg_alloc(blkcg, q, GFP_NOWAIT | __GFP_NOWARN);
q 266 block/blk-cgroup.c blkg->parent = __blkg_lookup(blkcg_parent(blkcg), q, false);
q 284 block/blk-cgroup.c ret = radix_tree_insert(&blkcg->blkg_tree, q->id, blkg);
q 287 block/blk-cgroup.c list_add(&blkg->q_node, &q->blkg_list);
q 329 block/blk-cgroup.c struct request_queue *q)
q 334 block/blk-cgroup.c lockdep_assert_held(&q->queue_lock);
q 336 block/blk-cgroup.c blkg = __blkg_lookup(blkcg, q, true);
q 348 block/blk-cgroup.c struct blkcg_gq *ret_blkg = q->root_blkg;
q 351 block/blk-cgroup.c blkg = __blkg_lookup(parent, q, false);
q 361 block/blk-cgroup.c blkg = blkg_create(pos, q, NULL);
q 378 block/blk-cgroup.c struct request_queue *q)
q 380 block/blk-cgroup.c struct blkcg_gq *blkg = blkg_lookup(blkcg, q);
q 385 block/blk-cgroup.c spin_lock_irqsave(&q->queue_lock, flags);
q 386 block/blk-cgroup.c blkg = __blkg_lookup_create(blkcg, q);
q 387 block/blk-cgroup.c spin_unlock_irqrestore(&q->queue_lock, flags);
q 399 block/blk-cgroup.c lockdep_assert_held(&blkg->q->queue_lock);
q 420 block/blk-cgroup.c radix_tree_delete(&blkcg->blkg_tree, blkg->q->id);
q 445 block/blk-cgroup.c static void blkg_destroy_all(struct request_queue *q)
q 449 block/blk-cgroup.c spin_lock_irq(&q->queue_lock);
q 450 block/blk-cgroup.c list_for_each_entry_safe(blkg, n, &q->blkg_list, q_node) {
q 458 block/blk-cgroup.c q->root_blkg = NULL;
q 459 block/blk-cgroup.c spin_unlock_irq(&q->queue_lock);
q 497 block/blk-cgroup.c if (blkg->q->backing_dev_info->dev)
q 498 block/blk-cgroup.c return dev_name(blkg->q->backing_dev_info->dev);
q 531 block/blk-cgroup.c spin_lock_irq(&blkg->q->queue_lock);
q 532 block/blk-cgroup.c if (blkcg_policy_enabled(blkg->q, pol))
q 534 block/blk-cgroup.c spin_unlock_irq(&blkg->q->queue_lock);
q 722 block/blk-cgroup.c lockdep_assert_held(&blkg->q->queue_lock);
q 746 block/blk-cgroup.c struct request_queue *q)
q 749 block/blk-cgroup.c lockdep_assert_held(&q->queue_lock);
q 751 block/blk-cgroup.c if (!blkcg_policy_enabled(q, pol))
q 753 block/blk-cgroup.c return __blkg_lookup(blkcg, q, true /* update_hint */);
q 811 block/blk-cgroup.c struct request_queue *q;
q 819 block/blk-cgroup.c q = disk->queue;
q 822 block/blk-cgroup.c spin_lock_irq(&q->queue_lock);
q 824 block/blk-cgroup.c blkg = blkg_lookup_check(blkcg, pol, q);
q 843 block/blk-cgroup.c while (parent && !__blkg_lookup(parent, q, false)) {
q 849 block/blk-cgroup.c spin_unlock_irq(&q->queue_lock);
q 852 block/blk-cgroup.c new_blkg = blkg_alloc(pos, q, GFP_KERNEL);
q 859 block/blk-cgroup.c spin_lock_irq(&q->queue_lock);
q 861 block/blk-cgroup.c blkg = blkg_lookup_check(pos, pol, q);
q 870 block/blk-cgroup.c blkg = blkg_create(pos, q, new_blkg);
q 887 block/blk-cgroup.c spin_unlock_irq(&q->queue_lock);
q 937 block/blk-cgroup.c spin_lock_irq(&blkg->q->queue_lock);
q 1004 block/blk-cgroup.c spin_unlock_irq(&blkg->q->queue_lock);
q 1086 block/blk-cgroup.c struct request_queue *q = blkg->q;
q 1088 block/blk-cgroup.c if (spin_trylock(&q->queue_lock)) {
q 1090 block/blk-cgroup.c spin_unlock(&q->queue_lock);
q 1197 block/blk-cgroup.c int blkcg_init_queue(struct request_queue *q)
q 1203 block/blk-cgroup.c new_blkg = blkg_alloc(&blkcg_root, q, GFP_KERNEL);
q 1211 block/blk-cgroup.c spin_lock_irq(&q->queue_lock);
q 1212 block/blk-cgroup.c blkg = blkg_create(&blkcg_root, q, new_blkg);
q 1215 block/blk-cgroup.c q->root_blkg = blkg;
q 1216 block/blk-cgroup.c spin_unlock_irq(&q->queue_lock);
q 1222 block/blk-cgroup.c ret = blk_iolatency_init(q);
q 1226 block/blk-cgroup.c ret = blk_throtl_init(q);
q 1232 block/blk-cgroup.c blkg_destroy_all(q);
q 1235 block/blk-cgroup.c spin_unlock_irq(&q->queue_lock);
q 1248 block/blk-cgroup.c void blkcg_drain_queue(struct request_queue *q)
q 1250 block/blk-cgroup.c lockdep_assert_held(&q->queue_lock);
q 1256 block/blk-cgroup.c if (!q->root_blkg)
q 1259 block/blk-cgroup.c blk_throtl_drain(q);
q 1268 block/blk-cgroup.c void blkcg_exit_queue(struct request_queue *q)
q 1270 block/blk-cgroup.c blkg_destroy_all(q);
q 1271 block/blk-cgroup.c blk_throtl_exit(q);
q 1364 block/blk-cgroup.c int blkcg_activate_policy(struct request_queue *q,
q 1371 block/blk-cgroup.c if (blkcg_policy_enabled(q, pol))
q 1374 block/blk-cgroup.c if (queue_is_mq(q))
q 1375 block/blk-cgroup.c blk_mq_freeze_queue(q);
q 1377 block/blk-cgroup.c spin_lock_irq(&q->queue_lock);
q 1380 block/blk-cgroup.c list_for_each_entry_reverse(blkg, &q->blkg_list, q_node) {
q 1391 block/blk-cgroup.c pd = pol->pd_alloc_fn(GFP_NOWAIT | __GFP_NOWARN, q,
q 1405 block/blk-cgroup.c spin_unlock_irq(&q->queue_lock);
q 1409 block/blk-cgroup.c pd_prealloc = pol->pd_alloc_fn(GFP_KERNEL, q,
q 1424 block/blk-cgroup.c list_for_each_entry_reverse(blkg, &q->blkg_list, q_node)
q 1427 block/blk-cgroup.c __set_bit(pol->plid, q->blkcg_pols);
q 1430 block/blk-cgroup.c spin_unlock_irq(&q->queue_lock);
q 1432 block/blk-cgroup.c if (queue_is_mq(q))
q 1433 block/blk-cgroup.c blk_mq_unfreeze_queue(q);
q 1442 block/blk-cgroup.c spin_lock_irq(&q->queue_lock);
q 1443 block/blk-cgroup.c list_for_each_entry(blkg, &q->blkg_list, q_node) {
q 1449 block/blk-cgroup.c spin_unlock_irq(&q->queue_lock);
q 1463 block/blk-cgroup.c void blkcg_deactivate_policy(struct request_queue *q,
q 1468 block/blk-cgroup.c if (!blkcg_policy_enabled(q, pol))
q 1471 block/blk-cgroup.c if (queue_is_mq(q))
q 1472 block/blk-cgroup.c blk_mq_freeze_queue(q);
q 1474 block/blk-cgroup.c spin_lock_irq(&q->queue_lock);
q 1476 block/blk-cgroup.c __clear_bit(pol->plid, q->blkcg_pols);
q 1478 block/blk-cgroup.c list_for_each_entry(blkg, &q->blkg_list, q_node) {
q 1487 block/blk-cgroup.c spin_unlock_irq(&q->queue_lock);
q 1489 block/blk-cgroup.c if (queue_is_mq(q))
q 1490 block/blk-cgroup.c blk_mq_unfreeze_queue(q);
q 1749 block/blk-cgroup.c struct request_queue *q = current->throttle_queue;
q 1755 block/blk-cgroup.c if (!q)
q 1770 block/blk-cgroup.c blkg = blkg_lookup(blkcg, q);
q 1779 block/blk-cgroup.c blk_put_queue(q);
q 1783 block/blk-cgroup.c blk_put_queue(q);
q 1803 block/blk-cgroup.c void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay)
q 1808 block/blk-cgroup.c if (!blk_get_queue(q))
q 1813 block/blk-cgroup.c current->throttle_queue = q;
q 78 block/blk-core.c void blk_queue_flag_set(unsigned int flag, struct request_queue *q)
q 80 block/blk-core.c set_bit(flag, &q->queue_flags);
q 89 block/blk-core.c void blk_queue_flag_clear(unsigned int flag, struct request_queue *q)
q 91 block/blk-core.c clear_bit(flag, &q->queue_flags);
q 103 block/blk-core.c bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q)
q 105 block/blk-core.c return test_and_set_bit(flag, &q->queue_flags);
q 109 block/blk-core.c void blk_rq_init(struct request_queue *q, struct request *rq)
q 114 block/blk-core.c rq->q = q;
q 277 block/blk-core.c void blk_sync_queue(struct request_queue *q)
q 279 block/blk-core.c del_timer_sync(&q->timeout);
q 280 block/blk-core.c cancel_work_sync(&q->timeout_work);
q 288 block/blk-core.c void blk_set_pm_only(struct request_queue *q)
q 290 block/blk-core.c atomic_inc(&q->pm_only);
q 294 block/blk-core.c void blk_clear_pm_only(struct request_queue *q)
q 298 block/blk-core.c pm_only = atomic_dec_return(&q->pm_only);
q 301 block/blk-core.c wake_up_all(&q->mq_freeze_wq);
q 305 block/blk-core.c void blk_put_queue(struct request_queue *q)
q 307 block/blk-core.c kobject_put(&q->kobj);
q 311 block/blk-core.c void blk_set_queue_dying(struct request_queue *q)
q 313 block/blk-core.c blk_queue_flag_set(QUEUE_FLAG_DYING, q);
q 320 block/blk-core.c blk_freeze_queue_start(q);
q 322 block/blk-core.c if (queue_is_mq(q))
q 323 block/blk-core.c blk_mq_wake_waiters(q);
q 326 block/blk-core.c wake_up_all(&q->mq_freeze_wq);
q 337 block/blk-core.c void blk_cleanup_queue(struct request_queue *q)
q 340 block/blk-core.c mutex_lock(&q->sysfs_lock);
q 341 block/blk-core.c blk_set_queue_dying(q);
q 343 block/blk-core.c blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
q 344 block/blk-core.c blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
q 345 block/blk-core.c blk_queue_flag_set(QUEUE_FLAG_DYING, q);
q 346 block/blk-core.c mutex_unlock(&q->sysfs_lock);
q 353 block/blk-core.c blk_freeze_queue(q);
q 355 block/blk-core.c rq_qos_exit(q);
q 357 block/blk-core.c blk_queue_flag_set(QUEUE_FLAG_DEAD, q);
q 363 block/blk-core.c del_timer_sync(&q->backing_dev_info->laptop_mode_wb_timer);
q 364 block/blk-core.c blk_sync_queue(q);
q 366 block/blk-core.c if (queue_is_mq(q))
q 367 block/blk-core.c blk_mq_exit_queue(q);
q 377 block/blk-core.c mutex_lock(&q->sysfs_lock);
q 378 block/blk-core.c if (q->elevator)
q 379 block/blk-core.c blk_mq_sched_free_requests(q);
q 380 block/blk-core.c mutex_unlock(&q->sysfs_lock);
q 382 block/blk-core.c percpu_ref_exit(&q->q_usage_counter);
q 385 block/blk-core.c blk_put_queue(q);
q 400 block/blk-core.c int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
q 408 block/blk-core.c if (percpu_ref_tryget_live(&q->q_usage_counter)) {
q 414 block/blk-core.c if (pm || !blk_queue_pm_only(q)) {
q 417 block/blk-core.c percpu_ref_put(&q->q_usage_counter);
q 437 block/blk-core.c wait_event(q->mq_freeze_wq,
q 438 block/blk-core.c (!q->mq_freeze_depth &&
q 439 block/blk-core.c (pm || (blk_pm_request_resume(q),
q 440 block/blk-core.c !blk_queue_pm_only(q)))) ||
q 441 block/blk-core.c blk_queue_dying(q));
q 442 block/blk-core.c if (blk_queue_dying(q))
q 447 block/blk-core.c void blk_queue_exit(struct request_queue *q)
q 449 block/blk-core.c percpu_ref_put(&q->q_usage_counter);
q 454 block/blk-core.c struct request_queue *q =
q 457 block/blk-core.c wake_up_all(&q->mq_freeze_wq);
q 462 block/blk-core.c struct request_queue *q = from_timer(q, t, timeout);
q 464 block/blk-core.c kblockd_schedule_work(&q->timeout_work);
q 478 block/blk-core.c struct request_queue *q;
q 481 block/blk-core.c q = kmem_cache_alloc_node(blk_requestq_cachep,
q 483 block/blk-core.c if (!q)
q 486 block/blk-core.c q->last_merge = NULL;
q 488 block/blk-core.c q->id = ida_simple_get(&blk_queue_ida, 0, 0, gfp_mask);
q 489 block/blk-core.c if (q->id < 0)
q 492 block/blk-core.c ret = bioset_init(&q->bio_split, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
q 496 block/blk-core.c q->backing_dev_info = bdi_alloc_node(gfp_mask, node_id);
q 497 block/blk-core.c if (!q->backing_dev_info)
q 500 block/blk-core.c q->stats = blk_alloc_queue_stats();
q 501 block/blk-core.c if (!q->stats)
q 504 block/blk-core.c q->backing_dev_info->ra_pages = VM_READAHEAD_PAGES;
q 505 block/blk-core.c q->backing_dev_info->capabilities = BDI_CAP_CGROUP_WRITEBACK;
q 506 block/blk-core.c q->backing_dev_info->name = "block";
q 507 block/blk-core.c q->node = node_id;
q 509 block/blk-core.c timer_setup(&q->backing_dev_info->laptop_mode_wb_timer,
q 511 block/blk-core.c timer_setup(&q->timeout, blk_rq_timed_out_timer, 0);
q 512 block/blk-core.c INIT_WORK(&q->timeout_work, blk_timeout_work);
q 513 block/blk-core.c INIT_LIST_HEAD(&q->icq_list);
q 515 block/blk-core.c INIT_LIST_HEAD(&q->blkg_list);
q 518 block/blk-core.c kobject_init(&q->kobj, &blk_queue_ktype);
q 521 block/blk-core.c mutex_init(&q->blk_trace_mutex);
q 523 block/blk-core.c mutex_init(&q->sysfs_lock);
q 524 block/blk-core.c mutex_init(&q->sysfs_dir_lock);
q 525 block/blk-core.c spin_lock_init(&q->queue_lock);
q 527 block/blk-core.c init_waitqueue_head(&q->mq_freeze_wq);
q 528 block/blk-core.c mutex_init(&q->mq_freeze_lock);
q 534 block/blk-core.c if (percpu_ref_init(&q->q_usage_counter,
q 539 block/blk-core.c if (blkcg_init_queue(q))
q 542 block/blk-core.c return q;
q 545 block/blk-core.c percpu_ref_exit(&q->q_usage_counter);
q 547 block/blk-core.c blk_free_queue_stats(q->stats);
q 549 block/blk-core.c bdi_put(q->backing_dev_info);
q 551 block/blk-core.c bioset_exit(&q->bio_split);
q 553 block/blk-core.c ida_simple_remove(&blk_queue_ida, q->id);
q 555 block/blk-core.c kmem_cache_free(blk_requestq_cachep, q);
q 560 block/blk-core.c bool blk_get_queue(struct request_queue *q)
q 562 block/blk-core.c if (likely(!blk_queue_dying(q))) {
q 563 block/blk-core.c __blk_get_queue(q);
q 577 block/blk-core.c struct request *blk_get_request(struct request_queue *q, unsigned int op,
q 585 block/blk-core.c req = blk_mq_alloc_request(q, op, flags);
q 586 block/blk-core.c if (!IS_ERR(req) && q->mq_ops->initialize_rq_fn)
q 587 block/blk-core.c q->mq_ops->initialize_rq_fn(req);
q 607 block/blk-core.c trace_block_bio_backmerge(req->q, req, bio);
q 608 block/blk-core.c rq_qos_merge(req->q, req, bio);
q 629 block/blk-core.c trace_block_bio_frontmerge(req->q, req, bio);
q 630 block/blk-core.c rq_qos_merge(req->q, req, bio);
q 645 block/blk-core.c bool bio_attempt_discard_merge(struct request_queue *q, struct request *req,
q 650 block/blk-core.c if (segments >= queue_max_discard_segments(q))
q 656 block/blk-core.c rq_qos_merge(q, req, bio);
q 666 block/blk-core.c req_set_nomerge(q, req);
q 692 block/blk-core.c bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
q 699 block/blk-core.c plug = blk_mq_plug(q, bio);
q 708 block/blk-core.c if (rq->q == q && same_queue_rq) {
q 717 block/blk-core.c if (rq->q != q || !blk_rq_merge_ok(rq, bio))
q 728 block/blk-core.c merged = bio_attempt_discard_merge(q, rq, bio);
q 872 block/blk-core.c struct request_queue *q;
q 879 block/blk-core.c q = bio->bi_disk->queue;
q 880 block/blk-core.c if (unlikely(!q)) {
q 892 block/blk-core.c if ((bio->bi_opf & REQ_NOWAIT) && !queue_is_mq(q))
q 914 block/blk-core.c !test_bit(QUEUE_FLAG_WC, &q->queue_flags)) {
q 922 block/blk-core.c if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
q 927 block/blk-core.c if (!blk_queue_discard(q))
q 931 block/blk-core.c if (!blk_queue_secure_erase(q))
q 935 block/blk-core.c if (!q->limits.max_write_same_sectors)
q 939 block/blk-core.c if (!blk_queue_is_zoned(q))
q 943 block/blk-core.c if (!blk_queue_is_zoned(q) || !blk_queue_zone_resetall(q))
q 947 block/blk-core.c if (!q->limits.max_write_zeroes_sectors)
q 960 block/blk-core.c create_io_context(GFP_ATOMIC, q->node);
q 962 block/blk-core.c if (!blkcg_bio_issue_check(q, bio))
q 966 block/blk-core.c trace_block_bio_queue(q, bio);
q 1054 block/blk-core.c struct request_queue *q = bio->bi_disk->queue;
q 1058 block/blk-core.c if (likely(blk_queue_enter(q, flags) == 0)) {
q 1064 block/blk-core.c ret = q->make_request_fn(q, bio);
q 1066 block/blk-core.c blk_queue_exit(q);
q 1074 block/blk-core.c if (q == bio->bi_disk->queue)
q 1083 block/blk-core.c if (unlikely(!blk_queue_dying(q) &&
q 1110 block/blk-core.c struct request_queue *q = bio->bi_disk->queue;
q 1117 block/blk-core.c if (unlikely(blk_queue_enter(q, nowait ? BLK_MQ_REQ_NOWAIT : 0))) {
q 1118 block/blk-core.c if (nowait && !blk_queue_dying(q))
q 1126 block/blk-core.c ret = q->make_request_fn(q, bio);
q 1127 block/blk-core.c blk_queue_exit(q);
q 1216 block/blk-core.c static int blk_cloned_rq_check_limits(struct request_queue *q,
q 1219 block/blk-core.c if (blk_rq_sectors(rq) > blk_queue_get_max_sectors(q, req_op(rq))) {
q 1222 block/blk-core.c blk_queue_get_max_sectors(q, req_op(rq)));
q 1233 block/blk-core.c if (rq->nr_phys_segments > queue_max_segments(q)) {
q 1235 block/blk-core.c __func__, rq->nr_phys_segments, queue_max_segments(q));
q 1247 block/blk-core.c blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *rq)
q 1249 block/blk-core.c if (blk_cloned_rq_check_limits(q, rq))
q 1256 block/blk-core.c if (blk_queue_io_stat(q))
q 1340 block/blk-core.c part_dec_in_flight(req->q, part, rq_data_dir(req));
q 1374 block/blk-core.c part_inc_in_flight(rq->q, part, rw);
q 1443 block/blk-core.c req->q->integrity.profile->complete_fn(req, nr_bytes);
q 1552 block/blk-core.c int blk_lld_busy(struct request_queue *q)
q 1554 block/blk-core.c if (queue_is_mq(q) && q->mq_ops->busy)
q 1555 block/blk-core.c return q->mq_ops->busy(q);
q 48 block/blk-exec.c void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
q 77 block/blk-exec.c void blk_execute_rq(struct request_queue *q, struct gendisk *bd_disk,
q 84 block/blk-exec.c blk_execute_rq_nowait(q, bd_disk, rq, at_head, blk_end_sync_rq);
q 96 block/blk-flush.c static void blk_kick_flush(struct request_queue *q,
q 160 block/blk-flush.c struct request_queue *q = rq->q;
q 204 block/blk-flush.c blk_kick_flush(q, fq, cmd_flags);
q 209 block/blk-flush.c struct request_queue *q = flush_rq->q;
q 213 block/blk-flush.c struct blk_flush_queue *fq = blk_get_flush_queue(q, flush_rq->mq_ctx);
q 229 block/blk-flush.c if (!q->elevator) {
q 268 block/blk-flush.c static void blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
q 286 block/blk-flush.c if (!list_empty(&fq->flush_data_in_flight) && q->elevator &&
q 297 block/blk-flush.c blk_rq_init(q, flush_rq);
q 310 block/blk-flush.c if (!q->elevator) {
q 329 block/blk-flush.c struct request_queue *q = rq->q;
q 333 block/blk-flush.c struct blk_flush_queue *fq = blk_get_flush_queue(q, ctx);
q 335 block/blk-flush.c if (q->elevator) {
q 362 block/blk-flush.c struct request_queue *q = rq->q;
q 363 block/blk-flush.c unsigned long fflags = q->queue_flags; /* may change, cache */
q 365 block/blk-flush.c struct blk_flush_queue *fq = blk_get_flush_queue(q, rq->mq_ctx);
q 436 block/blk-flush.c struct request_queue *q;
q 443 block/blk-flush.c q = bdev_get_queue(bdev);
q 444 block/blk-flush.c if (!q)
q 453 block/blk-flush.c if (!q->make_request_fn)
q 475 block/blk-flush.c struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
q 27 block/blk-integrity.c int blk_rq_count_integrity_sg(struct request_queue *q, struct bio *bio)
q 38 block/blk-integrity.c if (!biovec_phys_mergeable(q, &ivprv, &iv))
q 40 block/blk-integrity.c if (seg_size + iv.bv_len > queue_max_segment_size(q))
q 68 block/blk-integrity.c int blk_rq_map_integrity_sg(struct request_queue *q, struct bio *bio,
q 80 block/blk-integrity.c if (!biovec_phys_mergeable(q, &ivprv, &iv))
q 82 block/blk-integrity.c if (sg->length + iv.bv_len > queue_max_segment_size(q))
q 164 block/blk-integrity.c bool blk_integrity_merge_rq(struct request_queue *q, struct request *req,
q 178 block/blk-integrity.c q->limits.max_integrity_segments)
q 188 block/blk-integrity.c bool blk_integrity_merge_bio(struct request_queue *q, struct request *req,
q 204 block/blk-integrity.c nr_integrity_segs = blk_rq_count_integrity_sg(q, bio);
q 208 block/blk-integrity.c q->limits.max_integrity_segments)
q 45 block/blk-ioc.c struct elevator_type *et = icq->q->elevator->type;
q 63 block/blk-ioc.c struct request_queue *q = icq->q;
q 64 block/blk-ioc.c struct elevator_type *et = q->elevator->type;
q 68 block/blk-ioc.c radix_tree_delete(&ioc->icq_tree, icq->q->id);
q 112 block/blk-ioc.c struct request_queue *q = icq->q;
q 114 block/blk-ioc.c if (spin_trylock(&q->queue_lock)) {
q 116 block/blk-ioc.c spin_unlock(&q->queue_lock);
q 239 block/blk-ioc.c void ioc_clear_queue(struct request_queue *q)
q 243 block/blk-ioc.c spin_lock_irq(&q->queue_lock);
q 244 block/blk-ioc.c list_splice_init(&q->icq_list, &icq_list);
q 245 block/blk-ioc.c spin_unlock_irq(&q->queue_lock);
q 332 block/blk-ioc.c struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q)
q 336 block/blk-ioc.c lockdep_assert_held(&q->queue_lock);
q 346 block/blk-ioc.c if (icq && icq->q == q)
q 349 block/blk-ioc.c icq = radix_tree_lookup(&ioc->icq_tree, q->id);
q 350 block/blk-ioc.c if (icq && icq->q == q)
q 372 block/blk-ioc.c struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
q 375 block/blk-ioc.c struct elevator_type *et = q->elevator->type;
q 380 block/blk-ioc.c q->node);
q 390 block/blk-ioc.c icq->q = q;
q 395 block/blk-ioc.c spin_lock_irq(&q->queue_lock);
q 398 block/blk-ioc.c if (likely(!radix_tree_insert(&ioc->icq_tree, q->id, icq))) {
q 400 block/blk-ioc.c list_add(&icq->q_node, &q->icq_list);
q 405 block/blk-ioc.c icq = ioc_lookup_icq(ioc, q);
q 411 block/blk-ioc.c spin_unlock_irq(&q->queue_lock);
q 616 block/blk-iocost.c static struct ioc *q_to_ioc(struct request_queue *q)
q 618 block/blk-iocost.c return rqos_to_ioc(rq_qos_id(q, RQ_QOS_COST));
q 621 block/blk-iocost.c static const char *q_name(struct request_queue *q)
q 623 block/blk-iocost.c if (test_bit(QUEUE_FLAG_REGISTERED, &q->queue_flags))
q 624 block/blk-iocost.c return kobject_name(q->kobj.parent);
q 631 block/blk-iocost.c return q_name(ioc->rqos.q);
q 727 block/blk-iocost.c if (!blk_queue_nonrot(ioc->rqos.q))
q 731 block/blk-iocost.c if (blk_queue_depth(ioc->rqos.q) == 1)
q 1769 block/blk-iocost.c blkcg_schedule_throttle(rqos->q,
q 1917 block/blk-iocost.c blkcg_deactivate_policy(rqos->q, &blkcg_policy_iocost);
q 1937 block/blk-iocost.c static int blk_iocost_init(struct request_queue *q)
q 1956 block/blk-iocost.c rqos->q = q;
q 1974 block/blk-iocost.c rq_qos_add(q, rqos);
q 1975 block/blk-iocost.c ret = blkcg_activate_policy(q, &blkcg_policy_iocost);
q 1977 block/blk-iocost.c rq_qos_del(q, rqos);
q 2002 block/blk-iocost.c static struct blkg_policy_data *ioc_pd_alloc(gfp_t gfp, struct request_queue *q,
q 2009 block/blk-iocost.c gfp, q->node);
q 2020 block/blk-iocost.c struct ioc *ioc = q_to_ioc(blkg->q);
q 2303 block/blk-iocost.c blk_queue_flag_set(QUEUE_FLAG_RQ_ALLOC_TIME, ioc->rqos.q);
q 2306 block/blk-iocost.c blk_queue_flag_clear(QUEUE_FLAG_RQ_ALLOC_TIME, ioc->rqos.q);
q 289 block/blk-iolatency.c blkcg_schedule_throttle(rqos->q, use_memdelay);
q 327 block/blk-iolatency.c unsigned long qd = blkiolat->rqos.q->nr_requests;
q 367 block/blk-iolatency.c unsigned long qd = iolat->blkiolat->rqos.q->nr_requests;
q 646 block/blk-iolatency.c blkcg_deactivate_policy(rqos->q, &blkcg_policy_iolatency);
q 665 block/blk-iolatency.c blkiolat->rqos.q->root_blkg) {
q 717 block/blk-iolatency.c int blk_iolatency_init(struct
request_queue *q) q 730 block/blk-iolatency.c rqos->q = q; q 732 block/blk-iolatency.c rq_qos_add(q, rqos); q 734 block/blk-iolatency.c ret = blkcg_activate_policy(q, &blkcg_policy_iolatency); q 736 block/blk-iolatency.c rq_qos_del(q, rqos); q 835 block/blk-iolatency.c WARN_ON_ONCE(!blk_get_queue(blkg->q)); q 850 block/blk-iolatency.c blk_mq_freeze_queue(blkg->q); q 859 block/blk-iolatency.c blk_mq_unfreeze_queue(blkg->q); q 862 block/blk-iolatency.c blk_put_queue(blkg->q); q 938 block/blk-iolatency.c struct request_queue *q, q 943 block/blk-iolatency.c iolat = kzalloc_node(sizeof(*iolat), gfp, q->node); q 959 block/blk-iolatency.c struct rq_qos *rqos = blkcg_rq_qos(blkg->q); q 964 block/blk-iolatency.c if (blk_queue_nonrot(blkg->q)) q 978 block/blk-iolatency.c iolat->rq_depth.queue_depth = blkg->q->nr_requests; q 29 block/blk-lib.c struct request_queue *q = bdev_get_queue(bdev); q 34 block/blk-lib.c if (!q) q 41 block/blk-lib.c if (!blk_queue_secure_erase(q)) q 45 block/blk-lib.c if (!blk_queue_discard(q)) q 59 block/blk-lib.c bio_allowed_max_sectors(q)); q 135 block/blk-lib.c struct request_queue *q = bdev_get_queue(bdev); q 140 block/blk-lib.c if (!q) q 154 block/blk-lib.c max_write_same_sectors = bio_allowed_max_sectors(q); q 218 block/blk-lib.c struct request_queue *q = bdev_get_queue(bdev); q 220 block/blk-lib.c if (!q) q 272 block/blk-lib.c struct request_queue *q = bdev_get_queue(bdev); q 277 block/blk-lib.c if (!q) q 25 block/blk-map.c blk_queue_bounce(rq->q, bio); q 68 block/blk-map.c struct request_queue *q = rq->q; q 73 block/blk-map.c bio = bio_copy_user_iov(q, map_data, iter, gfp_mask); q 75 block/blk-map.c bio = bio_map_user_iov(q, iter, gfp_mask); q 120 block/blk-map.c int blk_rq_map_user_iov(struct request_queue *q, struct request *rq, q 125 block/blk-map.c unsigned long align = q->dma_pad_mask | queue_dma_alignment(q); q 137 block/blk-map.c else if (queue_virt_boundary(q)) q 138 block/blk-map.c copy = queue_virt_boundary(q) & iov_iter_gap_alignment(iter); q 161 block/blk-map.c int blk_rq_map_user(struct request_queue *q, struct request *rq, q 172 block/blk-map.c return blk_rq_map_user_iov(q, rq, map_data, &i, gfp_mask); q 221 block/blk-map.c int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf, q 230 block/blk-map.c if (len > (queue_max_hw_sectors(q) << 9)) q 235 block/blk-map.c do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf); q 237 block/blk-map.c bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading); q 239 block/blk-map.c bio = bio_map_kern(q, kbuf, len, gfp_mask); q 15 block/blk-merge.c static inline bool bio_will_gap(struct request_queue *q, q 20 block/blk-merge.c if (!bio_has_data(prev) || !queue_virt_boundary(q)) q 32 block/blk-merge.c if (pb.bv_offset & queue_virt_boundary(q)) q 46 block/blk-merge.c if (biovec_phys_mergeable(q, &pb, &nb)) q 48 block/blk-merge.c return __bvec_gap_to_prev(q, &pb, nb.bv_offset); q 53 block/blk-merge.c return bio_will_gap(req->q, req, req->biotail, bio); q 58 block/blk-merge.c return bio_will_gap(req->q, NULL, bio, req->bio); q 61 block/blk-merge.c static struct bio *blk_bio_discard_split(struct request_queue *q, q 74 block/blk-merge.c granularity = max(q->limits.discard_granularity >> 9, 1U); q 76 block/blk-merge.c max_discard_sectors = min(q->limits.max_discard_sectors, q 77 block/blk-merge.c bio_allowed_max_sectors(q)); q 94 block/blk-merge.c alignment = (q->limits.discard_alignment >> 9) % granularity; q 105 block/blk-merge.c static struct bio *blk_bio_write_zeroes_split(struct 
request_queue *q, q 110 block/blk-merge.c if (!q->limits.max_write_zeroes_sectors) q 113 block/blk-merge.c if (bio_sectors(bio) <= q->limits.max_write_zeroes_sectors) q 116 block/blk-merge.c return bio_split(bio, q->limits.max_write_zeroes_sectors, GFP_NOIO, bs); q 119 block/blk-merge.c static struct bio *blk_bio_write_same_split(struct request_queue *q, q 126 block/blk-merge.c if (!q->limits.max_write_same_sectors) q 129 block/blk-merge.c if (bio_sectors(bio) <= q->limits.max_write_same_sectors) q 132 block/blk-merge.c return bio_split(bio, q->limits.max_write_same_sectors, GFP_NOIO, bs); q 143 block/blk-merge.c static inline unsigned get_max_io_size(struct request_queue *q, q 146 block/blk-merge.c unsigned sectors = blk_max_size_offset(q, bio->bi_iter.bi_sector); q 148 block/blk-merge.c unsigned pbs = queue_physical_block_size(q) >> SECTOR_SHIFT; q 149 block/blk-merge.c unsigned lbs = queue_logical_block_size(q) >> SECTOR_SHIFT; q 160 block/blk-merge.c static unsigned get_max_segment_size(const struct request_queue *q, q 163 block/blk-merge.c unsigned long mask = queue_segment_boundary(q); q 167 block/blk-merge.c return queue_max_segment_size(q); q 170 block/blk-merge.c queue_max_segment_size(q)); q 193 block/blk-merge.c static bool bvec_split_segs(const struct request_queue *q, q 204 block/blk-merge.c seg_size = get_max_segment_size(q, bv->bv_offset + total_len); q 211 block/blk-merge.c if ((bv->bv_offset + total_len) & queue_virt_boundary(q)) q 240 block/blk-merge.c static struct bio *blk_bio_segment_split(struct request_queue *q, q 248 block/blk-merge.c const unsigned max_sectors = get_max_io_size(q, bio); q 249 block/blk-merge.c const unsigned max_segs = queue_max_segments(q); q 256 block/blk-merge.c if (bvprvp && bvec_gap_to_prev(q, bvprvp, bv.bv_offset)) q 264 block/blk-merge.c } else if (bvec_split_segs(q, &bv, &nsegs, &sectors, max_segs, q 293 block/blk-merge.c void __blk_queue_split(struct request_queue *q, struct bio **bio, q 301 block/blk-merge.c split = blk_bio_discard_split(q, *bio, &q->bio_split, nr_segs); q 304 block/blk-merge.c split = blk_bio_write_zeroes_split(q, *bio, &q->bio_split, q 308 block/blk-merge.c split = blk_bio_write_same_split(q, *bio, &q->bio_split, q 312 block/blk-merge.c split = blk_bio_segment_split(q, *bio, &q->bio_split, nr_segs); q 331 block/blk-merge.c trace_block_split(q, split, (*bio)->bi_iter.bi_sector); q 348 block/blk-merge.c void blk_queue_split(struct request_queue *q, struct bio **bio) q 352 block/blk-merge.c __blk_queue_split(q, bio, &nr_segs); q 376 block/blk-merge.c bvec_split_segs(rq->q, &bv, &nr_phys_segs, &nr_sectors, q 398 block/blk-merge.c static unsigned blk_bvec_map_sg(struct request_queue *q, q 407 block/blk-merge.c unsigned len = min(get_max_segment_size(q, offset), nbytes); q 442 block/blk-merge.c __blk_segment_map_sg_merge(struct request_queue *q, struct bio_vec *bvec, q 451 block/blk-merge.c if ((*sg)->length + nbytes > queue_max_segment_size(q)) q 454 block/blk-merge.c if (!biovec_phys_mergeable(q, bvprv, bvec)) q 462 block/blk-merge.c static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio, q 479 block/blk-merge.c __blk_segment_map_sg_merge(q, &bvec, &bvprv, sg)) q 485 block/blk-merge.c nsegs += blk_bvec_map_sg(q, &bvec, sglist, sg); q 502 block/blk-merge.c int blk_rq_map_sg(struct request_queue *q, struct request *rq, q 513 block/blk-merge.c nsegs = __blk_bios_map_sg(q, rq->bio, sglist, &sg); q 516 block/blk-merge.c (blk_rq_bytes(rq) & q->dma_pad_mask)) { q 518 block/blk-merge.c (q->dma_pad_mask & 
~blk_rq_bytes(rq)) + 1; q 524 block/blk-merge.c if (q->dma_drain_size && q->dma_drain_needed(rq)) { q 526 block/blk-merge.c memset(q->dma_drain_buffer, 0, q->dma_drain_size); q 530 block/blk-merge.c sg_set_page(sg, virt_to_page(q->dma_drain_buffer), q 531 block/blk-merge.c q->dma_drain_size, q 532 block/blk-merge.c ((unsigned long)q->dma_drain_buffer) & q 535 block/blk-merge.c rq->extra_len += q->dma_drain_size; q 554 block/blk-merge.c if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(req->q)) q 557 block/blk-merge.c if (blk_integrity_merge_bio(req->q, req, bio) == false) q 568 block/blk-merge.c req_set_nomerge(req->q, req); q 581 block/blk-merge.c req_set_nomerge(req->q, req); q 597 block/blk-merge.c req_set_nomerge(req->q, req); q 604 block/blk-merge.c static bool req_attempt_discard_merge(struct request_queue *q, struct request *req, q 609 block/blk-merge.c if (segments >= queue_max_discard_segments(q)) q 618 block/blk-merge.c req_set_nomerge(q, req); q 622 block/blk-merge.c static int ll_merge_requests_fn(struct request_queue *q, struct request *req, q 638 block/blk-merge.c if (total_phys_segments > queue_max_segments(q)) q 641 block/blk-merge.c if (blk_integrity_merge_rq(q, req, next) == false) q 687 block/blk-merge.c part_dec_in_flight(req->q, part, rq_data_dir(req)); q 704 block/blk-merge.c queue_max_discard_segments(req->q) > 1) q 724 block/blk-merge.c static struct request *attempt_merge(struct request_queue *q, q 761 block/blk-merge.c if (!req_attempt_discard_merge(q, req, next)) q 765 block/blk-merge.c if (!ll_merge_requests_fn(q, req, next)) q 799 block/blk-merge.c elv_merge_requests(q, req, next); q 814 block/blk-merge.c struct request *attempt_back_merge(struct request_queue *q, struct request *rq) q 816 block/blk-merge.c struct request *next = elv_latter_request(q, rq); q 819 block/blk-merge.c return attempt_merge(q, rq, next); q 824 block/blk-merge.c struct request *attempt_front_merge(struct request_queue *q, struct request *rq) q 826 block/blk-merge.c struct request *prev = elv_former_request(q, rq); q 829 block/blk-merge.c return attempt_merge(q, prev, rq); q 834 block/blk-merge.c int blk_attempt_req_merge(struct request_queue *q, struct request *rq, q 839 block/blk-merge.c free = attempt_merge(q, rq, next); q 865 block/blk-merge.c if (blk_integrity_merge_bio(rq->q, rq, bio) == false) q 19 block/blk-mq-cpumap.c unsigned int nr_queues, const int q) q 21 block/blk-mq-cpumap.c return qmap->queue_offset + (q % nr_queues); q 39 block/blk-mq-cpumap.c unsigned int cpu, first_sibling, q = 0; q 49 block/blk-mq-cpumap.c if (q >= nr_queues) q 51 block/blk-mq-cpumap.c map[cpu] = queue_index(qmap, nr_queues, q++); q 63 block/blk-mq-cpumap.c if (q < nr_queues) { q 64 block/blk-mq-cpumap.c map[cpu] = queue_index(qmap, nr_queues, q++); q 68 block/blk-mq-cpumap.c map[cpu] = queue_index(qmap, nr_queues, q++); q 11 block/blk-mq-debugfs-zoned.c struct request_queue *q = data; q 14 block/blk-mq-debugfs-zoned.c if (!q->seq_zones_wlock) q 17 block/blk-mq-debugfs-zoned.c for (i = 0; i < q->nr_zones; i++) q 18 block/blk-mq-debugfs-zoned.c if (test_bit(i, q->seq_zones_wlock)) q 29 block/blk-mq-debugfs.c struct request_queue *q = data; q 34 block/blk-mq-debugfs.c print_stat(m, &q->poll_stat[2 * bucket]); q 38 block/blk-mq-debugfs.c print_stat(m, &q->poll_stat[2 * bucket + 1]); q 45 block/blk-mq-debugfs.c __acquires(&q->requeue_lock) q 47 block/blk-mq-debugfs.c struct request_queue *q = m->private; q 49 block/blk-mq-debugfs.c spin_lock_irq(&q->requeue_lock); q 50 block/blk-mq-debugfs.c 
return seq_list_start(&q->requeue_list, *pos); q 55 block/blk-mq-debugfs.c struct request_queue *q = m->private; q 57 block/blk-mq-debugfs.c return seq_list_next(v, &q->requeue_list, pos); q 61 block/blk-mq-debugfs.c __releases(&q->requeue_lock) q 63 block/blk-mq-debugfs.c struct request_queue *q = m->private; q 65 block/blk-mq-debugfs.c spin_unlock_irq(&q->requeue_lock); q 97 block/blk-mq-debugfs.c struct request_queue *q = data; q 99 block/blk-mq-debugfs.c seq_printf(m, "%d\n", atomic_read(&q->pm_only)); q 133 block/blk-mq-debugfs.c struct request_queue *q = data; q 135 block/blk-mq-debugfs.c blk_flags_show(m, q->queue_flags, blk_queue_flag_name, q 144 block/blk-mq-debugfs.c struct request_queue *q = data; q 152 block/blk-mq-debugfs.c if (blk_queue_dead(q)) q 164 block/blk-mq-debugfs.c blk_mq_run_hw_queues(q, true); q 166 block/blk-mq-debugfs.c blk_mq_start_stopped_hw_queues(q, true); q 168 block/blk-mq-debugfs.c blk_mq_kick_requeue_list(q); q 180 block/blk-mq-debugfs.c struct request_queue *q = data; q 184 block/blk-mq-debugfs.c seq_printf(m, "hint%d: %llu\n", i, q->write_hints[i]); q 192 block/blk-mq-debugfs.c struct request_queue *q = data; q 196 block/blk-mq-debugfs.c q->write_hints[i] = 0; q 326 block/blk-mq-debugfs.c const struct blk_mq_ops *const mq_ops = rq->q->mq_ops; q 463 block/blk-mq-debugfs.c struct request_queue *q = hctx->queue; q 466 block/blk-mq-debugfs.c res = mutex_lock_interruptible(&q->sysfs_lock); q 471 block/blk-mq-debugfs.c mutex_unlock(&q->sysfs_lock); q 480 block/blk-mq-debugfs.c struct request_queue *q = hctx->queue; q 483 block/blk-mq-debugfs.c res = mutex_lock_interruptible(&q->sysfs_lock); q 488 block/blk-mq-debugfs.c mutex_unlock(&q->sysfs_lock); q 497 block/blk-mq-debugfs.c struct request_queue *q = hctx->queue; q 500 block/blk-mq-debugfs.c res = mutex_lock_interruptible(&q->sysfs_lock); q 505 block/blk-mq-debugfs.c mutex_unlock(&q->sysfs_lock); q 514 block/blk-mq-debugfs.c struct request_queue *q = hctx->queue; q 517 block/blk-mq-debugfs.c res = mutex_lock_interruptible(&q->sysfs_lock); q 522 block/blk-mq-debugfs.c mutex_unlock(&q->sysfs_lock); q 821 block/blk-mq-debugfs.c void blk_mq_debugfs_register(struct request_queue *q) q 826 block/blk-mq-debugfs.c q->debugfs_dir = debugfs_create_dir(kobject_name(q->kobj.parent), q 829 block/blk-mq-debugfs.c debugfs_create_files(q->debugfs_dir, q, blk_mq_debugfs_queue_attrs); q 836 block/blk-mq-debugfs.c if (q->elevator && !q->sched_debugfs_dir) q 837 block/blk-mq-debugfs.c blk_mq_debugfs_register_sched(q); q 840 block/blk-mq-debugfs.c queue_for_each_hw_ctx(q, hctx, i) { q 842 block/blk-mq-debugfs.c blk_mq_debugfs_register_hctx(q, hctx); q 843 block/blk-mq-debugfs.c if (q->elevator && !hctx->sched_debugfs_dir) q 844 block/blk-mq-debugfs.c blk_mq_debugfs_register_sched_hctx(q, hctx); q 847 block/blk-mq-debugfs.c if (q->rq_qos) { q 848 block/blk-mq-debugfs.c struct rq_qos *rqos = q->rq_qos; q 857 block/blk-mq-debugfs.c void blk_mq_debugfs_unregister(struct request_queue *q) q 859 block/blk-mq-debugfs.c debugfs_remove_recursive(q->debugfs_dir); q 860 block/blk-mq-debugfs.c q->sched_debugfs_dir = NULL; q 861 block/blk-mq-debugfs.c q->debugfs_dir = NULL; q 876 block/blk-mq-debugfs.c void blk_mq_debugfs_register_hctx(struct request_queue *q, q 884 block/blk-mq-debugfs.c hctx->debugfs_dir = debugfs_create_dir(name, q->debugfs_dir); q 899 block/blk-mq-debugfs.c void blk_mq_debugfs_register_hctxs(struct request_queue *q) q 904 block/blk-mq-debugfs.c queue_for_each_hw_ctx(q, hctx, i) q 905 block/blk-mq-debugfs.c 
blk_mq_debugfs_register_hctx(q, hctx); q 908 block/blk-mq-debugfs.c void blk_mq_debugfs_unregister_hctxs(struct request_queue *q) q 913 block/blk-mq-debugfs.c queue_for_each_hw_ctx(q, hctx, i) q 917 block/blk-mq-debugfs.c void blk_mq_debugfs_register_sched(struct request_queue *q) q 919 block/blk-mq-debugfs.c struct elevator_type *e = q->elevator->type; q 925 block/blk-mq-debugfs.c if (!q->debugfs_dir) q 931 block/blk-mq-debugfs.c q->sched_debugfs_dir = debugfs_create_dir("sched", q->debugfs_dir); q 933 block/blk-mq-debugfs.c debugfs_create_files(q->sched_debugfs_dir, q, e->queue_debugfs_attrs); q 936 block/blk-mq-debugfs.c void blk_mq_debugfs_unregister_sched(struct request_queue *q) q 938 block/blk-mq-debugfs.c debugfs_remove_recursive(q->sched_debugfs_dir); q 939 block/blk-mq-debugfs.c q->sched_debugfs_dir = NULL; q 950 block/blk-mq-debugfs.c struct request_queue *q = rqos->q; q 956 block/blk-mq-debugfs.c if (!q->rqos_debugfs_dir) q 957 block/blk-mq-debugfs.c q->rqos_debugfs_dir = debugfs_create_dir("rqos", q 958 block/blk-mq-debugfs.c q->debugfs_dir); q 961 block/blk-mq-debugfs.c rqos->q->rqos_debugfs_dir); q 966 block/blk-mq-debugfs.c void blk_mq_debugfs_unregister_queue_rqos(struct request_queue *q) q 968 block/blk-mq-debugfs.c debugfs_remove_recursive(q->rqos_debugfs_dir); q 969 block/blk-mq-debugfs.c q->rqos_debugfs_dir = NULL; q 972 block/blk-mq-debugfs.c void blk_mq_debugfs_register_sched_hctx(struct request_queue *q, q 975 block/blk-mq-debugfs.c struct elevator_type *e = q->elevator->type; q 21 block/blk-mq-debugfs.h void blk_mq_debugfs_register(struct request_queue *q); q 22 block/blk-mq-debugfs.h void blk_mq_debugfs_unregister(struct request_queue *q); q 23 block/blk-mq-debugfs.h void blk_mq_debugfs_register_hctx(struct request_queue *q, q 26 block/blk-mq-debugfs.h void blk_mq_debugfs_register_hctxs(struct request_queue *q); q 27 block/blk-mq-debugfs.h void blk_mq_debugfs_unregister_hctxs(struct request_queue *q); q 29 block/blk-mq-debugfs.h void blk_mq_debugfs_register_sched(struct request_queue *q); q 30 block/blk-mq-debugfs.h void blk_mq_debugfs_unregister_sched(struct request_queue *q); q 31 block/blk-mq-debugfs.h void blk_mq_debugfs_register_sched_hctx(struct request_queue *q, q 37 block/blk-mq-debugfs.h void blk_mq_debugfs_unregister_queue_rqos(struct request_queue *q); q 39 block/blk-mq-debugfs.h static inline void blk_mq_debugfs_register(struct request_queue *q) q 43 block/blk-mq-debugfs.h static inline void blk_mq_debugfs_unregister(struct request_queue *q) q 47 block/blk-mq-debugfs.h static inline void blk_mq_debugfs_register_hctx(struct request_queue *q, q 56 block/blk-mq-debugfs.h static inline void blk_mq_debugfs_register_hctxs(struct request_queue *q) q 60 block/blk-mq-debugfs.h static inline void blk_mq_debugfs_unregister_hctxs(struct request_queue *q) q 64 block/blk-mq-debugfs.h static inline void blk_mq_debugfs_register_sched(struct request_queue *q) q 68 block/blk-mq-debugfs.h static inline void blk_mq_debugfs_unregister_sched(struct request_queue *q) q 72 block/blk-mq-debugfs.h static inline void blk_mq_debugfs_register_sched_hctx(struct request_queue *q, q 89 block/blk-mq-debugfs.h static inline void blk_mq_debugfs_unregister_queue_rqos(struct request_queue *q) q 20 block/blk-mq-sched.c void blk_mq_sched_free_hctx_data(struct request_queue *q, q 26 block/blk-mq-sched.c queue_for_each_hw_ctx(q, hctx, i) { q 37 block/blk-mq-sched.c struct request_queue *q = rq->q; q 48 block/blk-mq-sched.c spin_lock_irq(&q->queue_lock); q 49 block/blk-mq-sched.c icq = 
ioc_lookup_icq(ioc, q); q 50 block/blk-mq-sched.c spin_unlock_irq(&q->queue_lock); q 53 block/blk-mq-sched.c icq = ioc_create_icq(ioc, q, GFP_ATOMIC); q 90 block/blk-mq-sched.c struct request_queue *q = hctx->queue; q 91 block/blk-mq-sched.c struct elevator_queue *e = q->elevator; q 115 block/blk-mq-sched.c } while (blk_mq_dispatch_rq_list(q, &rq_list, true)); q 136 block/blk-mq-sched.c struct request_queue *q = hctx->queue; q 165 block/blk-mq-sched.c } while (blk_mq_dispatch_rq_list(q, &rq_list, true)); q 172 block/blk-mq-sched.c struct request_queue *q = hctx->queue; q 173 block/blk-mq-sched.c struct elevator_queue *e = q->elevator; q 178 block/blk-mq-sched.c if (unlikely(blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q))) q 209 block/blk-mq-sched.c if (blk_mq_dispatch_rq_list(q, &rq_list, false)) { q 222 block/blk-mq-sched.c blk_mq_dispatch_rq_list(q, &rq_list, false); q 226 block/blk-mq-sched.c bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio, q 231 block/blk-mq-sched.c switch (elv_merge(q, &rq, bio)) { q 233 block/blk-mq-sched.c if (!blk_mq_sched_allow_merge(q, rq, bio)) q 237 block/blk-mq-sched.c *merged_request = attempt_back_merge(q, rq); q 239 block/blk-mq-sched.c elv_merged_request(q, rq, ELEVATOR_BACK_MERGE); q 242 block/blk-mq-sched.c if (!blk_mq_sched_allow_merge(q, rq, bio)) q 246 block/blk-mq-sched.c *merged_request = attempt_front_merge(q, rq); q 248 block/blk-mq-sched.c elv_merged_request(q, rq, ELEVATOR_FRONT_MERGE); q 251 block/blk-mq-sched.c return bio_attempt_discard_merge(q, rq, bio); q 262 block/blk-mq-sched.c bool blk_mq_bio_list_merge(struct request_queue *q, struct list_head *list, q 279 block/blk-mq-sched.c if (blk_mq_sched_allow_merge(q, rq, bio)) q 284 block/blk-mq-sched.c if (blk_mq_sched_allow_merge(q, rq, bio)) q 289 block/blk-mq-sched.c merged = bio_attempt_discard_merge(q, rq, bio); q 307 block/blk-mq-sched.c static bool blk_mq_attempt_merge(struct request_queue *q, q 316 block/blk-mq-sched.c if (blk_mq_bio_list_merge(q, &ctx->rq_lists[type], bio, nr_segs)) { q 324 block/blk-mq-sched.c bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio, q 327 block/blk-mq-sched.c struct elevator_queue *e = q->elevator; q 328 block/blk-mq-sched.c struct blk_mq_ctx *ctx = blk_mq_get_ctx(q); q 329 block/blk-mq-sched.c struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, bio->bi_opf, ctx); q 341 block/blk-mq-sched.c ret = blk_mq_attempt_merge(q, hctx, ctx, bio, nr_segs); q 348 block/blk-mq-sched.c bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq) q 350 block/blk-mq-sched.c return rq_mergeable(rq) && elv_attempt_insert_merge(q, rq); q 356 block/blk-mq-sched.c trace_block_rq_insert(rq->q, rq); q 387 block/blk-mq-sched.c struct request_queue *q = rq->q; q 388 block/blk-mq-sched.c struct elevator_queue *e = q->elevator; q 448 block/blk-mq-sched.c struct request_queue *q = hctx->queue; q 455 block/blk-mq-sched.c percpu_ref_get(&q->q_usage_counter); q 476 block/blk-mq-sched.c percpu_ref_put(&q->q_usage_counter); q 490 block/blk-mq-sched.c static int blk_mq_sched_alloc_tags(struct request_queue *q, q 494 block/blk-mq-sched.c struct blk_mq_tag_set *set = q->tag_set; q 497 block/blk-mq-sched.c hctx->sched_tags = blk_mq_alloc_rq_map(set, hctx_idx, q->nr_requests, q 502 block/blk-mq-sched.c ret = blk_mq_alloc_rqs(set, hctx->sched_tags, hctx_idx, q->nr_requests); q 510 block/blk-mq-sched.c static void blk_mq_sched_tags_teardown(struct request_queue *q) q 515 block/blk-mq-sched.c queue_for_each_hw_ctx(q, hctx, i) { q 523 
block/blk-mq-sched.c int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e) q 531 block/blk-mq-sched.c q->elevator = NULL; q 532 block/blk-mq-sched.c q->nr_requests = q->tag_set->queue_depth; q 541 block/blk-mq-sched.c q->nr_requests = 2 * min_t(unsigned int, q->tag_set->queue_depth, q 544 block/blk-mq-sched.c queue_for_each_hw_ctx(q, hctx, i) { q 545 block/blk-mq-sched.c ret = blk_mq_sched_alloc_tags(q, hctx, i); q 550 block/blk-mq-sched.c ret = e->ops.init_sched(q, e); q 554 block/blk-mq-sched.c blk_mq_debugfs_register_sched(q); q 556 block/blk-mq-sched.c queue_for_each_hw_ctx(q, hctx, i) { q 560 block/blk-mq-sched.c eq = q->elevator; q 561 block/blk-mq-sched.c blk_mq_sched_free_requests(q); q 562 block/blk-mq-sched.c blk_mq_exit_sched(q, eq); q 567 block/blk-mq-sched.c blk_mq_debugfs_register_sched_hctx(q, hctx); q 573 block/blk-mq-sched.c blk_mq_sched_free_requests(q); q 574 block/blk-mq-sched.c blk_mq_sched_tags_teardown(q); q 575 block/blk-mq-sched.c q->elevator = NULL; q 583 block/blk-mq-sched.c void blk_mq_sched_free_requests(struct request_queue *q) q 588 block/blk-mq-sched.c queue_for_each_hw_ctx(q, hctx, i) { q 590 block/blk-mq-sched.c blk_mq_free_rqs(q->tag_set, hctx->sched_tags, i); q 594 block/blk-mq-sched.c void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e) q 599 block/blk-mq-sched.c queue_for_each_hw_ctx(q, hctx, i) { q 606 block/blk-mq-sched.c blk_mq_debugfs_unregister_sched(q); q 609 block/blk-mq-sched.c blk_mq_sched_tags_teardown(q); q 610 block/blk-mq-sched.c q->elevator = NULL; q 8 block/blk-mq-sched.h void blk_mq_sched_free_hctx_data(struct request_queue *q, q 14 block/blk-mq-sched.h bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio, q 16 block/blk-mq-sched.h bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio, q 18 block/blk-mq-sched.h bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq); q 30 block/blk-mq-sched.h int blk_mq_init_sched(struct request_queue *q, struct elevator_type *e); q 31 block/blk-mq-sched.h void blk_mq_exit_sched(struct request_queue *q, struct elevator_queue *e); q 32 block/blk-mq-sched.h void blk_mq_sched_free_requests(struct request_queue *q); q 35 block/blk-mq-sched.h blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio, q 38 block/blk-mq-sched.h if (blk_queue_nomerges(q) || !bio_mergeable(bio)) q 41 block/blk-mq-sched.h return __blk_mq_sched_bio_merge(q, bio, nr_segs); q 45 block/blk-mq-sched.h blk_mq_sched_allow_merge(struct request_queue *q, struct request *rq, q 48 block/blk-mq-sched.h struct elevator_queue *e = q->elevator; q 51 block/blk-mq-sched.h return e->type->ops.allow_merge(q, rq, bio); q 58 block/blk-mq-sched.h struct elevator_queue *e = rq->q->elevator; q 66 block/blk-mq-sched.h struct request_queue *q = rq->q; q 67 block/blk-mq-sched.h struct elevator_queue *e = q->elevator; q 67 block/blk-mq-sysfs.c struct request_queue *q; q 72 block/blk-mq-sysfs.c q = ctx->queue; q 78 block/blk-mq-sysfs.c mutex_lock(&q->sysfs_lock); q 79 block/blk-mq-sysfs.c if (!blk_queue_dying(q)) q 81 block/blk-mq-sysfs.c mutex_unlock(&q->sysfs_lock); q 90 block/blk-mq-sysfs.c struct request_queue *q; q 95 block/blk-mq-sysfs.c q = ctx->queue; q 101 block/blk-mq-sysfs.c mutex_lock(&q->sysfs_lock); q 102 block/blk-mq-sysfs.c if (!blk_queue_dying(q)) q 104 block/blk-mq-sysfs.c mutex_unlock(&q->sysfs_lock); q 113 block/blk-mq-sysfs.c struct request_queue *q; q 118 block/blk-mq-sysfs.c q = hctx->queue; q 124 block/blk-mq-sysfs.c 
mutex_lock(&q->sysfs_lock); q 125 block/blk-mq-sysfs.c if (!blk_queue_dying(q)) q 127 block/blk-mq-sysfs.c mutex_unlock(&q->sysfs_lock); q 137 block/blk-mq-sysfs.c struct request_queue *q; q 142 block/blk-mq-sysfs.c q = hctx->queue; q 148 block/blk-mq-sysfs.c mutex_lock(&q->sysfs_lock); q 149 block/blk-mq-sysfs.c if (!blk_queue_dying(q)) q 151 block/blk-mq-sysfs.c mutex_unlock(&q->sysfs_lock); q 253 block/blk-mq-sysfs.c struct request_queue *q = hctx->queue; q 260 block/blk-mq-sysfs.c ret = kobject_add(&hctx->kobj, q->mq_kobj, "%u", hctx->queue_num); q 273 block/blk-mq-sysfs.c void blk_mq_unregister_dev(struct device *dev, struct request_queue *q) q 278 block/blk-mq-sysfs.c lockdep_assert_held(&q->sysfs_dir_lock); q 280 block/blk-mq-sysfs.c queue_for_each_hw_ctx(q, hctx, i) q 283 block/blk-mq-sysfs.c kobject_uevent(q->mq_kobj, KOBJ_REMOVE); q 284 block/blk-mq-sysfs.c kobject_del(q->mq_kobj); q 287 block/blk-mq-sysfs.c q->mq_sysfs_init_done = false; q 295 block/blk-mq-sysfs.c void blk_mq_sysfs_deinit(struct request_queue *q) q 301 block/blk-mq-sysfs.c ctx = per_cpu_ptr(q->queue_ctx, cpu); q 304 block/blk-mq-sysfs.c kobject_put(q->mq_kobj); q 307 block/blk-mq-sysfs.c void blk_mq_sysfs_init(struct request_queue *q) q 312 block/blk-mq-sysfs.c kobject_init(q->mq_kobj, &blk_mq_ktype); q 315 block/blk-mq-sysfs.c ctx = per_cpu_ptr(q->queue_ctx, cpu); q 317 block/blk-mq-sysfs.c kobject_get(q->mq_kobj); q 322 block/blk-mq-sysfs.c int __blk_mq_register_dev(struct device *dev, struct request_queue *q) q 327 block/blk-mq-sysfs.c WARN_ON_ONCE(!q->kobj.parent); q 328 block/blk-mq-sysfs.c lockdep_assert_held(&q->sysfs_dir_lock); q 330 block/blk-mq-sysfs.c ret = kobject_add(q->mq_kobj, kobject_get(&dev->kobj), "%s", "mq"); q 334 block/blk-mq-sysfs.c kobject_uevent(q->mq_kobj, KOBJ_ADD); q 336 block/blk-mq-sysfs.c queue_for_each_hw_ctx(q, hctx, i) { q 342 block/blk-mq-sysfs.c q->mq_sysfs_init_done = true; q 349 block/blk-mq-sysfs.c blk_mq_unregister_hctx(q->queue_hw_ctx[i]); q 351 block/blk-mq-sysfs.c kobject_uevent(q->mq_kobj, KOBJ_REMOVE); q 352 block/blk-mq-sysfs.c kobject_del(q->mq_kobj); q 357 block/blk-mq-sysfs.c void blk_mq_sysfs_unregister(struct request_queue *q) q 362 block/blk-mq-sysfs.c mutex_lock(&q->sysfs_dir_lock); q 363 block/blk-mq-sysfs.c if (!q->mq_sysfs_init_done) q 366 block/blk-mq-sysfs.c queue_for_each_hw_ctx(q, hctx, i) q 370 block/blk-mq-sysfs.c mutex_unlock(&q->sysfs_dir_lock); q 373 block/blk-mq-sysfs.c int blk_mq_sysfs_register(struct request_queue *q) q 378 block/blk-mq-sysfs.c mutex_lock(&q->sysfs_dir_lock); q 379 block/blk-mq-sysfs.c if (!q->mq_sysfs_init_done) q 382 block/blk-mq-sysfs.c queue_for_each_hw_ctx(q, hctx, i) { q 389 block/blk-mq-sysfs.c mutex_unlock(&q->sysfs_dir_lock); q 168 block/blk-mq-tag.c data->ctx = blk_mq_get_ctx(data->q); q 169 block/blk-mq-tag.c data->hctx = blk_mq_map_queue(data->q, data->cmd_flags, q 231 block/blk-mq-tag.c if (rq && rq->q == hctx->queue) q 403 block/blk-mq-tag.c void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn, q 416 block/blk-mq-tag.c if (!percpu_ref_tryget(&q->q_usage_counter)) q 419 block/blk-mq-tag.c queue_for_each_hw_ctx(q, hctx, i) { q 433 block/blk-mq-tag.c blk_queue_exit(q); q 36 block/blk-mq-tag.h void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn, q 43 block/blk-mq.c static void blk_mq_poll_stats_start(struct request_queue *q); q 114 block/blk-mq.c unsigned int blk_mq_in_flight(struct request_queue *q, struct hd_struct *part) q 120 block/blk-mq.c blk_mq_queue_tag_busy_iter(q, 
blk_mq_check_inflight, &mi); q 137 block/blk-mq.c void blk_mq_in_flight_rw(struct request_queue *q, struct hd_struct *part, q 143 block/blk-mq.c blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight_rw, &mi); q 146 block/blk-mq.c void blk_freeze_queue_start(struct request_queue *q) q 148 block/blk-mq.c mutex_lock(&q->mq_freeze_lock); q 149 block/blk-mq.c if (++q->mq_freeze_depth == 1) { q 150 block/blk-mq.c percpu_ref_kill(&q->q_usage_counter); q 151 block/blk-mq.c mutex_unlock(&q->mq_freeze_lock); q 152 block/blk-mq.c if (queue_is_mq(q)) q 153 block/blk-mq.c blk_mq_run_hw_queues(q, false); q 155 block/blk-mq.c mutex_unlock(&q->mq_freeze_lock); q 160 block/blk-mq.c void blk_mq_freeze_queue_wait(struct request_queue *q) q 162 block/blk-mq.c wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter)); q 166 block/blk-mq.c int blk_mq_freeze_queue_wait_timeout(struct request_queue *q, q 169 block/blk-mq.c return wait_event_timeout(q->mq_freeze_wq, q 170 block/blk-mq.c percpu_ref_is_zero(&q->q_usage_counter), q 179 block/blk-mq.c void blk_freeze_queue(struct request_queue *q) q 188 block/blk-mq.c blk_freeze_queue_start(q); q 189 block/blk-mq.c blk_mq_freeze_queue_wait(q); q 192 block/blk-mq.c void blk_mq_freeze_queue(struct request_queue *q) q 198 block/blk-mq.c blk_freeze_queue(q); q 202 block/blk-mq.c void blk_mq_unfreeze_queue(struct request_queue *q) q 204 block/blk-mq.c mutex_lock(&q->mq_freeze_lock); q 205 block/blk-mq.c q->mq_freeze_depth--; q 206 block/blk-mq.c WARN_ON_ONCE(q->mq_freeze_depth < 0); q 207 block/blk-mq.c if (!q->mq_freeze_depth) { q 208 block/blk-mq.c percpu_ref_resurrect(&q->q_usage_counter); q 209 block/blk-mq.c wake_up_all(&q->mq_freeze_wq); q 211 block/blk-mq.c mutex_unlock(&q->mq_freeze_lock); q 219 block/blk-mq.c void blk_mq_quiesce_queue_nowait(struct request_queue *q) q 221 block/blk-mq.c blk_queue_flag_set(QUEUE_FLAG_QUIESCED, q); q 234 block/blk-mq.c void blk_mq_quiesce_queue(struct request_queue *q) q 240 block/blk-mq.c blk_mq_quiesce_queue_nowait(q); q 242 block/blk-mq.c queue_for_each_hw_ctx(q, hctx, i) { q 260 block/blk-mq.c void blk_mq_unquiesce_queue(struct request_queue *q) q 262 block/blk-mq.c blk_queue_flag_clear(QUEUE_FLAG_QUIESCED, q); q 265 block/blk-mq.c blk_mq_run_hw_queues(q, true); q 269 block/blk-mq.c void blk_mq_wake_waiters(struct request_queue *q) q 274 block/blk-mq.c queue_for_each_hw_ctx(q, hctx, i) q 291 block/blk-mq.c return (rq->rq_flags & (RQF_IO_STAT | RQF_STATS)) || rq->q->elevator; q 315 block/blk-mq.c rq->q = data->q; q 322 block/blk-mq.c if (blk_queue_io_stat(data->q)) q 356 block/blk-mq.c static struct request *blk_mq_get_request(struct request_queue *q, q 360 block/blk-mq.c struct elevator_queue *e = q->elevator; q 366 block/blk-mq.c blk_queue_enter_live(q); q 369 block/blk-mq.c if (blk_queue_rq_alloc_time(q)) q 372 block/blk-mq.c data->q = q; q 374 block/blk-mq.c data->ctx = blk_mq_get_ctx(q); q 378 block/blk-mq.c data->hctx = blk_mq_map_queue(q, data->cmd_flags, q 403 block/blk-mq.c blk_queue_exit(q); q 422 block/blk-mq.c struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op, q 429 block/blk-mq.c ret = blk_queue_enter(q, flags); q 433 block/blk-mq.c rq = blk_mq_get_request(q, NULL, &alloc_data); q 434 block/blk-mq.c blk_queue_exit(q); q 446 block/blk-mq.c struct request *blk_mq_alloc_request_hctx(struct request_queue *q, q 463 block/blk-mq.c if (hctx_idx >= q->nr_hw_queues) q 466 block/blk-mq.c ret = blk_queue_enter(q, flags); q 474 block/blk-mq.c alloc_data.hctx = q->queue_hw_ctx[hctx_idx]; q 476 
block/blk-mq.c blk_queue_exit(q); q 480 block/blk-mq.c alloc_data.ctx = __blk_mq_get_ctx(q, cpu); q 482 block/blk-mq.c rq = blk_mq_get_request(q, NULL, &alloc_data); q 483 block/blk-mq.c blk_queue_exit(q); q 494 block/blk-mq.c struct request_queue *q = rq->q; q 506 block/blk-mq.c blk_queue_exit(q); q 511 block/blk-mq.c struct request_queue *q = rq->q; q 512 block/blk-mq.c struct elevator_queue *e = q->elevator; q 530 block/blk-mq.c laptop_io_completion(q->backing_dev_info); q 532 block/blk-mq.c rq_qos_done(q, rq); q 548 block/blk-mq.c blk_mq_poll_stats_start(rq->q); q 558 block/blk-mq.c rq_qos_done(rq->q, rq); q 577 block/blk-mq.c struct request_queue *q = rq->q; q 579 block/blk-mq.c q->mq_ops->complete(rq); q 585 block/blk-mq.c struct request_queue *q = rq->q; q 599 block/blk-mq.c if (q->nr_hw_queues == 1) { q 609 block/blk-mq.c !test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags)) { q 610 block/blk-mq.c q->mq_ops->complete(rq); q 615 block/blk-mq.c if (!test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags)) q 624 block/blk-mq.c q->mq_ops->complete(rq); q 659 block/blk-mq.c if (unlikely(blk_should_fake_timeout(rq->q))) q 680 block/blk-mq.c struct request_queue *q = rq->q; q 682 block/blk-mq.c trace_block_rq_issue(q, rq); q 684 block/blk-mq.c if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) { q 688 block/blk-mq.c rq_qos_issue(q, rq); q 696 block/blk-mq.c if (q->dma_drain_size && blk_rq_bytes(rq)) { q 707 block/blk-mq.c q->integrity.profile->prepare_fn(rq); q 714 block/blk-mq.c struct request_queue *q = rq->q; q 718 block/blk-mq.c trace_block_rq_requeue(q, rq); q 719 block/blk-mq.c rq_qos_requeue(q, rq); q 724 block/blk-mq.c if (q->dma_drain_size && blk_rq_bytes(rq)) q 743 block/blk-mq.c struct request_queue *q = q 748 block/blk-mq.c spin_lock_irq(&q->requeue_lock); q 749 block/blk-mq.c list_splice_init(&q->requeue_list, &rq_list); q 750 block/blk-mq.c spin_unlock_irq(&q->requeue_lock); q 775 block/blk-mq.c blk_mq_run_hw_queues(q, false); q 781 block/blk-mq.c struct request_queue *q = rq->q; q 790 block/blk-mq.c spin_lock_irqsave(&q->requeue_lock, flags); q 793 block/blk-mq.c list_add(&rq->queuelist, &q->requeue_list); q 795 block/blk-mq.c list_add_tail(&rq->queuelist, &q->requeue_list); q 797 block/blk-mq.c spin_unlock_irqrestore(&q->requeue_lock, flags); q 800 block/blk-mq.c blk_mq_kick_requeue_list(q); q 803 block/blk-mq.c void blk_mq_kick_requeue_list(struct request_queue *q) q 805 block/blk-mq.c kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work, 0); q 809 block/blk-mq.c void blk_mq_delay_kick_requeue_list(struct request_queue *q, q 812 block/blk-mq.c kblockd_mod_delayed_work_on(WORK_CPU_UNBOUND, &q->requeue_work, q 835 block/blk-mq.c if (rq->state == MQ_RQ_IN_FLIGHT && rq->q == hctx->queue) { q 845 block/blk-mq.c bool blk_mq_queue_inflight(struct request_queue *q) q 849 block/blk-mq.c blk_mq_queue_tag_busy_iter(q, blk_mq_rq_inflight, &busy); q 857 block/blk-mq.c if (req->q->mq_ops->timeout) { q 860 block/blk-mq.c ret = req->q->mq_ops->timeout(req, reserved); q 932 block/blk-mq.c struct request_queue *q = q 951 block/blk-mq.c if (!percpu_ref_tryget(&q->q_usage_counter)) q 954 block/blk-mq.c blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &next); q 957 block/blk-mq.c mod_timer(&q->timeout, next); q 965 block/blk-mq.c queue_for_each_hw_ctx(q, hctx, i) { q 971 block/blk-mq.c blk_queue_exit(q); q 1059 block/blk-mq.c .q = rq->q, q 1211 block/blk-mq.c bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list, q 1275 block/blk-mq.c ret = q->mq_ops->queue_rq(hctx, 
&bd); q 1314 block/blk-mq.c if (q->mq_ops->commit_rqs) q 1315 block/blk-mq.c q->mq_ops->commit_rqs(hctx); q 1518 block/blk-mq.c void blk_mq_run_hw_queues(struct request_queue *q, bool async) q 1523 block/blk-mq.c queue_for_each_hw_ctx(q, hctx, i) { q 1539 block/blk-mq.c bool blk_mq_queue_stopped(struct request_queue *q) q 1544 block/blk-mq.c queue_for_each_hw_ctx(q, hctx, i) q 1578 block/blk-mq.c void blk_mq_stop_hw_queues(struct request_queue *q) q 1583 block/blk-mq.c queue_for_each_hw_ctx(q, hctx, i) q 1596 block/blk-mq.c void blk_mq_start_hw_queues(struct request_queue *q) q 1601 block/blk-mq.c queue_for_each_hw_ctx(q, hctx, i) q 1616 block/blk-mq.c void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async) q 1621 block/blk-mq.c queue_for_each_hw_ctx(q, hctx, i) q 1753 block/blk-mq.c BUG_ON(!rq->q); q 1762 block/blk-mq.c this_q = rq->q; q 1800 block/blk-mq.c struct request_queue *q = rq->q; q 1815 block/blk-mq.c ret = q->mq_ops->queue_rq(hctx, &bd); q 1840 block/blk-mq.c struct request_queue *q = rq->q; q 1850 block/blk-mq.c if (blk_mq_hctx_stopped(hctx) || blk_queue_quiesced(q)) { q 1856 block/blk-mq.c if (q->elevator && !bypass_insert) q 1948 block/blk-mq.c if (tmp->q != rq->q) q 1953 block/blk-mq.c static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio) q 1964 block/blk-mq.c blk_queue_bounce(q, &bio); q 1965 block/blk-mq.c __blk_queue_split(q, &bio, &nr_segs); q 1970 block/blk-mq.c if (!is_flush_fua && !blk_queue_nomerges(q) && q 1971 block/blk-mq.c blk_attempt_plug_merge(q, bio, nr_segs, &same_queue_rq)) q 1974 block/blk-mq.c if (blk_mq_sched_bio_merge(q, bio, nr_segs)) q 1977 block/blk-mq.c rq_qos_throttle(q, bio); q 1980 block/blk-mq.c rq = blk_mq_get_request(q, bio, &data); q 1982 block/blk-mq.c rq_qos_cleanup(q, bio); q 1988 block/blk-mq.c trace_block_getrq(q, bio, bio->bi_opf); q 1990 block/blk-mq.c rq_qos_track(q, rq, bio); q 1996 block/blk-mq.c plug = blk_mq_plug(q, bio); q 2001 block/blk-mq.c } else if (plug && (q->nr_hw_queues == 1 || q->mq_ops->commit_rqs || q 2002 block/blk-mq.c !blk_queue_nonrot(q))) { q 2014 block/blk-mq.c trace_block_plug(q); q 2021 block/blk-mq.c trace_block_plug(q); q 2025 block/blk-mq.c } else if (q->elevator) { q 2027 block/blk-mq.c } else if (plug && !blk_queue_nomerges(q)) { q 2042 block/blk-mq.c trace_block_plug(q); q 2046 block/blk-mq.c trace_block_unplug(q, 1, true); q 2050 block/blk-mq.c } else if ((q->nr_hw_queues > 1 && is_sync) || q 2275 block/blk-mq.c static void blk_mq_exit_hctx(struct request_queue *q, q 2290 block/blk-mq.c spin_lock(&q->unused_hctx_lock); q 2291 block/blk-mq.c list_add(&hctx->hctx_list, &q->unused_hctx_list); q 2292 block/blk-mq.c spin_unlock(&q->unused_hctx_lock); q 2295 block/blk-mq.c static void blk_mq_exit_hw_queues(struct request_queue *q, q 2301 block/blk-mq.c queue_for_each_hw_ctx(q, hctx, i) { q 2305 block/blk-mq.c blk_mq_exit_hctx(q, set, hctx, i); q 2323 block/blk-mq.c static int blk_mq_init_hctx(struct request_queue *q, q 2351 block/blk-mq.c blk_mq_alloc_hctx(struct request_queue *q, struct blk_mq_tag_set *set, q 2372 block/blk-mq.c hctx->queue = q; q 2395 block/blk-mq.c hctx->fq = blk_alloc_flush_queue(q, hctx->numa_node, set->cmd_size, q 2418 block/blk-mq.c static void blk_mq_init_cpu_queues(struct request_queue *q, q 2421 block/blk-mq.c struct blk_mq_tag_set *set = q->tag_set; q 2425 block/blk-mq.c struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i); q 2434 block/blk-mq.c __ctx->queue = q; q 2441 block/blk-mq.c hctx = blk_mq_map_queue_type(q, j, i); q 2477 
block/blk-mq.c static void blk_mq_map_swqueue(struct request_queue *q) q 2482 block/blk-mq.c struct blk_mq_tag_set *set = q->tag_set; q 2484 block/blk-mq.c queue_for_each_hw_ctx(q, hctx, i) { q 2509 block/blk-mq.c ctx = per_cpu_ptr(q->queue_ctx, i); q 2512 block/blk-mq.c ctx->hctxs[j] = blk_mq_map_queue_type(q, q 2517 block/blk-mq.c hctx = blk_mq_map_queue_type(q, j, i); q 2540 block/blk-mq.c ctx->hctxs[j] = blk_mq_map_queue_type(q, q 2544 block/blk-mq.c queue_for_each_hw_ctx(q, hctx, i) { q 2583 block/blk-mq.c static void queue_set_hctx_shared(struct request_queue *q, bool shared) q 2588 block/blk-mq.c queue_for_each_hw_ctx(q, hctx, i) { q 2599 block/blk-mq.c struct request_queue *q; q 2603 block/blk-mq.c list_for_each_entry(q, &set->tag_list, tag_set_list) { q 2604 block/blk-mq.c blk_mq_freeze_queue(q); q 2605 block/blk-mq.c queue_set_hctx_shared(q, shared); q 2606 block/blk-mq.c blk_mq_unfreeze_queue(q); q 2610 block/blk-mq.c static void blk_mq_del_queue_tag_set(struct request_queue *q) q 2612 block/blk-mq.c struct blk_mq_tag_set *set = q->tag_set; q 2615 block/blk-mq.c list_del_rcu(&q->tag_set_list); q 2623 block/blk-mq.c INIT_LIST_HEAD(&q->tag_set_list); q 2627 block/blk-mq.c struct request_queue *q) q 2641 block/blk-mq.c queue_set_hctx_shared(q, true); q 2642 block/blk-mq.c list_add_tail_rcu(&q->tag_set_list, &set->tag_list); q 2648 block/blk-mq.c static int blk_mq_alloc_ctxs(struct request_queue *q) q 2666 block/blk-mq.c q->mq_kobj = &ctxs->kobj; q 2667 block/blk-mq.c q->queue_ctx = ctxs->queue_ctx; q 2681 block/blk-mq.c void blk_mq_release(struct request_queue *q) q 2686 block/blk-mq.c queue_for_each_hw_ctx(q, hctx, i) q 2690 block/blk-mq.c list_for_each_entry_safe(hctx, next, &q->unused_hctx_list, hctx_list) { q 2695 block/blk-mq.c kfree(q->queue_hw_ctx); q 2701 block/blk-mq.c blk_mq_sysfs_deinit(q); q 2706 block/blk-mq.c struct request_queue *uninit_q, *q; q 2716 block/blk-mq.c q = blk_mq_init_allocated_queue(set, uninit_q, false); q 2717 block/blk-mq.c if (IS_ERR(q)) q 2720 block/blk-mq.c return q; q 2733 block/blk-mq.c struct request_queue *q; q 2748 block/blk-mq.c q = blk_mq_init_queue(set); q 2749 block/blk-mq.c if (IS_ERR(q)) { q 2751 block/blk-mq.c return q; q 2754 block/blk-mq.c return q; q 2759 block/blk-mq.c struct blk_mq_tag_set *set, struct request_queue *q, q 2765 block/blk-mq.c spin_lock(&q->unused_hctx_lock); q 2766 block/blk-mq.c list_for_each_entry(tmp, &q->unused_hctx_list, hctx_list) { q 2774 block/blk-mq.c spin_unlock(&q->unused_hctx_lock); q 2777 block/blk-mq.c hctx = blk_mq_alloc_hctx(q, set, node); q 2781 block/blk-mq.c if (blk_mq_init_hctx(q, set, hctx, hctx_idx)) q 2793 block/blk-mq.c struct request_queue *q) q 2796 block/blk-mq.c struct blk_mq_hw_ctx **hctxs = q->queue_hw_ctx; q 2799 block/blk-mq.c mutex_lock(&q->sysfs_lock); q 2813 block/blk-mq.c hctx = blk_mq_alloc_and_init_hctx(set, q, i, node); q 2816 block/blk-mq.c blk_mq_exit_hctx(q, set, hctxs[i], i); q 2832 block/blk-mq.c j = q->nr_hw_queues; q 2836 block/blk-mq.c end = q->nr_hw_queues; q 2837 block/blk-mq.c q->nr_hw_queues = set->nr_hw_queues; q 2846 block/blk-mq.c blk_mq_exit_hctx(q, set, hctx, j); q 2850 block/blk-mq.c mutex_unlock(&q->sysfs_lock); q 2867 block/blk-mq.c struct request_queue *q, q 2871 block/blk-mq.c q->mq_ops = set->ops; q 2873 block/blk-mq.c q->poll_cb = blk_stat_alloc_callback(blk_mq_poll_stats_fn, q 2875 block/blk-mq.c BLK_MQ_POLL_STATS_BKTS, q); q 2876 block/blk-mq.c if (!q->poll_cb) q 2879 block/blk-mq.c if (blk_mq_alloc_ctxs(q)) q 2883 block/blk-mq.c blk_mq_sysfs_init(q); 
q 2885 block/blk-mq.c q->nr_queues = nr_hw_queues(set); q 2886 block/blk-mq.c q->queue_hw_ctx = kcalloc_node(q->nr_queues, sizeof(*(q->queue_hw_ctx)), q 2888 block/blk-mq.c if (!q->queue_hw_ctx) q 2891 block/blk-mq.c INIT_LIST_HEAD(&q->unused_hctx_list); q 2892 block/blk-mq.c spin_lock_init(&q->unused_hctx_lock); q 2894 block/blk-mq.c blk_mq_realloc_hw_ctxs(set, q); q 2895 block/blk-mq.c if (!q->nr_hw_queues) q 2898 block/blk-mq.c INIT_WORK(&q->timeout_work, blk_mq_timeout_work); q 2899 block/blk-mq.c blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ); q 2901 block/blk-mq.c q->tag_set = set; q 2903 block/blk-mq.c q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT; q 2906 block/blk-mq.c blk_queue_flag_set(QUEUE_FLAG_POLL, q); q 2908 block/blk-mq.c q->sg_reserved_size = INT_MAX; q 2910 block/blk-mq.c INIT_DELAYED_WORK(&q->requeue_work, blk_mq_requeue_work); q 2911 block/blk-mq.c INIT_LIST_HEAD(&q->requeue_list); q 2912 block/blk-mq.c spin_lock_init(&q->requeue_lock); q 2914 block/blk-mq.c blk_queue_make_request(q, blk_mq_make_request); q 2919 block/blk-mq.c q->nr_requests = set->queue_depth; q 2924 block/blk-mq.c q->poll_nsec = BLK_MQ_POLL_CLASSIC; q 2926 block/blk-mq.c blk_mq_init_cpu_queues(q, set->nr_hw_queues); q 2927 block/blk-mq.c blk_mq_add_queue_tag_set(set, q); q 2928 block/blk-mq.c blk_mq_map_swqueue(q); q 2931 block/blk-mq.c elevator_init_mq(q); q 2933 block/blk-mq.c return q; q 2936 block/blk-mq.c kfree(q->queue_hw_ctx); q 2937 block/blk-mq.c q->nr_hw_queues = 0; q 2939 block/blk-mq.c blk_mq_sysfs_deinit(q); q 2941 block/blk-mq.c blk_stat_free_callback(q->poll_cb); q 2942 block/blk-mq.c q->poll_cb = NULL; q 2944 block/blk-mq.c q->mq_ops = NULL; q 2950 block/blk-mq.c void blk_mq_exit_queue(struct request_queue *q) q 2952 block/blk-mq.c struct blk_mq_tag_set *set = q->tag_set; q 2954 block/blk-mq.c blk_mq_del_queue_tag_set(q); q 2955 block/blk-mq.c blk_mq_exit_hw_queues(q, set, set->nr_hw_queues); q 3156 block/blk-mq.c int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr) q 3158 block/blk-mq.c struct blk_mq_tag_set *set = q->tag_set; q 3165 block/blk-mq.c if (q->nr_requests == nr) q 3168 block/blk-mq.c blk_mq_freeze_queue(q); q 3169 block/blk-mq.c blk_mq_quiesce_queue(q); q 3172 block/blk-mq.c queue_for_each_hw_ctx(q, hctx, i) { q 3188 block/blk-mq.c if (q->elevator && q->elevator->type->ops.depth_updated) q 3189 block/blk-mq.c q->elevator->type->ops.depth_updated(hctx); q 3193 block/blk-mq.c q->nr_requests = nr; q 3195 block/blk-mq.c blk_mq_unquiesce_queue(q); q 3196 block/blk-mq.c blk_mq_unfreeze_queue(q); q 3208 block/blk-mq.c struct request_queue *q; q 3217 block/blk-mq.c struct request_queue *q) q 3221 block/blk-mq.c if (!q->elevator) q 3229 block/blk-mq.c qe->q = q; q 3230 block/blk-mq.c qe->type = q->elevator->type; q 3233 block/blk-mq.c mutex_lock(&q->sysfs_lock); q 3242 block/blk-mq.c elevator_switch_mq(q, NULL); q 3243 block/blk-mq.c mutex_unlock(&q->sysfs_lock); q 3249 block/blk-mq.c struct request_queue *q) q 3255 block/blk-mq.c if (qe->q == q) { q 3266 block/blk-mq.c mutex_lock(&q->sysfs_lock); q 3267 block/blk-mq.c elevator_switch_mq(q, t); q 3268 block/blk-mq.c mutex_unlock(&q->sysfs_lock); q 3274 block/blk-mq.c struct request_queue *q; q 3285 block/blk-mq.c list_for_each_entry(q, &set->tag_list, tag_set_list) q 3286 block/blk-mq.c blk_mq_freeze_queue(q); q 3296 block/blk-mq.c list_for_each_entry(q, &set->tag_list, tag_set_list) q 3297 block/blk-mq.c if (!blk_mq_elv_switch_none(&head, q)) q 3300 block/blk-mq.c list_for_each_entry(q, &set->tag_list, 
tag_set_list) { q 3301 block/blk-mq.c blk_mq_debugfs_unregister_hctxs(q); q 3302 block/blk-mq.c blk_mq_sysfs_unregister(q); q 3309 block/blk-mq.c list_for_each_entry(q, &set->tag_list, tag_set_list) { q 3310 block/blk-mq.c blk_mq_realloc_hw_ctxs(set, q); q 3311 block/blk-mq.c if (q->nr_hw_queues != set->nr_hw_queues) { q 3318 block/blk-mq.c blk_mq_map_swqueue(q); q 3321 block/blk-mq.c list_for_each_entry(q, &set->tag_list, tag_set_list) { q 3322 block/blk-mq.c blk_mq_sysfs_register(q); q 3323 block/blk-mq.c blk_mq_debugfs_register_hctxs(q); q 3327 block/blk-mq.c list_for_each_entry(q, &set->tag_list, tag_set_list) q 3328 block/blk-mq.c blk_mq_elv_switch_back(&head, q); q 3330 block/blk-mq.c list_for_each_entry(q, &set->tag_list, tag_set_list) q 3331 block/blk-mq.c blk_mq_unfreeze_queue(q); q 3343 block/blk-mq.c static bool blk_poll_stats_enable(struct request_queue *q) q 3345 block/blk-mq.c if (test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags) || q 3346 block/blk-mq.c blk_queue_flag_test_and_set(QUEUE_FLAG_POLL_STATS, q)) q 3348 block/blk-mq.c blk_stat_add_callback(q, q->poll_cb); q 3352 block/blk-mq.c static void blk_mq_poll_stats_start(struct request_queue *q) q 3358 block/blk-mq.c if (!test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags) || q 3359 block/blk-mq.c blk_stat_is_active(q->poll_cb)) q 3362 block/blk-mq.c blk_stat_activate_msecs(q->poll_cb, 100); q 3367 block/blk-mq.c struct request_queue *q = cb->data; q 3372 block/blk-mq.c q->poll_stat[bucket] = cb->stat[bucket]; q 3376 block/blk-mq.c static unsigned long blk_mq_poll_nsecs(struct request_queue *q, q 3387 block/blk-mq.c if (!blk_poll_stats_enable(q)) q 3403 block/blk-mq.c if (q->poll_stat[bucket].nr_samples) q 3404 block/blk-mq.c ret = (q->poll_stat[bucket].mean + 1) / 2; q 3409 block/blk-mq.c static bool blk_mq_poll_hybrid_sleep(struct request_queue *q, q 3427 block/blk-mq.c if (q->poll_nsec > 0) q 3428 block/blk-mq.c nsecs = q->poll_nsec; q 3430 block/blk-mq.c nsecs = blk_mq_poll_nsecs(q, hctx, rq); q 3463 block/blk-mq.c static bool blk_mq_poll_hybrid(struct request_queue *q, q 3468 block/blk-mq.c if (q->poll_nsec == BLK_MQ_POLL_CLASSIC) q 3485 block/blk-mq.c return blk_mq_poll_hybrid_sleep(q, hctx, rq); q 3500 block/blk-mq.c int blk_poll(struct request_queue *q, blk_qc_t cookie, bool spin) q 3506 block/blk-mq.c !test_bit(QUEUE_FLAG_POLL, &q->queue_flags)) q 3512 block/blk-mq.c hctx = q->queue_hw_ctx[blk_qc_t_to_queue_num(cookie)]; q 3521 block/blk-mq.c if (blk_mq_poll_hybrid(q, hctx, cookie)) q 3532 block/blk-mq.c ret = q->mq_ops->poll(hctx); q 40 block/blk-mq.h void blk_mq_exit_queue(struct request_queue *q); q 41 block/blk-mq.h int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr); q 42 block/blk-mq.h void blk_mq_wake_waiters(struct request_queue *q); q 90 block/blk-mq.h static inline struct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue *q, q 94 block/blk-mq.h return q->queue_hw_ctx[q->tag_set->map[type].mq_map[cpu]]; q 103 block/blk-mq.h static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, q 123 block/blk-mq.h extern void blk_mq_sysfs_init(struct request_queue *q); q 124 block/blk-mq.h extern void blk_mq_sysfs_deinit(struct request_queue *q); q 125 block/blk-mq.h extern int __blk_mq_register_dev(struct device *dev, struct request_queue *q); q 126 block/blk-mq.h extern int blk_mq_sysfs_register(struct request_queue *q); q 127 block/blk-mq.h extern void blk_mq_sysfs_unregister(struct request_queue *q); q 130 block/blk-mq.h void blk_mq_release(struct request_queue *q); q 
141 block/blk-mq.h static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q, q 144 block/blk-mq.h return per_cpu_ptr(q->queue_ctx, cpu); q 153 block/blk-mq.h static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q) q 155 block/blk-mq.h return __blk_mq_get_ctx(q, raw_smp_processor_id()); q 160 block/blk-mq.h struct request_queue *q; q 188 block/blk-mq.h unsigned int blk_mq_in_flight(struct request_queue *q, struct hd_struct *part); q 189 block/blk-mq.h void blk_mq_in_flight_rw(struct request_queue *q, struct hd_struct *part, q 194 block/blk-mq.h struct request_queue *q = hctx->queue; q 196 block/blk-mq.h if (q->mq_ops->put_budget) q 197 block/blk-mq.h q->mq_ops->put_budget(hctx); q 202 block/blk-mq.h struct request_queue *q = hctx->queue; q 204 block/blk-mq.h if (q->mq_ops->get_budget) q 205 block/blk-mq.h return q->mq_ops->get_budget(hctx); q 255 block/blk-mq.h static inline struct blk_plug *blk_mq_plug(struct request_queue *q, q 262 block/blk-mq.h if (!blk_queue_is_zoned(q) || !op_is_write(bio_op(bio))) q 31 block/blk-pm.c void blk_pm_runtime_init(struct request_queue *q, struct device *dev) q 33 block/blk-pm.c q->dev = dev; q 34 block/blk-pm.c q->rpm_status = RPM_ACTIVE; q 35 block/blk-pm.c pm_runtime_set_autosuspend_delay(q->dev, -1); q 36 block/blk-pm.c pm_runtime_use_autosuspend(q->dev); q 61 block/blk-pm.c int blk_pre_runtime_suspend(struct request_queue *q) q 65 block/blk-pm.c if (!q->dev) q 68 block/blk-pm.c WARN_ON_ONCE(q->rpm_status != RPM_ACTIVE); q 76 block/blk-pm.c blk_set_pm_only(q); q 79 block/blk-pm.c blk_freeze_queue_start(q); q 86 block/blk-pm.c percpu_ref_switch_to_atomic_sync(&q->q_usage_counter); q 87 block/blk-pm.c if (percpu_ref_is_zero(&q->q_usage_counter)) q 90 block/blk-pm.c blk_mq_unfreeze_queue(q); q 92 block/blk-pm.c spin_lock_irq(&q->queue_lock); q 94 block/blk-pm.c pm_runtime_mark_last_busy(q->dev); q 96 block/blk-pm.c q->rpm_status = RPM_SUSPENDING; q 97 block/blk-pm.c spin_unlock_irq(&q->queue_lock); q 100 block/blk-pm.c blk_clear_pm_only(q); q 119 block/blk-pm.c void blk_post_runtime_suspend(struct request_queue *q, int err) q 121 block/blk-pm.c if (!q->dev) q 124 block/blk-pm.c spin_lock_irq(&q->queue_lock); q 126 block/blk-pm.c q->rpm_status = RPM_SUSPENDED; q 128 block/blk-pm.c q->rpm_status = RPM_ACTIVE; q 129 block/blk-pm.c pm_runtime_mark_last_busy(q->dev); q 131 block/blk-pm.c spin_unlock_irq(&q->queue_lock); q 134 block/blk-pm.c blk_clear_pm_only(q); q 149 block/blk-pm.c void blk_pre_runtime_resume(struct request_queue *q) q 151 block/blk-pm.c if (!q->dev) q 154 block/blk-pm.c spin_lock_irq(&q->queue_lock); q 155 block/blk-pm.c q->rpm_status = RPM_RESUMING; q 156 block/blk-pm.c spin_unlock_irq(&q->queue_lock); q 174 block/blk-pm.c void blk_post_runtime_resume(struct request_queue *q, int err) q 176 block/blk-pm.c if (!q->dev) q 179 block/blk-pm.c spin_lock_irq(&q->queue_lock); q 181 block/blk-pm.c q->rpm_status = RPM_ACTIVE; q 182 block/blk-pm.c pm_runtime_mark_last_busy(q->dev); q 183 block/blk-pm.c pm_request_autosuspend(q->dev); q 185 block/blk-pm.c q->rpm_status = RPM_SUSPENDED; q 187 block/blk-pm.c spin_unlock_irq(&q->queue_lock); q 190 block/blk-pm.c blk_clear_pm_only(q); q 208 block/blk-pm.c void blk_set_runtime_active(struct request_queue *q) q 210 block/blk-pm.c if (q->dev) { q 211 block/blk-pm.c spin_lock_irq(&q->queue_lock); q 212 block/blk-pm.c q->rpm_status = RPM_ACTIVE; q 213 block/blk-pm.c pm_runtime_mark_last_busy(q->dev); q 214 block/blk-pm.c pm_request_autosuspend(q->dev); q 215 block/blk-pm.c 
q 9 block/blk-pm.h static inline void blk_pm_request_resume(struct request_queue *q)
q 11 block/blk-pm.h if (q->dev && (q->rpm_status == RPM_SUSPENDED ||
q 12 block/blk-pm.h q->rpm_status == RPM_SUSPENDING))
q 13 block/blk-pm.h pm_request_resume(q->dev);
q 18 block/blk-pm.h if (rq->q->dev && !(rq->rq_flags & RQF_PM))
q 19 block/blk-pm.h pm_runtime_mark_last_busy(rq->q->dev);
q 24 block/blk-pm.h lockdep_assert_held(&rq->q->queue_lock);
q 26 block/blk-pm.h if (rq->q->dev && !(rq->rq_flags & RQF_PM))
q 27 block/blk-pm.h rq->q->nr_pending--;
q 30 block/blk-pm.h static inline void blk_pm_add_request(struct request_queue *q,
q 33 block/blk-pm.h lockdep_assert_held(&q->queue_lock);
q 35 block/blk-pm.h if (q->dev && !(rq->rq_flags & RQF_PM))
q 36 block/blk-pm.h q->nr_pending++;
q 41 block/blk-pm.h lockdep_assert_held(&rq->q->queue_lock);
q 43 block/blk-pm.h if (rq->q->dev && !(rq->rq_flags & RQF_PM))
q 44 block/blk-pm.h --rq->q->nr_pending;
q 47 block/blk-pm.h static inline void blk_pm_request_resume(struct request_queue *q)
q 59 block/blk-pm.h static inline void blk_pm_add_request(struct request_queue *q,
q 295 block/blk-rq-qos.c void rq_qos_exit(struct request_queue *q)
q 297 block/blk-rq-qos.c blk_mq_debugfs_unregister_queue_rqos(q);
q 299 block/blk-rq-qos.c while (q->rq_qos) {
q 300 block/blk-rq-qos.c struct rq_qos *rqos = q->rq_qos;
q 301 block/blk-rq-qos.c q->rq_qos = rqos->next;
q 28 block/blk-rq-qos.h struct request_queue *q;
q 60 block/blk-rq-qos.h static inline struct rq_qos *rq_qos_id(struct request_queue *q,
q 64 block/blk-rq-qos.h for (rqos = q->rq_qos; rqos; rqos = rqos->next) {
q 71 block/blk-rq-qos.h static inline struct rq_qos *wbt_rq_qos(struct request_queue *q)
q 73 block/blk-rq-qos.h return rq_qos_id(q, RQ_QOS_WBT);
q 76 block/blk-rq-qos.h static inline struct rq_qos *blkcg_rq_qos(struct request_queue *q)
q 78 block/blk-rq-qos.h return rq_qos_id(q, RQ_QOS_LATENCY);
q 100 block/blk-rq-qos.h static inline void rq_qos_add(struct request_queue *q, struct rq_qos *rqos)
q 102 block/blk-rq-qos.h rqos->next = q->rq_qos;
q 103 block/blk-rq-qos.h q->rq_qos = rqos;
q 109 block/blk-rq-qos.h static inline void rq_qos_del(struct request_queue *q, struct rq_qos *rqos)
q 113 block/blk-rq-qos.h for (cur = &q->rq_qos; *cur; cur = &(*cur)->next) {
q 144 block/blk-rq-qos.h static inline void rq_qos_cleanup(struct request_queue *q, struct bio *bio)
q 146 block/blk-rq-qos.h if (q->rq_qos)
q 147 block/blk-rq-qos.h __rq_qos_cleanup(q->rq_qos, bio);
q 150 block/blk-rq-qos.h static inline void rq_qos_done(struct request_queue *q, struct request *rq)
q 152 block/blk-rq-qos.h if (q->rq_qos)
q 153 block/blk-rq-qos.h __rq_qos_done(q->rq_qos, rq);
q 156 block/blk-rq-qos.h static inline void rq_qos_issue(struct request_queue *q, struct request *rq)
q 158 block/blk-rq-qos.h if (q->rq_qos)
q 159 block/blk-rq-qos.h __rq_qos_issue(q->rq_qos, rq);
q 162 block/blk-rq-qos.h static inline void rq_qos_requeue(struct request_queue *q, struct request *rq)
q 164 block/blk-rq-qos.h if (q->rq_qos)
q 165 block/blk-rq-qos.h __rq_qos_requeue(q->rq_qos, rq);
q 168 block/blk-rq-qos.h static inline void rq_qos_done_bio(struct request_queue *q, struct bio *bio)
q 170 block/blk-rq-qos.h if (q->rq_qos)
q 171 block/blk-rq-qos.h __rq_qos_done_bio(q->rq_qos, bio);
q 174 block/blk-rq-qos.h static inline void rq_qos_throttle(struct request_queue *q, struct bio *bio)
q 181 block/blk-rq-qos.h if (q->rq_qos)
q 182 block/blk-rq-qos.h __rq_qos_throttle(q->rq_qos, bio);
q 185 block/blk-rq-qos.h static inline void rq_qos_track(struct request_queue *q, struct request *rq,
q 188 block/blk-rq-qos.h if (q->rq_qos)
q 189 block/blk-rq-qos.h __rq_qos_track(q->rq_qos, rq, bio);
q 192 block/blk-rq-qos.h static inline void rq_qos_merge(struct request_queue *q, struct request *rq,
q 195 block/blk-rq-qos.h if (q->rq_qos)
q 196 block/blk-rq-qos.h __rq_qos_merge(q->rq_qos, rq, bio);
q 199 block/blk-rq-qos.h static inline void rq_qos_queue_depth_changed(struct request_queue *q)
q 201 block/blk-rq-qos.h if (q->rq_qos)
q 202 block/blk-rq-qos.h __rq_qos_queue_depth_changed(q->rq_qos);
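The rq-qos helpers above keep every active policy on a singly linked chain headed at q->rq_qos, so an unconfigured queue pays only one NULL check per hook. A condensed restatement of the add/del pair visible above, renamed with a my_ prefix to mark it as an illustration rather than new API:

/* Push a policy onto the head of the chain (mirrors rq_qos_add). */
static void my_rq_qos_add(struct request_queue *q, struct rq_qos *rqos)
{
	rqos->next = q->rq_qos;
	q->rq_qos = rqos;
}

/* Unlink via a pointer-to-pointer walk, so no prev pointer is
 * needed (mirrors rq_qos_del). */
static void my_rq_qos_del(struct request_queue *q, struct rq_qos *rqos)
{
	struct rq_qos **cur;

	for (cur = &q->rq_qos; *cur; cur = &(*cur)->next) {
		if (*cur == rqos) {
			*cur = rqos->next;
			break;
		}
	}
}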
q 25 block/blk-settings.c void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
q 27 block/blk-settings.c q->rq_timeout = timeout;
q 111 block/blk-settings.c void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
q 116 block/blk-settings.c q->nr_requests = BLKDEV_MAX_RQ;
q 118 block/blk-settings.c q->make_request_fn = mfn;
q 119 block/blk-settings.c blk_queue_dma_alignment(q, 511);
q 121 block/blk-settings.c blk_set_default_limits(&q->limits);
q 136 block/blk-settings.c void blk_queue_bounce_limit(struct request_queue *q, u64 max_addr)
q 141 block/blk-settings.c q->bounce_gfp = GFP_NOIO;
q 150 block/blk-settings.c q->limits.bounce_pfn = max(max_low_pfn, b_pfn);
q 154 block/blk-settings.c q->limits.bounce_pfn = b_pfn;
q 158 block/blk-settings.c q->bounce_gfp = GFP_NOIO | GFP_DMA;
q 159 block/blk-settings.c q->limits.bounce_pfn = b_pfn;
q 183 block/blk-settings.c void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
q 185 block/blk-settings.c struct queue_limits *limits = &q->limits;
q 198 block/blk-settings.c q->backing_dev_info->io_pages = max_sectors >> (PAGE_SHIFT - 9);
q 215 block/blk-settings.c void blk_queue_chunk_sectors(struct request_queue *q, unsigned int chunk_sectors)
q 218 block/blk-settings.c q->limits.chunk_sectors = chunk_sectors;
q 227 block/blk-settings.c void blk_queue_max_discard_sectors(struct request_queue *q,
q 230 block/blk-settings.c q->limits.max_hw_discard_sectors = max_discard_sectors;
q 231 block/blk-settings.c q->limits.max_discard_sectors = max_discard_sectors;
q 240 block/blk-settings.c void blk_queue_max_write_same_sectors(struct request_queue *q,
q 243 block/blk-settings.c q->limits.max_write_same_sectors = max_write_same_sectors;
q 253 block/blk-settings.c void blk_queue_max_write_zeroes_sectors(struct request_queue *q,
q 256 block/blk-settings.c q->limits.max_write_zeroes_sectors = max_write_zeroes_sectors;
q 269 block/blk-settings.c void blk_queue_max_segments(struct request_queue *q, unsigned short max_segments)
q 277 block/blk-settings.c q->limits.max_segments = max_segments;
q 290 block/blk-settings.c void blk_queue_max_discard_segments(struct request_queue *q,
q 293 block/blk-settings.c q->limits.max_discard_segments = max_segments;
q 306 block/blk-settings.c void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
q 315 block/blk-settings.c WARN_ON_ONCE(q->limits.virt_boundary_mask);
q 317 block/blk-settings.c q->limits.max_segment_size = max_size;
q 331 block/blk-settings.c void blk_queue_logical_block_size(struct request_queue *q, unsigned int size)
q 333 block/blk-settings.c q->limits.logical_block_size = size;
q 335 block/blk-settings.c if (q->limits.physical_block_size < size)
q 336 block/blk-settings.c q->limits.physical_block_size = size;
q 338 block/blk-settings.c if (q->limits.io_min < q->limits.physical_block_size)
q 339 block/blk-settings.c q->limits.io_min = q->limits.physical_block_size;
q 353 block/blk-settings.c void blk_queue_physical_block_size(struct request_queue *q, unsigned int size)
q 355 block/blk-settings.c q->limits.physical_block_size = size;
q 357 block/blk-settings.c if (q->limits.physical_block_size < q->limits.logical_block_size)
q 358 block/blk-settings.c q->limits.physical_block_size = q->limits.logical_block_size;
q 360 block/blk-settings.c if (q->limits.io_min < q->limits.physical_block_size)
q 361 block/blk-settings.c q->limits.io_min = q->limits.physical_block_size;
q 376 block/blk-settings.c void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset)
q 378 block/blk-settings.c q->limits.alignment_offset =
q 379 block/blk-settings.c offset & (q->limits.physical_block_size - 1);
q 380 block/blk-settings.c q->limits.misaligned = 0;
q 421 block/blk-settings.c void blk_queue_io_min(struct request_queue *q, unsigned int min)
q 423 block/blk-settings.c blk_limits_io_min(&q->limits, min);
q 459 block/blk-settings.c void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
q 461 block/blk-settings.c blk_limits_io_opt(&q->limits, opt);
q 683 block/blk-settings.c void blk_queue_update_dma_pad(struct request_queue *q, unsigned int mask)
q 685 block/blk-settings.c if (mask > q->dma_pad_mask)
q 686 block/blk-settings.c q->dma_pad_mask = mask;
q 711 block/blk-settings.c int blk_queue_dma_drain(struct request_queue *q,
q 715 block/blk-settings.c if (queue_max_segments(q) < 2)
q 718 block/blk-settings.c blk_queue_max_segments(q, queue_max_segments(q) - 1);
q 719 block/blk-settings.c q->dma_drain_needed = dma_drain_needed;
q 720 block/blk-settings.c q->dma_drain_buffer = buf;
q 721 block/blk-settings.c q->dma_drain_size = size;
q 732 block/blk-settings.c void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
q 740 block/blk-settings.c q->limits.seg_boundary_mask = mask;
q 749 block/blk-settings.c void blk_queue_virt_boundary(struct request_queue *q, unsigned long mask)
q 751 block/blk-settings.c q->limits.virt_boundary_mask = mask;
q 760 block/blk-settings.c q->limits.max_segment_size = UINT_MAX;
q 774 block/blk-settings.c void blk_queue_dma_alignment(struct request_queue *q, int mask)
q 776 block/blk-settings.c q->dma_alignment = mask;
q 794 block/blk-settings.c void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
q 798 block/blk-settings.c if (mask > q->dma_alignment)
q 799 block/blk-settings.c q->dma_alignment = mask;
q 809 block/blk-settings.c void blk_set_queue_depth(struct request_queue *q, unsigned int depth)
q 811 block/blk-settings.c q->queue_depth = depth;
q 812 block/blk-settings.c rq_qos_queue_depth_changed(q);
q 824 block/blk-settings.c void blk_queue_write_cache(struct request_queue *q, bool wc, bool fua)
q 827 block/blk-settings.c blk_queue_flag_set(QUEUE_FLAG_WC, q);
q 829 block/blk-settings.c blk_queue_flag_clear(QUEUE_FLAG_WC, q);
q 831 block/blk-settings.c blk_queue_flag_set(QUEUE_FLAG_FUA, q);
q 833 block/blk-settings.c blk_queue_flag_clear(QUEUE_FLAG_FUA, q);
q 835 block/blk-settings.c wbt_set_write_cache(q, test_bit(QUEUE_FLAG_WC, &q->queue_flags));
q 848 block/blk-settings.c void blk_queue_required_elevator_features(struct request_queue *q,
q 851 block/blk-settings.c q->required_elevator_features = features;
q 862 block/blk-settings.c bool blk_queue_can_use_dma_map_merging(struct request_queue *q,
q 871 block/blk-settings.c blk_queue_virt_boundary(q, boundary);
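The blk-settings entries are the knobs a driver turns once at probe time to publish its hardware limits. A hedged probe-time sketch; the device and every numeric limit here are invented for illustration:

#include <linux/blkdev.h>

static void mydev_set_limits(struct request_queue *q)
{
	/* Hypothetical device: 512 B logical / 4 KiB physical sectors,
	 * up to 1 MiB per request in 128 segments, volatile write
	 * cache with FUA support. */
	blk_queue_logical_block_size(q, 512);
	blk_queue_physical_block_size(q, 4096);
	blk_queue_max_hw_sectors(q, 2048);	/* 1 MiB in 512 B units */
	blk_queue_max_segments(q, 128);
	blk_queue_max_segment_size(q, 1024 * 1024);
	blk_queue_io_min(q, 4096);
	blk_queue_io_opt(q, 1024 * 1024);
	blk_queue_write_cache(q, true, true);
}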
q 37 block/blk-softirq.c rq->q->mq_ops->complete(rq);
q 100 block/blk-softirq.c struct request_queue *q = req->q;
q 105 block/blk-softirq.c BUG_ON(!q->mq_ops->complete);
q 113 block/blk-softirq.c if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags) && ccpu != -1) {
q 114 block/blk-softirq.c if (!test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags))
q 53 block/blk-stat.c struct request_queue *q = rq->q;
q 64 block/blk-stat.c list_for_each_entry_rcu(cb, &q->stats->callbacks, list) {
q 135 block/blk-stat.c void blk_stat_add_callback(struct request_queue *q,
q 149 block/blk-stat.c spin_lock(&q->stats->lock);
q 150 block/blk-stat.c list_add_tail_rcu(&cb->list, &q->stats->callbacks);
q 151 block/blk-stat.c blk_queue_flag_set(QUEUE_FLAG_STATS, q);
q 152 block/blk-stat.c spin_unlock(&q->stats->lock);
q 155 block/blk-stat.c void blk_stat_remove_callback(struct request_queue *q,
q 158 block/blk-stat.c spin_lock(&q->stats->lock);
q 160 block/blk-stat.c if (list_empty(&q->stats->callbacks) && !q->stats->enable_accounting)
q 161 block/blk-stat.c blk_queue_flag_clear(QUEUE_FLAG_STATS, q);
q 162 block/blk-stat.c spin_unlock(&q->stats->lock);
q 183 block/blk-stat.c void blk_stat_enable_accounting(struct request_queue *q)
q 185 block/blk-stat.c spin_lock(&q->stats->lock);
q 186 block/blk-stat.c q->stats->enable_accounting = true;
q 187 block/blk-stat.c blk_queue_flag_set(QUEUE_FLAG_STATS, q);
q 188 block/blk-stat.c spin_unlock(&q->stats->lock);
q 71 block/blk-stat.h void blk_stat_enable_accounting(struct request_queue *q);
q 98 block/blk-stat.h void blk_stat_add_callback(struct request_queue *q,
q 110 block/blk-stat.h void blk_stat_remove_callback(struct request_queue *q,
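The blk-stat entries sketch a small callback API: a consumer registers a callback with blk_stat_add_callback() and arms it for a sampling window, as the poll-stats code above does for q->poll_cb in 100 ms windows. A hedged reconstruction of that usage; blk_stat_alloc_callback() and its argument order are assumed from block/blk-stat.h of this era:

#include <linux/blkdev.h>
#include "blk-stat.h"	/* in-tree header, per the listing above */

/* Assumed bucket function: two buckets, reads vs. writes. */
static int sketch_bucket(const struct request *rq)
{
	return op_is_write(req_op(rq));
}

static void sketch_timer_fn(struct blk_stat_callback *cb)
{
	/* cb->stat[0..1] now hold this window's samples; consume them
	 * and, if desired, re-arm with blk_stat_activate_msecs(). */
}

static int sketch_attach(struct request_queue *q)
{
	struct blk_stat_callback *cb;

	cb = blk_stat_alloc_callback(sketch_timer_fn, sketch_bucket, 2, q);
	if (!cb)
		return -ENOMEM;

	blk_stat_add_callback(q, cb);		/* sets QUEUE_FLAG_STATS */
	blk_stat_activate_msecs(cb, 100);	/* sample for a 100 ms window */
	return 0;
}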
q 60 block/blk-sysfs.c static ssize_t queue_requests_show(struct request_queue *q, char *page)
q 62 block/blk-sysfs.c return queue_var_show(q->nr_requests, (page));
q 66 block/blk-sysfs.c queue_requests_store(struct request_queue *q, const char *page, size_t count)
q 71 block/blk-sysfs.c if (!queue_is_mq(q))
q 81 block/blk-sysfs.c err = blk_mq_update_nr_requests(q, nr);
q 88 block/blk-sysfs.c static ssize_t queue_ra_show(struct request_queue *q, char *page)
q 90 block/blk-sysfs.c unsigned long ra_kb = q->backing_dev_info->ra_pages <<
q 97 block/blk-sysfs.c queue_ra_store(struct request_queue *q, const char *page, size_t count)
q 105 block/blk-sysfs.c q->backing_dev_info->ra_pages = ra_kb >> (PAGE_SHIFT - 10);
q 110 block/blk-sysfs.c static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
q 112 block/blk-sysfs.c int max_sectors_kb = queue_max_sectors(q) >> 1;
q 117 block/blk-sysfs.c static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
q 119 block/blk-sysfs.c return queue_var_show(queue_max_segments(q), (page));
q 122 block/blk-sysfs.c static ssize_t queue_max_discard_segments_show(struct request_queue *q,
q 125 block/blk-sysfs.c return queue_var_show(queue_max_discard_segments(q), (page));
q 128 block/blk-sysfs.c static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page)
q 130 block/blk-sysfs.c return queue_var_show(q->limits.max_integrity_segments, (page));
q 133 block/blk-sysfs.c static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
q 135 block/blk-sysfs.c return queue_var_show(queue_max_segment_size(q), (page));
q 138 block/blk-sysfs.c static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
q 140 block/blk-sysfs.c return queue_var_show(queue_logical_block_size(q), page);
q 143 block/blk-sysfs.c static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page)
q 145 block/blk-sysfs.c return queue_var_show(queue_physical_block_size(q), page);
q 148 block/blk-sysfs.c static ssize_t queue_chunk_sectors_show(struct request_queue *q, char *page)
q 150 block/blk-sysfs.c return queue_var_show(q->limits.chunk_sectors, page);
q 153 block/blk-sysfs.c static ssize_t queue_io_min_show(struct request_queue *q, char *page)
q 155 block/blk-sysfs.c return queue_var_show(queue_io_min(q), page);
q 158 block/blk-sysfs.c static ssize_t queue_io_opt_show(struct request_queue *q, char *page)
q 160 block/blk-sysfs.c return queue_var_show(queue_io_opt(q), page);
q 163 block/blk-sysfs.c static ssize_t queue_discard_granularity_show(struct request_queue *q, char *page)
q 165 block/blk-sysfs.c return queue_var_show(q->limits.discard_granularity, page);
q 168 block/blk-sysfs.c static ssize_t queue_discard_max_hw_show(struct request_queue *q, char *page)
q 172 block/blk-sysfs.c (unsigned long long)q->limits.max_hw_discard_sectors << 9);
q 175 block/blk-sysfs.c static ssize_t queue_discard_max_show(struct request_queue *q, char *page)
q 178 block/blk-sysfs.c (unsigned long long)q->limits.max_discard_sectors << 9);
q 181 block/blk-sysfs.c static ssize_t queue_discard_max_store(struct request_queue *q,
q 190 block/blk-sysfs.c if (max_discard & (q->limits.discard_granularity - 1))
q 197 block/blk-sysfs.c if (max_discard > q->limits.max_hw_discard_sectors)
q 198 block/blk-sysfs.c max_discard = q->limits.max_hw_discard_sectors;
q 200 block/blk-sysfs.c q->limits.max_discard_sectors = max_discard;
q 204 block/blk-sysfs.c static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page)
q 209 block/blk-sysfs.c static ssize_t queue_write_same_max_show(struct request_queue *q, char *page)
q 212 block/blk-sysfs.c (unsigned long long)q->limits.max_write_same_sectors << 9);
q 215 block/blk-sysfs.c static ssize_t queue_write_zeroes_max_show(struct request_queue *q, char *page)
q 218 block/blk-sysfs.c (unsigned long long)q->limits.max_write_zeroes_sectors << 9);
q 222 block/blk-sysfs.c queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
q 225 block/blk-sysfs.c max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,
q 233 block/blk-sysfs.c q->limits.max_dev_sectors >> 1);
q 238 block/blk-sysfs.c spin_lock_irq(&q->queue_lock);
q 239 block/blk-sysfs.c q->limits.max_sectors = max_sectors_kb << 1;
q 240 block/blk-sysfs.c q->backing_dev_info->io_pages = max_sectors_kb >> (PAGE_SHIFT - 10);
q 241 block/blk-sysfs.c spin_unlock_irq(&q->queue_lock);
q 246 block/blk-sysfs.c static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
q 248 block/blk-sysfs.c int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1;
q 255 block/blk-sysfs.c queue_show_##name(struct request_queue *q, char *page) \
q 258 block/blk-sysfs.c bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags); \
q 262 block/blk-sysfs.c queue_store_##name(struct request_queue *q, const char *page, size_t count) \
q 273 block/blk-sysfs.c blk_queue_flag_set(QUEUE_FLAG_##flag, q); \
q 275 block/blk-sysfs.c blk_queue_flag_clear(QUEUE_FLAG_##flag, q); \
q 284 block/blk-sysfs.c static ssize_t queue_zoned_show(struct request_queue *q, char *page)
q 286 block/blk-sysfs.c switch (blk_queue_zoned_model(q)) {
q 296 block/blk-sysfs.c static ssize_t queue_nr_zones_show(struct request_queue *q, char *page)
q 298 block/blk-sysfs.c return queue_var_show(blk_queue_nr_zones(q), page);
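All of the queue attributes above reduce to the same show/store shape: show() formats one value into the page buffer, store() parses and applies it. A minimal hedged pair modeled on queue_io_timeout_show/store; the attribute itself is hypothetical:

static ssize_t queue_example_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%u\n", jiffies_to_msecs(q->rq_timeout));
}

static ssize_t queue_example_store(struct request_queue *q,
				   const char *page, size_t count)
{
	unsigned int val;
	int err;

	err = kstrtou32(page, 10, &val);
	if (err || val == 0)
		return -EINVAL;

	blk_queue_rq_timeout(q, msecs_to_jiffies(val));
	return count;	/* sysfs convention: consumed the whole write */
}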
q 301 block/blk-sysfs.c static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
q 303 block/blk-sysfs.c return queue_var_show((blk_queue_nomerges(q) << 1) |
q 304 block/blk-sysfs.c blk_queue_noxmerges(q), page);
q 307 block/blk-sysfs.c static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
q 316 block/blk-sysfs.c blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
q 317 block/blk-sysfs.c blk_queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
q 319 block/blk-sysfs.c blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
q 321 block/blk-sysfs.c blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
q 326 block/blk-sysfs.c static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page)
q 328 block/blk-sysfs.c bool set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags);
q 329 block/blk-sysfs.c bool force = test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags);
q 335 block/blk-sysfs.c queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
q 346 block/blk-sysfs.c blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
q 347 block/blk-sysfs.c blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
q 349 block/blk-sysfs.c blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
q 350 block/blk-sysfs.c blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
q 352 block/blk-sysfs.c blk_queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
q 353 block/blk-sysfs.c blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
q 359 block/blk-sysfs.c static ssize_t queue_poll_delay_show(struct request_queue *q, char *page)
q 363 block/blk-sysfs.c if (q->poll_nsec == BLK_MQ_POLL_CLASSIC)
q 366 block/blk-sysfs.c val = q->poll_nsec / 1000;
q 371 block/blk-sysfs.c static ssize_t queue_poll_delay_store(struct request_queue *q, const char *page,
q 376 block/blk-sysfs.c if (!q->mq_ops || !q->mq_ops->poll)
q 384 block/blk-sysfs.c q->poll_nsec = BLK_MQ_POLL_CLASSIC;
q 386 block/blk-sysfs.c q->poll_nsec = val * 1000;
q 393 block/blk-sysfs.c static ssize_t queue_poll_show(struct request_queue *q, char *page)
q 395 block/blk-sysfs.c return queue_var_show(test_bit(QUEUE_FLAG_POLL, &q->queue_flags), page);
q 398 block/blk-sysfs.c static ssize_t queue_poll_store(struct request_queue *q, const char *page,
q 404 block/blk-sysfs.c if (!q->tag_set || q->tag_set->nr_maps <= HCTX_TYPE_POLL ||
q 405 block/blk-sysfs.c !q->tag_set->map[HCTX_TYPE_POLL].nr_queues)
q 413 block/blk-sysfs.c blk_queue_flag_set(QUEUE_FLAG_POLL, q);
q 415 block/blk-sysfs.c blk_queue_flag_clear(QUEUE_FLAG_POLL, q);
q 420 block/blk-sysfs.c static ssize_t queue_io_timeout_show(struct request_queue *q, char *page)
q 422 block/blk-sysfs.c return sprintf(page, "%u\n", jiffies_to_msecs(q->rq_timeout));
q 425 block/blk-sysfs.c static ssize_t queue_io_timeout_store(struct request_queue *q, const char *page,
q 435 block/blk-sysfs.c blk_queue_rq_timeout(q, msecs_to_jiffies(val));
q 440 block/blk-sysfs.c static ssize_t queue_wb_lat_show(struct request_queue *q, char *page)
q 442 block/blk-sysfs.c if (!wbt_rq_qos(q))
q 445 block/blk-sysfs.c return sprintf(page, "%llu\n", div_u64(wbt_get_min_lat(q), 1000));
q 448 block/blk-sysfs.c static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
q 461 block/blk-sysfs.c rqos = wbt_rq_qos(q);
q 463 block/blk-sysfs.c ret = wbt_init(q);
q 469 block/blk-sysfs.c val = wbt_default_latency_nsec(q);
q 473 block/blk-sysfs.c if (wbt_get_min_lat(q) == val)
q 481 block/blk-sysfs.c blk_mq_freeze_queue(q);
q 482 block/blk-sysfs.c blk_mq_quiesce_queue(q);
q 484 block/blk-sysfs.c wbt_set_min_lat(q, val);
q 486 block/blk-sysfs.c blk_mq_unquiesce_queue(q);
q 487 block/blk-sysfs.c blk_mq_unfreeze_queue(q);
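queue_wb_lat_store() above also shows the standard recipe for changing a value that the I/O hot path reads locklessly: freeze the queue to drain requests already in flight, quiesce it so no dispatch is running, update, then undo both. As a reusable hedged pattern, lifted directly from the lines above:

static void update_param_sketch(struct request_queue *q, u64 val)
{
	blk_mq_freeze_queue(q);		/* drain in-flight requests */
	blk_mq_quiesce_queue(q);	/* wait out any running dispatch */

	wbt_set_min_lat(q, val);	/* the lockless-read value, updated safely */

	blk_mq_unquiesce_queue(q);
	blk_mq_unfreeze_queue(q);
}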
q 492 block/blk-sysfs.c static ssize_t queue_wc_show(struct request_queue *q, char *page)
q 494 block/blk-sysfs.c if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
q 500 block/blk-sysfs.c static ssize_t queue_wc_store(struct request_queue *q, const char *page,
q 515 block/blk-sysfs.c blk_queue_flag_set(QUEUE_FLAG_WC, q);
q 517 block/blk-sysfs.c blk_queue_flag_clear(QUEUE_FLAG_WC, q);
q 522 block/blk-sysfs.c static ssize_t queue_fua_show(struct request_queue *q, char *page)
q 524 block/blk-sysfs.c return sprintf(page, "%u\n", test_bit(QUEUE_FLAG_FUA, &q->queue_flags));
q 527 block/blk-sysfs.c static ssize_t queue_dax_show(struct request_queue *q, char *page)
q 529 block/blk-sysfs.c return queue_var_show(blk_queue_dax(q), page);
q 775 block/blk-sysfs.c struct request_queue *q =
q 779 block/blk-sysfs.c (!q->mq_ops || !q->mq_ops->timeout))
q 797 block/blk-sysfs.c struct request_queue *q =
q 803 block/blk-sysfs.c mutex_lock(&q->sysfs_lock);
q 804 block/blk-sysfs.c if (blk_queue_dying(q)) {
q 805 block/blk-sysfs.c mutex_unlock(&q->sysfs_lock);
q 808 block/blk-sysfs.c res = entry->show(q, page);
q 809 block/blk-sysfs.c mutex_unlock(&q->sysfs_lock);
q 818 block/blk-sysfs.c struct request_queue *q;
q 824 block/blk-sysfs.c q = container_of(kobj, struct request_queue, kobj);
q 825 block/blk-sysfs.c mutex_lock(&q->sysfs_lock);
q 826 block/blk-sysfs.c if (blk_queue_dying(q)) {
q 827 block/blk-sysfs.c mutex_unlock(&q->sysfs_lock);
q 830 block/blk-sysfs.c res = entry->store(q, page, length);
q 831 block/blk-sysfs.c mutex_unlock(&q->sysfs_lock);
q 837 block/blk-sysfs.c struct request_queue *q = container_of(rcu_head, struct request_queue,
q 839 block/blk-sysfs.c kmem_cache_free(blk_requestq_cachep, q);
q 843 block/blk-sysfs.c static void blk_exit_queue(struct request_queue *q)
q 850 block/blk-sysfs.c if (q->elevator) {
q 851 block/blk-sysfs.c ioc_clear_queue(q);
q 852 block/blk-sysfs.c __elevator_exit(q, q->elevator);
q 853 block/blk-sysfs.c q->elevator = NULL;
q 861 block/blk-sysfs.c blkcg_exit_queue(q);
q 868 block/blk-sysfs.c bdi_put(q->backing_dev_info);
q 886 block/blk-sysfs.c struct request_queue *q = container_of(work, typeof(*q), release_work);
q 888 block/blk-sysfs.c if (test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags))
q 889 block/blk-sysfs.c blk_stat_remove_callback(q, q->poll_cb);
q 890 block/blk-sysfs.c blk_stat_free_callback(q->poll_cb);
q 892 block/blk-sysfs.c blk_free_queue_stats(q->stats);
q 894 block/blk-sysfs.c if (queue_is_mq(q))
q 895 block/blk-sysfs.c cancel_delayed_work_sync(&q->requeue_work);
q 897 block/blk-sysfs.c blk_exit_queue(q);
q 899 block/blk-sysfs.c blk_queue_free_zone_bitmaps(q);
q 901 block/blk-sysfs.c if (queue_is_mq(q))
q 902 block/blk-sysfs.c blk_mq_release(q);
q 904 block/blk-sysfs.c blk_trace_shutdown(q);
q 906 block/blk-sysfs.c if (queue_is_mq(q))
q 907 block/blk-sysfs.c blk_mq_debugfs_unregister(q);
q 909 block/blk-sysfs.c bioset_exit(&q->bio_split);
q 911 block/blk-sysfs.c ida_simple_remove(&blk_queue_ida, q->id);
q 912 block/blk-sysfs.c call_rcu(&q->rcu_head, blk_free_queue_rcu);
q 917 block/blk-sysfs.c struct request_queue *q =
q 920 block/blk-sysfs.c INIT_WORK(&q->release_work, __blk_release_queue);
q 921 block/blk-sysfs.c schedule_work(&q->release_work);
q 942 block/blk-sysfs.c struct request_queue *q = disk->queue;
q 945 block/blk-sysfs.c if (WARN_ON(!q))
q 948 block/blk-sysfs.c WARN_ONCE(blk_queue_registered(q),
q 961 block/blk-sysfs.c if (!blk_queue_init_done(q)) {
q 962 block/blk-sysfs.c blk_queue_flag_set(QUEUE_FLAG_INIT_DONE, q);
q 963 block/blk-sysfs.c percpu_ref_switch_to_percpu(&q->q_usage_counter);
q 970 block/blk-sysfs.c mutex_lock(&q->sysfs_dir_lock);
q 972 block/blk-sysfs.c ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue");
q 978 block/blk-sysfs.c ret = sysfs_create_group(&q->kobj, &queue_attr_group);
q 981 block/blk-sysfs.c kobject_del(&q->kobj);
q 986 block/blk-sysfs.c if (queue_is_mq(q)) {
q 987 block/blk-sysfs.c __blk_mq_register_dev(dev, q);
q 988 block/blk-sysfs.c blk_mq_debugfs_register(q);
q 991 block/blk-sysfs.c mutex_lock(&q->sysfs_lock);
q 992 block/blk-sysfs.c if (q->elevator) {
q 993 block/blk-sysfs.c ret = elv_register_queue(q, false);
q 995 block/blk-sysfs.c mutex_unlock(&q->sysfs_lock);
q 996 block/blk-sysfs.c mutex_unlock(&q->sysfs_dir_lock);
q 997 block/blk-sysfs.c kobject_del(&q->kobj);
q 1005 block/blk-sysfs.c blk_queue_flag_set(QUEUE_FLAG_REGISTERED, q);
q 1006 block/blk-sysfs.c wbt_enable_default(q);
q 1007 block/blk-sysfs.c blk_throtl_register_queue(q);
q 1010 block/blk-sysfs.c kobject_uevent(&q->kobj, KOBJ_ADD);
q 1012 block/blk-sysfs.c kobject_uevent(&q->elevator->kobj, KOBJ_ADD);
q 1013 block/blk-sysfs.c mutex_unlock(&q->sysfs_lock);
q 1017 block/blk-sysfs.c mutex_unlock(&q->sysfs_dir_lock);
q 1031 block/blk-sysfs.c struct request_queue *q = disk->queue;
q 1033 block/blk-sysfs.c if (WARN_ON(!q))
q 1037 block/blk-sysfs.c if (!blk_queue_registered(q))
q 1045 block/blk-sysfs.c mutex_lock(&q->sysfs_lock);
q 1046 block/blk-sysfs.c blk_queue_flag_clear(QUEUE_FLAG_REGISTERED, q);
q 1047 block/blk-sysfs.c mutex_unlock(&q->sysfs_lock);
q 1049 block/blk-sysfs.c mutex_lock(&q->sysfs_dir_lock);
q 1054 block/blk-sysfs.c if (queue_is_mq(q))
q 1055 block/blk-sysfs.c blk_mq_unregister_dev(disk_to_dev(disk), q);
q 1057 block/blk-sysfs.c kobject_uevent(&q->kobj, KOBJ_REMOVE);
q 1058 block/blk-sysfs.c kobject_del(&q->kobj);
q 1061 block/blk-sysfs.c mutex_lock(&q->sysfs_lock);
q 1062 block/blk-sysfs.c if (q->elevator)
q 1063 block/blk-sysfs.c elv_unregister_queue(q);
q 1064 block/blk-sysfs.c mutex_unlock(&q->sysfs_lock);
q 1065 block/blk-sysfs.c mutex_unlock(&q->sysfs_dir_lock);
q 482 block/blk-throttle.c struct request_queue *q,
q 488 block/blk-throttle.c tg = kzalloc_node(sizeof(*tg), gfp, q->node);
q 522 block/blk-throttle.c struct throtl_data *td = blkg->q->td;
q 1240 block/blk-throttle.c struct request_queue *q = td->queue;
q 1245 block/blk-throttle.c spin_lock_irq(&q->queue_lock);
q 1268 block/blk-throttle.c spin_unlock_irq(&q->queue_lock);
q 1270 block/blk-throttle.c spin_lock_irq(&q->queue_lock);
q 1292 block/blk-throttle.c spin_unlock_irq(&q->queue_lock);
q 1308 block/blk-throttle.c struct request_queue *q = td->queue;
q 1316 block/blk-throttle.c spin_lock_irq(&q->queue_lock);
q 1320 block/blk-throttle.c spin_unlock_irq(&q->queue_lock);
q 1721 block/blk-throttle.c static void throtl_shutdown_wq(struct request_queue *q)
q 1723 block/blk-throttle.c struct throtl_data *td = q->td;
q 2117 block/blk-throttle.c bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
q 2121 block/blk-throttle.c struct throtl_grp *tg = blkg_to_tg(blkg ?: q->root_blkg);
q 2133 block/blk-throttle.c spin_lock_irq(&q->queue_lock);
q 2215 block/blk-throttle.c spin_unlock_irq(&q->queue_lock);
q 2248 block/blk-throttle.c struct request_queue *q = rq->q;
q 2249 block/blk-throttle.c struct throtl_data *td = q->td;
q 2336 block/blk-throttle.c void blk_throtl_drain(struct request_queue *q)
q 2337 block/blk-throttle.c __releases(&q->queue_lock) __acquires(&q->queue_lock)
q 2339 block/blk-throttle.c struct throtl_data *td = q->td;
q 2360 block/blk-throttle.c spin_unlock_irq(&q->queue_lock);
q 2368 block/blk-throttle.c spin_lock_irq(&q->queue_lock);
q 2371 block/blk-throttle.c int blk_throtl_init(struct request_queue *q)
q 2376 block/blk-throttle.c td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);
q 2396 block/blk-throttle.c q->td = td;
q 2397 block/blk-throttle.c td->queue = q;
q 2405 block/blk-throttle.c ret = blkcg_activate_policy(q, &blkcg_policy_throtl);
q 2414 block/blk-throttle.c void blk_throtl_exit(struct request_queue *q)
q 2416 block/blk-throttle.c BUG_ON(!q->td);
q 2417 block/blk-throttle.c throtl_shutdown_wq(q);
q 2418 block/blk-throttle.c blkcg_deactivate_policy(q, &blkcg_policy_throtl);
q 2419 block/blk-throttle.c free_percpu(q->td->latency_buckets[READ]);
q 2420 block/blk-throttle.c free_percpu(q->td->latency_buckets[WRITE]);
q 2421 block/blk-throttle.c kfree(q->td);
q 2424 block/blk-throttle.c void blk_throtl_register_queue(struct request_queue *q)
q 2429 block/blk-throttle.c td = q->td;
q 2432 block/blk-throttle.c if (blk_queue_nonrot(q)) {
q 2448 block/blk-throttle.c td->track_bio_latency = !queue_is_mq(q);
q 2450 block/blk-throttle.c blk_stat_enable_accounting(q);
q 2454 block/blk-throttle.c ssize_t blk_throtl_sample_time_show(struct request_queue *q, char *page)
q 2456 block/blk-throttle.c if (!q->td)
q 2458 block/blk-throttle.c return sprintf(page, "%u\n", jiffies_to_msecs(q->td->throtl_slice));
q 2461 block/blk-throttle.c ssize_t blk_throtl_sample_time_store(struct request_queue *q,
q 2467 block/blk-throttle.c if (!q->td)
q 2474 block/blk-throttle.c q->td->throtl_slice = t;
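blk_throtl_init() above follows the usual blkcg policy bring-up: allocate the per-queue data on the queue's NUMA node, link it to the queue in both directions, then activate the policy. A condensed, hedged sketch; the per-cpu latency buckets and workqueue setup visible in the full function are omitted:

int throtl_init_sketch(struct request_queue *q)
{
	struct throtl_data *td;
	int ret;

	td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);
	if (!td)
		return -ENOMEM;

	/* Link queue and policy data both ways before activation. */
	q->td = td;
	td->queue = q;

	ret = blkcg_activate_policy(q, &blkcg_policy_throtl);
	if (ret) {
		kfree(td);
		q->td = NULL;
	}
	return ret;
}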
q 23 block/blk-timeout.c int blk_should_fake_timeout(struct request_queue *q)
q 25 block/blk-timeout.c if (!test_bit(QUEUE_FLAG_FAIL_IO, &q->queue_flags))
q 57 block/blk-timeout.c struct request_queue *q = disk->queue;
q 62 block/blk-timeout.c blk_queue_flag_set(QUEUE_FLAG_FAIL_IO, q);
q 64 block/blk-timeout.c blk_queue_flag_clear(QUEUE_FLAG_FAIL_IO, q);
q 89 block/blk-timeout.c kblockd_schedule_work(&req->q->timeout_work);
q 114 block/blk-timeout.c struct request_queue *q = req->q;
q 122 block/blk-timeout.c req->timeout = q->rq_timeout;
q 136 block/blk-timeout.c if (!timer_pending(&q->timeout) ||
q 137 block/blk-timeout.c time_before(expiry, q->timeout.expires)) {
q 138 block/blk-timeout.c unsigned long diff = q->timeout.expires - expiry;
q 147 block/blk-timeout.c if (!timer_pending(&q->timeout) || (diff >= HZ / 2))
q 148 block/blk-timeout.c mod_timer(&q->timeout, expiry);
q 99 block/blk-wbt.c struct bdi_writeback *wb = &rwb->rqos.q->backing_dev_info->wb;
q 236 block/blk-wbt.c struct backing_dev_info *bdi = rwb->rqos.q->backing_dev_info;
q 289 block/blk-wbt.c struct backing_dev_info *bdi = rwb->rqos.q->backing_dev_info;
q 361 block/blk-wbt.c trace_wbt_timer(rwb->rqos.q->backing_dev_info, status, rqd->scale_step,
q 421 block/blk-wbt.c void wbt_update_limits(struct request_queue *q)
q 423 block/blk-wbt.c struct rq_qos *rqos = wbt_rq_qos(q);
q 429 block/blk-wbt.c u64 wbt_get_min_lat(struct request_queue *q)
q 431 block/blk-wbt.c struct rq_qos *rqos = wbt_rq_qos(q);
q 437 block/blk-wbt.c void wbt_set_min_lat(struct request_queue *q, u64 val)
q 439 block/blk-wbt.c struct rq_qos *rqos = wbt_rq_qos(q);
q 634 block/blk-wbt.c void wbt_set_write_cache(struct request_queue *q, bool write_cache_on)
q 636 block/blk-wbt.c struct rq_qos *rqos = wbt_rq_qos(q);
q 644 block/blk-wbt.c void wbt_enable_default(struct request_queue *q)
q 646 block/blk-wbt.c struct rq_qos *rqos = wbt_rq_qos(q);
q 652 block/blk-wbt.c if (!blk_queue_registered(q))
q 655 block/blk-wbt.c if (queue_is_mq(q) && IS_ENABLED(CONFIG_BLK_WBT_MQ))
q 656 block/blk-wbt.c wbt_init(q);
q 660 block/blk-wbt.c u64 wbt_default_latency_nsec(struct request_queue *q)
q 666 block/blk-wbt.c if (blk_queue_nonrot(q))
q 687 block/blk-wbt.c RQWB(rqos)->rq_depth.queue_depth = blk_queue_depth(rqos->q);
q 694 block/blk-wbt.c struct request_queue *q = rqos->q;
q 696 block/blk-wbt.c blk_stat_remove_callback(q, rwb->cb);
q 704 block/blk-wbt.c void wbt_disable_default(struct request_queue *q)
q 706 block/blk-wbt.c struct rq_qos *rqos = wbt_rq_qos(q);
q 820 block/blk-wbt.c int wbt_init(struct request_queue *q)
q 840 block/blk-wbt.c rwb->rqos.q = q;
q 851 block/blk-wbt.c rq_qos_add(q, &rwb->rqos);
q 852 block/blk-wbt.c blk_stat_add_callback(q, rwb->cb);
q 854 block/blk-wbt.c rwb->min_lat_nsec = wbt_default_latency_nsec(q);
q 857 block/blk-wbt.c wbt_set_write_cache(q, test_bit(QUEUE_FLAG_WC, &q->queue_flags));
q 95 block/blk-wbt.h u64 wbt_get_min_lat(struct request_queue *q);
q 96 block/blk-wbt.h void wbt_set_min_lat(struct request_queue *q, u64 val);
q 107 block/blk-wbt.h static inline int wbt_init(struct request_queue *q)
q 111 block/blk-wbt.h static inline void wbt_update_limits(struct request_queue *q)
q 114 block/blk-wbt.h static inline void wbt_disable_default(struct request_queue *q)
q 117 block/blk-wbt.h static inline void wbt_enable_default(struct request_queue *q)
q 120 block/blk-wbt.h static inline void wbt_set_write_cache(struct request_queue *q, bool wc)
q 123 block/blk-wbt.h static inline u64 wbt_get_min_lat(struct request_queue *q)
q 127 block/blk-wbt.h static inline void wbt_set_min_lat(struct request_queue *q, u64 val)
q 130 block/blk-wbt.h static inline u64 wbt_default_latency_nsec(struct request_queue *q)
q 23 block/blk-zoned.c static inline sector_t blk_zone_start(struct request_queue *q,
q 26 block/blk-zoned.c sector_t zone_mask = blk_queue_zone_sectors(q) - 1;
q 36 block/blk-zoned.c if (!rq->q->seq_zones_wlock)
q 56 block/blk-zoned.c rq->q->seq_zones_wlock)))
q 67 block/blk-zoned.c if (rq->q->seq_zones_wlock)
q 69 block/blk-zoned.c rq->q->seq_zones_wlock));
q 73 block/blk-zoned.c static inline unsigned int __blkdev_nr_zones(struct request_queue *q,
q 76 block/blk-zoned.c sector_t zone_sectors = blk_queue_zone_sectors(q);
q 91 block/blk-zoned.c struct request_queue *q = bdev_get_queue(bdev);
q 93 block/blk-zoned.c if (!blk_queue_is_zoned(q))
q 96 block/blk-zoned.c return __blkdev_nr_zones(q, bdev->bd_part->nr_sects);
q 125 block/blk-zoned.c struct request_queue *q = disk->queue;
q 137 block/blk-zoned.c sector += blk_queue_zone_sectors(q) * n;
q 166 block/blk-zoned.c struct request_queue *q = bdev_get_queue(bdev);
q 170 block/blk-zoned.c if (!blk_queue_is_zoned(q))
q 187 block/blk-zoned.c __blkdev_nr_zones(q, bdev->bd_part->nr_sects - sector));
q 257 block/blk-zoned.c struct request_queue *q = bdev_get_queue(bdev);
q 264 block/blk-zoned.c if (!blk_queue_is_zoned(q))
q 278 block/blk-zoned.c zone_sectors = blk_queue_zone_sectors(q);
q 318 block/blk-zoned.c struct request_queue *q;
q 326 block/blk-zoned.c q = bdev_get_queue(bdev);
q 327 block/blk-zoned.c if (!q)
q 330 block/blk-zoned.c if (!blk_queue_is_zoned(q))
q 378 block/blk-zoned.c struct request_queue *q;
q 384 block/blk-zoned.c q = bdev_get_queue(bdev);
q 385 block/blk-zoned.c if (!q)
q 388 block/blk-zoned.c if (!blk_queue_is_zoned(q))
q 436 block/blk-zoned.c void blk_queue_free_zone_bitmaps(struct request_queue *q)
q 438 block/blk-zoned.c kfree(q->seq_zones_bitmap);
q 439 block/blk-zoned.c q->seq_zones_bitmap = NULL;
q 440 block/blk-zoned.c kfree(q->seq_zones_wlock);
q 441 block/blk-zoned.c q->seq_zones_wlock = NULL;
q 455 block/blk-zoned.c struct request_queue *q = disk->queue;
q 456 block/blk-zoned.c unsigned int nr_zones = __blkdev_nr_zones(q, get_capacity(disk));
q 468 block/blk-zoned.c if (!queue_is_mq(q)) {
q 469 block/blk-zoned.c q->nr_zones = nr_zones;
q 479 block/blk-zoned.c if (!blk_queue_is_zoned(q) || !nr_zones) {
q 486 block/blk-zoned.c seq_zones_wlock = blk_alloc_zone_bitmap(q->node, nr_zones);
q 489 block/blk-zoned.c seq_zones_bitmap = blk_alloc_zone_bitmap(q->node, nr_zones);
q 511 block/blk-zoned.c sector += nrz * blk_queue_zone_sectors(q);
q 525 block/blk-zoned.c blk_mq_freeze_queue(q);
q 526 block/blk-zoned.c q->nr_zones = nr_zones;
q 527 block/blk-zoned.c swap(q->seq_zones_wlock, seq_zones_wlock);
q 528 block/blk-zoned.c swap(q->seq_zones_bitmap, seq_zones_bitmap);
q 529 block/blk-zoned.c blk_mq_unfreeze_queue(q);
q 540 block/blk-zoned.c blk_mq_freeze_queue(q);
q 541 block/blk-zoned.c blk_queue_free_zone_bitmaps(q);
q 542 block/blk-zoned.c blk_mq_unfreeze_queue(q);
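blk_zone_start() above relies on the zone size being a power of two, so a sector can be rounded down to the start of its zone with a simple mask. A hedged restatement:

#include <linux/blkdev.h>

static sector_t zone_start_sketch(struct request_queue *q, sector_t sector)
{
	sector_t zone_mask = blk_queue_zone_sectors(q) - 1;

	/* Valid only on zoned queues, where the zone size is a power
	 * of two: clearing the low bits yields the zone's first
	 * sector. */
	return sector & ~zone_mask;
}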
q 42 block/blk.h blk_get_flush_queue(struct request_queue *q, struct blk_mq_ctx *ctx)
q 44 block/blk.h return blk_mq_map_queue(q, REQ_OP_FLUSH, ctx)->fq;
q 47 block/blk.h static inline void __blk_get_queue(struct request_queue *q)
q 49 block/blk.h kobject_get(&q->kobj);
q 58 block/blk.h struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
q 60 block/blk.h void blk_free_flush_queue(struct blk_flush_queue *q);
q 62 block/blk.h void blk_freeze_queue(struct request_queue *q);
q 64 block/blk.h static inline void blk_queue_enter_live(struct request_queue *q)
q 72 block/blk.h percpu_ref_get(&q->q_usage_counter);
q 75 block/blk.h static inline bool biovec_phys_mergeable(struct request_queue *q,
q 78 block/blk.h unsigned long mask = queue_segment_boundary(q);
q 91 block/blk.h static inline bool __bvec_gap_to_prev(struct request_queue *q,
q 94 block/blk.h return (offset & queue_virt_boundary(q)) ||
q 95 block/blk.h ((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q));
q 102 block/blk.h static inline bool bvec_gap_to_prev(struct request_queue *q,
q 105 block/blk.h if (!queue_virt_boundary(q))
q 107 block/blk.h return __bvec_gap_to_prev(q, bprv, offset);
q 139 block/blk.h return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
q 149 block/blk.h return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
q 183 block/blk.h bool bio_attempt_discard_merge(struct request_queue *q, struct request *req,
q 185 block/blk.h bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
q 199 block/blk.h void elevator_init_mq(struct request_queue *q);
q 200 block/blk.h int elevator_switch_mq(struct request_queue *q,
q 203 block/blk.h int elv_register_queue(struct request_queue *q, bool uevent);
q 204 block/blk.h void elv_unregister_queue(struct request_queue *q);
q 206 block/blk.h static inline void elevator_exit(struct request_queue *q,
q 209 block/blk.h lockdep_assert_held(&q->sysfs_lock);
q 211 block/blk.h blk_mq_sched_free_requests(q);
q 212 block/blk.h __elevator_exit(q, e);
q 223 block/blk.h static inline int blk_should_fake_timeout(struct request_queue *q)
q 229 block/blk.h void __blk_queue_split(struct request_queue *q, struct bio **bio,
q 235 block/blk.h struct request *attempt_back_merge(struct request_queue *q, struct request *rq);
q 236 block/blk.h struct request *attempt_front_merge(struct request_queue *q, struct request *rq);
q 237 block/blk.h int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
q 260 block/blk.h static inline void req_set_nomerge(struct request_queue *q, struct request *req)
q 263 block/blk.h if (req == q->last_merge)
q 264 block/blk.h q->last_merge = NULL;
q 272 block/blk.h static inline unsigned int bio_allowed_max_sectors(struct request_queue *q)
q 274 block/blk.h return round_down(UINT_MAX, queue_logical_block_size(q)) >> 9;
q 281 block/blk.h struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q);
q 282 block/blk.h struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
q 284 block/blk.h void ioc_clear_queue(struct request_queue *q);
q 312 block/blk.h extern void blk_throtl_drain(struct request_queue *q);
q 313 block/blk.h extern int blk_throtl_init(struct request_queue *q);
q 314 block/blk.h extern void blk_throtl_exit(struct request_queue *q);
q 315 block/blk.h extern void blk_throtl_register_queue(struct request_queue *q);
q 317 block/blk.h static inline void blk_throtl_drain(struct request_queue *q) { }
q 318 block/blk.h static inline int blk_throtl_init(struct request_queue *q) { return 0; }
q 319 block/blk.h static inline void blk_throtl_exit(struct request_queue *q) { }
q 320 block/blk.h static inline void blk_throtl_register_queue(struct request_queue *q) { }
q 323 block/blk.h extern ssize_t blk_throtl_sample_time_show(struct request_queue *q, char *page);
q 324 block/blk.h extern ssize_t blk_throtl_sample_time_store(struct request_queue *q,
q 335 block/blk.h extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
q 341 block/blk.h static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
q 347 block/blk.h extern int blk_iolatency_init(struct request_queue *q);
q 349 block/blk.h static inline int blk_iolatency_init(struct request_queue *q) { return 0; }
q 355 block/blk.h void blk_queue_free_zone_bitmaps(struct request_queue *q);
q 357 block/blk.h static inline void blk_queue_free_zone_bitmaps(struct request_queue *q) {}
q 286 block/bounce.c static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
q 301 block/bounce.c if (page_to_pfn(from.bv_page) > q->limits.bounce_pfn)
q 324 block/bounce.c if (page_to_pfn(page) <= q->limits.bounce_pfn)
q 327 block/bounce.c to->bv_page = mempool_alloc(pool, q->bounce_gfp);
q 342 block/bounce.c trace_block_bio_bounce(q, *bio_orig);
q 360 block/bounce.c void blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
q 375 block/bounce.c if (!(q->bounce_gfp & GFP_DMA)) {
q 376 block/bounce.c if (q->limits.bounce_pfn >= blk_max_pfn)
q 387 block/bounce.c __blk_queue_bounce(q, bio_orig, pool);
q 48 block/bsg-lib.c job->bidi_rq = blk_get_request(rq->q, REQ_OP_SCSI_IN, 0);
q 54 block/bsg-lib.c ret = blk_rq_map_user(rq->q, job->bidi_rq, NULL,
q 211 block/bsg-lib.c buf->sg_cnt = blk_rq_map_sg(req->q, req, buf->sg_list);
q 264 block/bsg-lib.c struct request_queue *q = hctx->queue;
q 265 block/bsg-lib.c struct device *dev = q->queuedata;
q 268 block/bsg-lib.c container_of(q->tag_set, struct bsg_set, tag_set);
q 321 block/bsg-lib.c void bsg_remove_queue(struct request_queue *q)
q 323 block/bsg-lib.c if (q) {
q 325 block/bsg-lib.c container_of(q->tag_set, struct bsg_set, tag_set);
q 327 block/bsg-lib.c bsg_unregister_queue(q);
q 328 block/bsg-lib.c blk_cleanup_queue(q);
q 338 block/bsg-lib.c container_of(rq->q->tag_set, struct bsg_set, tag_set);
q 367 block/bsg-lib.c struct request_queue *q;
q 387 block/bsg-lib.c q = blk_mq_init_queue(set);
q 388 block/bsg-lib.c if (IS_ERR(q)) {
q 389 block/bsg-lib.c ret = PTR_ERR(q);
q 393 block/bsg-lib.c q->queuedata = dev;
q 394 block/bsg-lib.c blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT);
q 396 block/bsg-lib.c ret = bsg_register_queue(q, dev, name, &bsg_transport_ops);
q 403 block/bsg-lib.c return q;
q 405 block/bsg-lib.c blk_cleanup_queue(q);
q 135 block/bsg.c static int bsg_sg_io(struct request_queue *q, fmode_t mode, void __user *uarg)
q 145 block/bsg.c if (!q->bsg_dev.class_dev)
q 150 block/bsg.c ret = q->bsg_dev.ops->check_proto(&hdr);
q 154 block/bsg.c rq = blk_get_request(q, hdr.dout_xfer_len ?
q 159 block/bsg.c ret = q->bsg_dev.ops->fill_hdr(rq, &hdr, mode);
q 165 block/bsg.c rq->timeout = q->sg_timeout;
q 172 block/bsg.c ret = blk_rq_map_user(q, rq, NULL, uptr64(hdr.dout_xferp),
q 175 block/bsg.c ret = blk_rq_map_user(q, rq, NULL, uptr64(hdr.din_xferp),
q 184 block/bsg.c blk_execute_rq(q, NULL, rq, !(hdr.flags & BSG_FLAG_Q_AT_TAIL));
q 185 block/bsg.c ret = rq->q->bsg_dev.ops->complete_rq(rq, &hdr);
q 189 block/bsg.c rq->q->bsg_dev.ops->free_rq(rq);
q 212 block/bsg.c struct request_queue *q = bd->queue;
q 230 block/bsg.c blk_put_queue(q);
q 264 block/bsg.c static struct bsg_device *__bsg_get_device(int minor, struct request_queue *q)
q 271 block/bsg.c if (bd->queue == q) {
q 389 block/bsg.c void bsg_unregister_queue(struct request_queue *q)
q 391 block/bsg.c struct bsg_class_device *bcd = &q->bsg_dev;
q 398 block/bsg.c if (q->kobj.sd)
q 399 block/bsg.c sysfs_remove_link(&q->kobj, "bsg");
q 406 block/bsg.c int bsg_register_queue(struct request_queue *q, struct device *parent,
q 417 block/bsg.c if (!queue_is_mq(q))
q 420 block/bsg.c bcd = &q->bsg_dev;
q 435 block/bsg.c bcd->queue = q;
q 445 block/bsg.c if (q->kobj.sd) {
q 446 block/bsg.c ret = sysfs_create_link(&q->kobj, &bcd->class_dev->kobj, "bsg");
q 463 block/bsg.c int bsg_scsi_register_queue(struct request_queue *q, struct device *parent)
q 465 block/bsg.c if (!blk_queue_scsi_passthrough(q)) {
q 470 block/bsg.c return bsg_register_queue(q, parent, dev_name(parent), &bsg_scsi_ops);
q 62 block/elevator.c struct request_queue *q = rq->q;
q 63 block/elevator.c struct elevator_queue *e = q->elevator;
q 66 block/elevator.c return e->type->ops.allow_merge(q, rq, bio);
q 140 block/elevator.c static struct elevator_type *elevator_get(struct request_queue *q,
q 147 block/elevator.c e = elevator_find(name, q->required_elevator_features);
q 152 block/elevator.c e = elevator_find(name, q->required_elevator_features);
q 164 block/elevator.c struct elevator_queue *elevator_alloc(struct request_queue *q,
q 169 block/elevator.c eq = kzalloc_node(sizeof(*eq), GFP_KERNEL, q->node);
q 191 block/elevator.c void __elevator_exit(struct request_queue *q, struct elevator_queue *e)
q 195 block/elevator.c blk_mq_exit_sched(q, e);
q 207 block/elevator.c void elv_rqhash_del(struct request_queue *q, struct request *rq)
q 214 block/elevator.c void elv_rqhash_add(struct request_queue *q, struct request *rq)
q 216 block/elevator.c struct elevator_queue *e = q->elevator;
q 224 block/elevator.c void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
q 227 block/elevator.c elv_rqhash_add(q, rq);
q 230 block/elevator.c struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
q 232 block/elevator.c struct elevator_queue *e = q->elevator;
q 304 block/elevator.c enum elv_merge elv_merge(struct request_queue *q, struct request **req,
q 307 block/elevator.c struct elevator_queue *e = q->elevator;
q 316 block/elevator.c if (blk_queue_nomerges(q) || !bio_mergeable(bio))
q 322 block/elevator.c if (q->last_merge && elv_bio_merge_ok(q->last_merge, bio)) {
q 323 block/elevator.c enum elv_merge ret = blk_try_merge(q->last_merge, bio);
q 326 block/elevator.c *req = q->last_merge;
q 331 block/elevator.c if (blk_queue_noxmerges(q))
q 337 block/elevator.c __rq = elv_rqhash_find(q, bio->bi_iter.bi_sector);
q 344 block/elevator.c return e->type->ops.request_merge(q, req, bio);
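elv_merge() above encodes a three-step precedence for finding a merge candidate: the cached q->last_merge first, then an O(1) hash lookup keyed on end sector (back merges only), and only then the scheduler's own request_merge() hook. A condensed sketch of that control flow, not the verbatim function:

#include <linux/elevator.h>
#include "blk.h"	/* in-tree: blk_try_merge(), elv_rqhash_find() */

static enum elv_merge elv_merge_sketch(struct request_queue *q,
				       struct request **req, struct bio *bio)
{
	struct elevator_queue *e = q->elevator;
	struct request *__rq;

	if (blk_queue_nomerges(q) || !bio_mergeable(bio))
		return ELEVATOR_NO_MERGE;

	/* 1. Cheapest: the request we merged into last time. */
	if (q->last_merge && elv_bio_merge_ok(q->last_merge, bio)) {
		enum elv_merge ret = blk_try_merge(q->last_merge, bio);

		if (ret != ELEVATOR_NO_MERGE) {
			*req = q->last_merge;
			return ret;
		}
	}

	if (blk_queue_noxmerges(q))
		return ELEVATOR_NO_MERGE;

	/* 2. O(1) hash lookup keyed on end sector: back merges only. */
	__rq = elv_rqhash_find(q, bio->bi_iter.bi_sector);
	if (__rq && elv_bio_merge_ok(__rq, bio)) {
		*req = __rq;
		return ELEVATOR_BACK_MERGE;
	}

	/* 3. Last resort: the scheduler's own lookup (e.g. an rbtree). */
	if (e->type->ops.request_merge)
		return e->type->ops.request_merge(q, req, bio);

	return ELEVATOR_NO_MERGE;
}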
q 356 block/elevator.c bool elv_attempt_insert_merge(struct request_queue *q, struct request *rq)
q 361 block/elevator.c if (blk_queue_nomerges(q))
q 367 block/elevator.c if (q->last_merge && blk_attempt_req_merge(q, q->last_merge, rq))
q 370 block/elevator.c if (blk_queue_noxmerges(q))
q 378 block/elevator.c __rq = elv_rqhash_find(q, blk_rq_pos(rq));
q 379 block/elevator.c if (!__rq || !blk_attempt_req_merge(q, __rq, rq))
q 390 block/elevator.c void elv_merged_request(struct request_queue *q, struct request *rq,
q 393 block/elevator.c struct elevator_queue *e = q->elevator;
q 396 block/elevator.c e->type->ops.request_merged(q, rq, type);
q 399 block/elevator.c elv_rqhash_reposition(q, rq);
q 401 block/elevator.c q->last_merge = rq;
q 404 block/elevator.c void elv_merge_requests(struct request_queue *q, struct request *rq,
q 407 block/elevator.c struct elevator_queue *e = q->elevator;
q 410 block/elevator.c e->type->ops.requests_merged(q, rq, next);
q 412 block/elevator.c elv_rqhash_reposition(q, rq);
q 413 block/elevator.c q->last_merge = rq;
q 416 block/elevator.c struct request *elv_latter_request(struct request_queue *q, struct request *rq)
q 418 block/elevator.c struct elevator_queue *e = q->elevator;
q 421 block/elevator.c return e->type->ops.next_request(q, rq);
q 426 block/elevator.c struct request *elv_former_request(struct request_queue *q, struct request *rq)
q 428 block/elevator.c struct elevator_queue *e = q->elevator;
q 431 block/elevator.c return e->type->ops.former_request(q, rq);
q 488 block/elevator.c int elv_register_queue(struct request_queue *q, bool uevent)
q 490 block/elevator.c struct elevator_queue *e = q->elevator;
q 493 block/elevator.c error = kobject_add(&e->kobj, &q->kobj, "%s", "iosched");
q 516 block/elevator.c void elv_unregister_queue(struct request_queue *q)
q 518 block/elevator.c if (q) {
q 519 block/elevator.c struct elevator_queue *e = q->elevator;
q 526 block/elevator.c wbt_enable_default(q);
q 581 block/elevator.c int elevator_switch_mq(struct request_queue *q,
q 586 block/elevator.c lockdep_assert_held(&q->sysfs_lock);
q 588 block/elevator.c if (q->elevator) {
q 589 block/elevator.c if (q->elevator->registered)
q 590 block/elevator.c elv_unregister_queue(q);
q 592 block/elevator.c ioc_clear_queue(q);
q 593 block/elevator.c elevator_exit(q, q->elevator);
q 596 block/elevator.c ret = blk_mq_init_sched(q, new_e);
q 601 block/elevator.c ret = elv_register_queue(q, true);
q 603 block/elevator.c elevator_exit(q, q->elevator);
q 609 block/elevator.c blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name);
q 611 block/elevator.c blk_add_trace_msg(q, "elv switch: none");
q 617 block/elevator.c static inline bool elv_support_iosched(struct request_queue *q)
q 619 block/elevator.c if (!q->mq_ops ||
q 620 block/elevator.c (q->tag_set && (q->tag_set->flags & BLK_MQ_F_NO_SCHED)))
q 629 block/elevator.c static struct elevator_type *elevator_get_default(struct request_queue *q)
q 631 block/elevator.c if (q->nr_hw_queues != 1)
q 634 block/elevator.c return elevator_get(q, "mq-deadline", false);
q 641 block/elevator.c static struct elevator_type *elevator_get_by_features(struct request_queue *q)
q 649 block/elevator.c q->required_elevator_features)) {
q 668 block/elevator.c void elevator_init_mq(struct request_queue *q)
q 673 block/elevator.c if (!elv_support_iosched(q))
q 676 block/elevator.c WARN_ON_ONCE(test_bit(QUEUE_FLAG_REGISTERED, &q->queue_flags));
q 678 block/elevator.c if (unlikely(q->elevator))
q 681 block/elevator.c if (!q->required_elevator_features)
q 682 block/elevator.c e = elevator_get_default(q);
q 684 block/elevator.c e = elevator_get_by_features(q);
q 688 block/elevator.c blk_mq_freeze_queue(q);
q 689 block/elevator.c blk_mq_quiesce_queue(q);
q 691 block/elevator.c err = blk_mq_init_sched(q, e);
q 693 block/elevator.c blk_mq_unquiesce_queue(q);
q 694 block/elevator.c blk_mq_unfreeze_queue(q);
q 710 block/elevator.c static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
q 714 block/elevator.c lockdep_assert_held(&q->sysfs_lock);
q 716 block/elevator.c blk_mq_freeze_queue(q);
q 717 block/elevator.c blk_mq_quiesce_queue(q);
q 719 block/elevator.c err = elevator_switch_mq(q, new_e);
q 721 block/elevator.c blk_mq_unquiesce_queue(q);
q 722 block/elevator.c blk_mq_unfreeze_queue(q);
q 730 block/elevator.c static int __elevator_change(struct request_queue *q, const char *name)
q 736 block/elevator.c if (!blk_queue_registered(q))
q 743 block/elevator.c if (!q->elevator)
q 745 block/elevator.c return elevator_switch(q, NULL);
q 749 block/elevator.c e = elevator_get(q, strstrip(elevator_name), true);
q 753 block/elevator.c if (q->elevator &&
q 754 block/elevator.c elevator_match(q->elevator->type, elevator_name, 0)) {
q 759 block/elevator.c return elevator_switch(q, e);
q 762 block/elevator.c ssize_t elv_iosched_store(struct request_queue *q, const char *name,
q 767 block/elevator.c if (!queue_is_mq(q) || !elv_support_iosched(q))
q 770 block/elevator.c ret = __elevator_change(q, name);
q 777 block/elevator.c ssize_t elv_iosched_show(struct request_queue *q, char *name)
q 779 block/elevator.c struct elevator_queue *e = q->elevator;
q 784 block/elevator.c if (!queue_is_mq(q))
q 787 block/elevator.c if (!q->elevator)
q 798 block/elevator.c if (elv_support_iosched(q) &&
q 800 block/elevator.c q->required_elevator_features)
q 805 block/elevator.c if (q->elevator)
q 812 block/elevator.c struct request *elv_rb_former_request(struct request_queue *q,
q 824 block/elevator.c struct request *elv_rb_latter_request(struct request_queue *q,
q 49 block/genhd.c void part_inc_in_flight(struct request_queue *q, struct hd_struct *part, int rw)
q 51 block/genhd.c if (queue_is_mq(q))
q 59 block/genhd.c void part_dec_in_flight(struct request_queue *q, struct hd_struct *part, int rw)
q 61 block/genhd.c if (queue_is_mq(q))
q 69 block/genhd.c unsigned int part_in_flight(struct request_queue *q, struct hd_struct *part)
q 74 block/genhd.c if (queue_is_mq(q)) {
q 75 block/genhd.c return blk_mq_in_flight(q, part);
q 89 block/genhd.c void part_in_flight_rw(struct request_queue *q, struct hd_struct *part,
q 94 block/genhd.c if (queue_is_mq(q)) {
q 95 block/genhd.c blk_mq_in_flight_rw(q, part, inflight);
q 206 block/ioctl.c struct request_queue *q = bdev_get_queue(bdev);
q 213 block/ioctl.c if (!blk_queue_discard(q))
q 151 block/kyber-iosched.c struct request_queue *q;
q 258 block/kyber-iosched.c trace_kyber_latency(kqd->q, kyber_domain_names[sched_domain],
q 271 block/kyber-iosched.c trace_kyber_adjust(kqd->q, kyber_domain_names[sched_domain],
q 356 block/kyber-iosched.c static unsigned int kyber_sched_tags_shift(struct request_queue *q)
q 362 block/kyber-iosched.c return q->queue_hw_ctx[0]->sched_tags->bitmap_tags.sb.shift;
q 365 block/kyber-iosched.c static struct kyber_queue_data *kyber_queue_data_alloc(struct request_queue *q)
q 372 block/kyber-iosched.c kqd = kzalloc_node(sizeof(*kqd), GFP_KERNEL, q->node);
q 376 block/kyber-iosched.c kqd->q = q;
q 390 block/kyber-iosched.c GFP_KERNEL, q->node);
q 403 block/kyber-iosched.c shift = kyber_sched_tags_shift(q);
q 416 block/kyber-iosched.c static int kyber_init_sched(struct request_queue *q, struct elevator_type *e)
q 421 block/kyber-iosched.c eq = elevator_alloc(q, e);
q 425 block/kyber-iosched.c kqd = kyber_queue_data_alloc(q);
q 431 block/kyber-iosched.c blk_stat_enable_accounting(q);
q 434 block/kyber-iosched.c q->elevator = eq;
q 559 block/kyber-iosched.c struct kyber_queue_data *kqd = data->q->elevator->elevator_data;
q 612 block/kyber-iosched.c struct kyber_queue_data *kqd = rq->q->elevator->elevator_data;
q 637 block/kyber-iosched.c struct kyber_queue_data *kqd = rq->q->elevator->elevator_data;
q 777 block/kyber-iosched.c trace_kyber_throttled(kqd->q,
q 790 block/kyber-iosched.c trace_kyber_throttled(kqd->q,
q 899 block/kyber-iosched.c struct request_queue *q = data; \
q 900 block/kyber-iosched.c struct kyber_queue_data *kqd = q->elevator->elevator_data; \
q 958 block/kyber-iosched.c struct request_queue *q = data;
q 959 block/kyber-iosched.c struct kyber_queue_data *kqd = q->elevator->elevator_data;
q 109 block/mq-deadline.c static void deadline_remove_request(struct request_queue *q, struct request *rq)
q 111 block/mq-deadline.c struct deadline_data *dd = q->elevator->elevator_data;
q 121 block/mq-deadline.c elv_rqhash_del(q, rq);
q 122 block/mq-deadline.c if (q->last_merge == rq)
q 123 block/mq-deadline.c q->last_merge = NULL;
q 126 block/mq-deadline.c static void dd_request_merged(struct request_queue *q, struct request *req,
q 129 block/mq-deadline.c struct deadline_data *dd = q->elevator->elevator_data;
q 140 block/mq-deadline.c static void dd_merged_requests(struct request_queue *q, struct request *req,
q 158 block/mq-deadline.c deadline_remove_request(q, next);
q 176 block/mq-deadline.c deadline_remove_request(rq->q, rq);
q 213 block/mq-deadline.c if (data_dir == READ || !blk_queue_is_zoned(rq->q))
q 249 block/mq-deadline.c if (data_dir == READ || !blk_queue_is_zoned(rq->q))
q 406 block/mq-deadline.c static int dd_init_queue(struct request_queue *q, struct elevator_type *e)
q 411 block/mq-deadline.c eq = elevator_alloc(q, e);
q 415 block/mq-deadline.c dd = kzalloc_node(sizeof(*dd), GFP_KERNEL, q->node);
q 435 block/mq-deadline.c q->elevator = eq;
q 439 block/mq-deadline.c static int dd_request_merge(struct request_queue *q, struct request **rq,
q 442 block/mq-deadline.c struct deadline_data *dd = q->elevator->elevator_data;
q 465 block/mq-deadline.c struct request_queue *q = hctx->queue;
q 466 block/mq-deadline.c struct deadline_data *dd = q->elevator->elevator_data;
q 471 block/mq-deadline.c ret = blk_mq_sched_try_merge(q, bio, nr_segs, &free);
q 486 block/mq-deadline.c struct request_queue *q = hctx->queue;
q 487 block/mq-deadline.c struct deadline_data *dd = q->elevator->elevator_data;
q 496 block/mq-deadline.c if (blk_mq_sched_try_insert_merge(q, rq))
q 510 block/mq-deadline.c elv_rqhash_add(q, rq);
q 511 block/mq-deadline.c if (!q->last_merge)
q 512 block/mq-deadline.c q->last_merge = rq;
q 526 block/mq-deadline.c struct request_queue *q = hctx->queue;
q 527 block/mq-deadline.c struct deadline_data *dd = q->elevator->elevator_data;
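kyber_init_sched() and dd_init_queue() above share one bring-up shape: elevator_alloc(), allocate per-queue data on the queue's NUMA node, hang it off eq->elevator_data, and publish with q->elevator = eq. A condensed, hedged sketch; struct my_sched_data and the elevator_data assignment ordering are filled in from the usual pattern, not copied verbatim:

#include <linux/elevator.h>

struct my_sched_data {
	int dummy;	/* per-queue scheduler state would live here */
};

static int my_init_sched(struct request_queue *q, struct elevator_type *e)
{
	struct elevator_queue *eq;
	struct my_sched_data *d;

	eq = elevator_alloc(q, e);
	if (!eq)
		return -ENOMEM;

	/* Per-queue data lives on the queue's NUMA node, as above. */
	d = kzalloc_node(sizeof(*d), GFP_KERNEL, q->node);
	if (!d) {
		kobject_put(&eq->kobj);
		return -ENOMEM;
	}

	eq->elevator_data = d;
	q->elevator = eq;	/* publish: the scheduler is now live */
	return 0;
}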
q 564 block/mq-deadline.c struct request_queue *q = rq->q;
q 566 block/mq-deadline.c if (blk_queue_is_zoned(q)) {
q 567 block/mq-deadline.c struct deadline_data *dd = q->elevator->elevator_data;
q 661 block/mq-deadline.c struct request_queue *q = m->private; \
q 662 block/mq-deadline.c struct deadline_data *dd = q->elevator->elevator_data; \
q 671 block/mq-deadline.c struct request_queue *q = m->private; \
q 672 block/mq-deadline.c struct deadline_data *dd = q->elevator->elevator_data; \
q 680 block/mq-deadline.c struct request_queue *q = m->private; \
q 681 block/mq-deadline.c struct deadline_data *dd = q->elevator->elevator_data; \
q 696 block/mq-deadline.c struct request_queue *q = data; \
q 697 block/mq-deadline.c struct deadline_data *dd = q->elevator->elevator_data; \
q 710 block/mq-deadline.c struct request_queue *q = data;
q 711 block/mq-deadline.c struct deadline_data *dd = q->elevator->elevator_data;
q 719 block/mq-deadline.c struct request_queue *q = data;
q 720 block/mq-deadline.c struct deadline_data *dd = q->elevator->elevator_data;
q 729 block/mq-deadline.c struct request_queue *q = m->private;
q 730 block/mq-deadline.c struct deadline_data *dd = q->elevator->elevator_data;
q 738 block/mq-deadline.c struct request_queue *q = m->private;
q 739 block/mq-deadline.c struct deadline_data *dd = q->elevator->elevator_data;
q 747 block/mq-deadline.c struct request_queue *q = m->private;
q 748 block/mq-deadline.c struct deadline_data *dd = q->elevator->elevator_data;
q 122 block/partition-generic.c struct request_queue *q = part_to_disk(p)->queue;
q 125 block/partition-generic.c inflight = part_in_flight(q, p);
q 153 block/partition-generic.c struct request_queue *q = part_to_disk(p)->queue;
q 156 block/partition-generic.c part_in_flight_rw(q, p, inflight);
q 46 block/scsi_ioctl.c static int scsi_get_idlun(struct request_queue *q, int __user *p)
q 51 block/scsi_ioctl.c static int scsi_get_bus(struct request_queue *q, int __user *p)
q 56 block/scsi_ioctl.c static int sg_get_timeout(struct request_queue *q)
q 58 block/scsi_ioctl.c return jiffies_to_clock_t(q->sg_timeout);
q 61 block/scsi_ioctl.c static int sg_set_timeout(struct request_queue *q, int __user *p)
q 66 block/scsi_ioctl.c q->sg_timeout = clock_t_to_jiffies(timeout);
q 71 block/scsi_ioctl.c static int max_sectors_bytes(struct request_queue *q)
q 73 block/scsi_ioctl.c unsigned int max_sectors = queue_max_sectors(q);
q 80 block/scsi_ioctl.c static int sg_get_reserved_size(struct request_queue *q, int __user *p)
q 82 block/scsi_ioctl.c int val = min_t(int, q->sg_reserved_size, max_sectors_bytes(q));
q 87 block/scsi_ioctl.c static int sg_set_reserved_size(struct request_queue *q, int __user *p)
q 97 block/scsi_ioctl.c q->sg_reserved_size = min(size, max_sectors_bytes(q));
q 105 block/scsi_ioctl.c static int sg_emulated_host(struct request_queue *q, int __user *p)
q 216 block/scsi_ioctl.c static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
q 233 block/scsi_ioctl.c rq->timeout = q->sg_timeout;
q 278 block/scsi_ioctl.c static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
q 292 block/scsi_ioctl.c if (hdr->dxfer_len > (queue_max_hw_sectors(q) << 9))
q 310 block/scsi_ioctl.c rq = blk_get_request(q, writing ? REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, 0);
REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, 0); q 321 block/scsi_ioctl.c ret = blk_fill_sghdr_rq(q, rq, hdr, mode); q 339 block/scsi_ioctl.c ret = blk_rq_map_user_iov(q, rq, NULL, &i, GFP_KERNEL); q 342 block/scsi_ioctl.c ret = blk_rq_map_user(q, rq, NULL, hdr->dxferp, hdr->dxfer_len, q 357 block/scsi_ioctl.c blk_execute_rq(q, bd_disk, rq, at_head); q 404 block/scsi_ioctl.c int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode, q 431 block/scsi_ioctl.c buffer = kzalloc(bytes, q->bounce_gfp | GFP_USER| __GFP_NOWARN); q 437 block/scsi_ioctl.c rq = blk_get_request(q, in_len ? REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, 0); q 488 block/scsi_ioctl.c if (bytes && blk_rq_map_kern(q, rq, buffer, bytes, GFP_NOIO)) { q 493 block/scsi_ioctl.c blk_execute_rq(q, disk, rq, 0); q 519 block/scsi_ioctl.c static int __blk_send_generic(struct request_queue *q, struct gendisk *bd_disk, q 525 block/scsi_ioctl.c rq = blk_get_request(q, REQ_OP_SCSI_OUT, 0); q 532 block/scsi_ioctl.c blk_execute_rq(q, bd_disk, rq, 0); q 539 block/scsi_ioctl.c static inline int blk_send_start_stop(struct request_queue *q, q 542 block/scsi_ioctl.c return __blk_send_generic(q, bd_disk, GPCMD_START_STOP_UNIT, data); q 545 block/scsi_ioctl.c int scsi_cmd_ioctl(struct request_queue *q, struct gendisk *bd_disk, fmode_t mode, q 550 block/scsi_ioctl.c if (!q) q 561 block/scsi_ioctl.c err = scsi_get_idlun(q, arg); q 564 block/scsi_ioctl.c err = scsi_get_bus(q, arg); q 567 block/scsi_ioctl.c err = sg_set_timeout(q, arg); q 570 block/scsi_ioctl.c err = sg_get_timeout(q); q 573 block/scsi_ioctl.c err = sg_get_reserved_size(q, arg); q 576 block/scsi_ioctl.c err = sg_set_reserved_size(q, arg); q 579 block/scsi_ioctl.c err = sg_emulated_host(q, arg); q 587 block/scsi_ioctl.c err = sg_io(q, bd_disk, &hdr, mode); q 635 block/scsi_ioctl.c err = sg_io(q, bd_disk, &hdr, mode); q 659 block/scsi_ioctl.c err = sg_scsi_ioctl(q, bd_disk, mode, arg); q 662 block/scsi_ioctl.c err = blk_send_start_stop(q, bd_disk, 0x03); q 665 block/scsi_ioctl.c err = blk_send_start_stop(q, bd_disk, 0x02); q 134 block/t10-pi.c const int tuple_sz = rq->q->integrity.tuple_size; q 185 block/t10-pi.c unsigned intervals = nr_bytes >> rq->q->integrity.interval_exp; q 186 block/t10-pi.c const int tuple_sz = rq->q->integrity.tuple_size; q 192 crypto/algapi.c struct crypto_alg *q; q 206 crypto/algapi.c list_for_each_entry(q, &crypto_alg_list, cra_list) { q 207 crypto/algapi.c if (q == alg) q 210 crypto/algapi.c if (crypto_is_moribund(q)) q 213 crypto/algapi.c if (crypto_is_larval(q)) { q 214 crypto/algapi.c if (!strcmp(alg->cra_driver_name, q->cra_driver_name)) q 219 crypto/algapi.c if (!strcmp(q->cra_driver_name, alg->cra_name) || q 220 crypto/algapi.c !strcmp(q->cra_name, alg->cra_driver_name)) q 258 crypto/algapi.c struct crypto_alg *q; q 263 crypto/algapi.c list_for_each_entry(q, &crypto_alg_list, cra_list) { q 264 crypto/algapi.c if (crypto_is_moribund(q) || !crypto_is_larval(q)) q 267 crypto/algapi.c test = (struct crypto_larval *)q; q 269 crypto/algapi.c if (!strcmp(q->cra_driver_name, name)) q 277 crypto/algapi.c q->cra_flags |= CRYPTO_ALG_DEAD; q 286 crypto/algapi.c list_for_each_entry(q, &crypto_alg_list, cra_list) { q 287 crypto/algapi.c if (crypto_is_moribund(q) || !crypto_is_larval(q)) q 290 crypto/algapi.c if (strcmp(alg->cra_name, q->cra_name)) q 293 crypto/algapi.c if (q->cra_priority > alg->cra_priority) { q 299 crypto/algapi.c list_for_each_entry(q, &crypto_alg_list, cra_list) { q 300 crypto/algapi.c if (q == alg) q 303 crypto/algapi.c if (crypto_is_moribund(q)) 
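The crypto/algapi.c and crypto/api.c entries just above and below all walk crypto_alg_list the same way: skip moribund entries, prefer an exact cra_driver_name match, and otherwise keep the highest-priority entry whose generic cra_name matches. Below is a minimal userspace sketch of that lookup pattern; the hand-rolled struct alg, alg_lookup(), and the sample names are illustrative assumptions, not the kernel's crypto API.

/* lookup.c - priority-based registry lookup over a linked list, in the
 * shape of the crypto_alg_list walks indexed around here: an exact
 * driver-name match wins outright; a generic-name (fuzzy) match wins
 * only on strictly higher priority. Userspace sketch, not kernel code. */
#include <stdio.h>
#include <string.h>

struct alg {
	const char *cra_name;        /* generic name, e.g. "aes" */
	const char *cra_driver_name; /* implementation name, e.g. "aes-ni" */
	int cra_priority;
	struct alg *next;
};

static struct alg *alg_lookup(struct alg *list, const char *name)
{
	struct alg *best_alg = NULL;
	int best = -2;

	for (struct alg *q = list; q; q = q->next) {
		int exact = !strcmp(q->cra_driver_name, name);
		int fuzzy = !strcmp(q->cra_name, name);

		/* Same test shape as the __crypto_alg_lookup() entry below. */
		if (!exact && !(fuzzy && q->cra_priority > best))
			continue;

		best = q->cra_priority;
		best_alg = q;
		if (exact)
			break;
	}
	return best_alg;
}

int main(void)
{
	struct alg generic = { "aes", "aes-generic", 100, NULL };
	struct alg accel   = { "aes", "aes-ni",      300, &generic };

	struct alg *a = alg_lookup(&accel, "aes");         /* fuzzy: aes-ni wins */
	struct alg *b = alg_lookup(&accel, "aes-generic"); /* exact match pins it */
	printf("%s %s\n", a->cra_driver_name, b->cra_driver_name);
	return 0;
}

With both entries registered, asking for the generic name "aes" resolves to the higher-priority aes-ni entry, while naming a driver explicitly pins the choice regardless of priority.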
q 306 crypto/algapi.c if (crypto_is_larval(q)) { q 307 crypto/algapi.c struct crypto_larval *larval = (void *)q; q 314 crypto/algapi.c if (strcmp(alg->cra_name, q->cra_name) && q 315 crypto/algapi.c strcmp(alg->cra_driver_name, q->cra_name)) q 320 crypto/algapi.c if ((q->cra_flags ^ alg->cra_flags) & larval->mask) q 331 crypto/algapi.c if (strcmp(alg->cra_name, q->cra_name)) q 334 crypto/algapi.c if (strcmp(alg->cra_driver_name, q->cra_driver_name) && q 335 crypto/algapi.c q->cra_priority > alg->cra_priority) q 338 crypto/algapi.c crypto_remove_spawns(q, &list, alg); q 476 crypto/algapi.c struct crypto_template *q; q 483 crypto/algapi.c list_for_each_entry(q, &crypto_template_list, list) { q 484 crypto/algapi.c if (q == tmpl) q 554 crypto/algapi.c struct crypto_template *q, *tmpl = NULL; q 557 crypto/algapi.c list_for_each_entry(q, &crypto_template_list, list) { q 558 crypto/algapi.c if (strcmp(q->name, name)) q 560 crypto/algapi.c if (unlikely(!crypto_tmpl_get(q))) q 563 crypto/algapi.c tmpl = q; q 58 crypto/api.c struct crypto_alg *q, *alg = NULL; q 61 crypto/api.c list_for_each_entry(q, &crypto_alg_list, cra_list) { q 64 crypto/api.c if (crypto_is_moribund(q)) q 67 crypto/api.c if ((q->cra_flags ^ type) & mask) q 70 crypto/api.c if (crypto_is_larval(q) && q 71 crypto/api.c !crypto_is_test_larval((struct crypto_larval *)q) && q 72 crypto/api.c ((struct crypto_larval *)q)->mask != mask) q 75 crypto/api.c exact = !strcmp(q->cra_driver_name, name); q 76 crypto/api.c fuzzy = !strcmp(q->cra_name, name); q 77 crypto/api.c if (!exact && !(fuzzy && q->cra_priority > best)) q 80 crypto/api.c if (unlikely(!crypto_mod_get(q))) q 83 crypto/api.c best = q->cra_priority; q 86 crypto/api.c alg = q; q 160 crypto/asymmetric_keys/x509_public_key.c const char *q; q 199 crypto/asymmetric_keys/x509_public_key.c q = cert->raw_skid; q 202 crypto/asymmetric_keys/x509_public_key.c q = cert->raw_serial; q 213 crypto/asymmetric_keys/x509_public_key.c p = bin2hex(p, q, srlen); q 363 crypto/async_tx/async_pq.c void *p, *q, *s; q 397 crypto/async_tx/async_pq.c q = page_address(q_src) + offset; q 399 crypto/async_tx/async_pq.c *pqres |= !!memcmp(q, s, len) << SUM_CHECK_Q; q 150 crypto/async_tx/async_raid6_recov.c struct page *p, *q, *a, *b; q 159 crypto/async_tx/async_raid6_recov.c q = blocks[disks-1]; q 167 crypto/async_tx/async_raid6_recov.c srcs[1] = q; q 189 crypto/async_tx/async_raid6_recov.c struct page *p, *q, *g, *dp, *dq; q 211 crypto/async_tx/async_raid6_recov.c q = blocks[disks-1]; q 235 crypto/async_tx/async_raid6_recov.c srcs[1] = q; q 263 crypto/async_tx/async_raid6_recov.c struct page *p, *q, *dp, *dq; q 272 crypto/async_tx/async_raid6_recov.c q = blocks[disks-1]; q 292 crypto/async_tx/async_raid6_recov.c blocks[disks-1] = q; q 303 crypto/async_tx/async_raid6_recov.c srcs[1] = q; q 413 crypto/async_tx/async_raid6_recov.c struct page *p, *q, *dq; q 461 crypto/async_tx/async_raid6_recov.c q = blocks[disks-1]; q 491 crypto/async_tx/async_raid6_recov.c blocks[disks-1] = q; q 497 crypto/async_tx/async_raid6_recov.c srcs[1] = q; q 38 crypto/crypto_user_base.c struct crypto_alg *q, *alg = NULL; q 42 crypto/crypto_user_base.c list_for_each_entry(q, &crypto_alg_list, cra_list) { q 45 crypto/crypto_user_base.c if (crypto_is_larval(q)) q 48 crypto/crypto_user_base.c if ((q->cra_flags ^ p->cru_type) & p->cru_mask) q 52 crypto/crypto_user_base.c match = !strcmp(q->cra_driver_name, q 55 crypto/crypto_user_base.c match = !strcmp(q->cra_name, p->cru_name); q 60 crypto/crypto_user_base.c if 
(unlikely(!crypto_mod_get(q))) q 63 crypto/crypto_user_base.c alg = q; q 16 crypto/dh.c MPI q; /* Value is optional. */ q 24 crypto/dh.c mpi_free(ctx->q); q 61 crypto/dh.c if (params->q && params->q_size) { q 62 crypto/dh.c ctx->q = mpi_read_raw_data(params->q, params->q_size); q 63 crypto/dh.c if (!ctx->q) q 124 crypto/dh.c if (ctx->q) { q 131 crypto/dh.c ret = mpi_powm(val, y, ctx->q, ctx->p); q 60 crypto/dh_helper.c ptr = dh_pack_data(ptr, end, params->q, params->q_size); q 100 crypto/dh_helper.c params->q = (void *)(ptr + params->key_size + params->p_size); q 114 crypto/dh_helper.c params->q = NULL; q 554 crypto/ecc.c u64 q[ECC_MAX_DIGITS]; q 565 crypto/ecc.c vli_set(q, product + ndigits, ndigits); q 570 crypto/ecc.c for (i = 1; carry || !vli_is_zero(q, ndigits); i++) { q 573 crypto/ecc.c vli_umult(qc, q, c2, ndigits); q 576 crypto/ecc.c vli_set(q, qc + ndigits, ndigits); q 652 crypto/ecc.c u64 q[ECC_MAX_DIGITS * 2]; q 656 crypto/ecc.c vli_mult(q, product + ndigits, mu, ndigits); q 658 crypto/ecc.c vli_add(q + ndigits, q + ndigits, product + ndigits, ndigits); q 659 crypto/ecc.c vli_mult(r, mod, q + ndigits, ndigits); q 1207 crypto/ecc.c const struct ecc_point *p, const struct ecc_point *q, q 1215 crypto/ecc.c vli_set(result->x, q->x, ndigits); q 1216 crypto/ecc.c vli_set(result->y, q->y, ndigits); q 1230 crypto/ecc.c const u64 *u2, const struct ecc_point *q, q 1245 crypto/ecc.c ecc_point_add(&sum, p, q, curve); q 1248 crypto/ecc.c points[2] = q; q 243 crypto/ecc.h const u64 *y, const struct ecc_point *q, q 404 crypto/essiv.c const char *p, *q; q 413 crypto/essiv.c q = strchr(p, ')'); q 414 crypto/essiv.c if (!q) q 417 crypto/essiv.c len = q - p; q 56 crypto/gf128mul.c #define gf128mul_dat(q) { \ q 57 crypto/gf128mul.c q(0x00), q(0x01), q(0x02), q(0x03), q(0x04), q(0x05), q(0x06), q(0x07),\ q 58 crypto/gf128mul.c q(0x08), q(0x09), q(0x0a), q(0x0b), q(0x0c), q(0x0d), q(0x0e), q(0x0f),\ q 59 crypto/gf128mul.c q(0x10), q(0x11), q(0x12), q(0x13), q(0x14), q(0x15), q(0x16), q(0x17),\ q 60 crypto/gf128mul.c q(0x18), q(0x19), q(0x1a), q(0x1b), q(0x1c), q(0x1d), q(0x1e), q(0x1f),\ q 61 crypto/gf128mul.c q(0x20), q(0x21), q(0x22), q(0x23), q(0x24), q(0x25), q(0x26), q(0x27),\ q 62 crypto/gf128mul.c q(0x28), q(0x29), q(0x2a), q(0x2b), q(0x2c), q(0x2d), q(0x2e), q(0x2f),\ q 63 crypto/gf128mul.c q(0x30), q(0x31), q(0x32), q(0x33), q(0x34), q(0x35), q(0x36), q(0x37),\ q 64 crypto/gf128mul.c q(0x38), q(0x39), q(0x3a), q(0x3b), q(0x3c), q(0x3d), q(0x3e), q(0x3f),\ q 65 crypto/gf128mul.c q(0x40), q(0x41), q(0x42), q(0x43), q(0x44), q(0x45), q(0x46), q(0x47),\ q 66 crypto/gf128mul.c q(0x48), q(0x49), q(0x4a), q(0x4b), q(0x4c), q(0x4d), q(0x4e), q(0x4f),\ q 67 crypto/gf128mul.c q(0x50), q(0x51), q(0x52), q(0x53), q(0x54), q(0x55), q(0x56), q(0x57),\ q 68 crypto/gf128mul.c q(0x58), q(0x59), q(0x5a), q(0x5b), q(0x5c), q(0x5d), q(0x5e), q(0x5f),\ q 69 crypto/gf128mul.c q(0x60), q(0x61), q(0x62), q(0x63), q(0x64), q(0x65), q(0x66), q(0x67),\ q 70 crypto/gf128mul.c q(0x68), q(0x69), q(0x6a), q(0x6b), q(0x6c), q(0x6d), q(0x6e), q(0x6f),\ q 71 crypto/gf128mul.c q(0x70), q(0x71), q(0x72), q(0x73), q(0x74), q(0x75), q(0x76), q(0x77),\ q 72 crypto/gf128mul.c q(0x78), q(0x79), q(0x7a), q(0x7b), q(0x7c), q(0x7d), q(0x7e), q(0x7f),\ q 73 crypto/gf128mul.c q(0x80), q(0x81), q(0x82), q(0x83), q(0x84), q(0x85), q(0x86), q(0x87),\ q 74 crypto/gf128mul.c q(0x88), q(0x89), q(0x8a), q(0x8b), q(0x8c), q(0x8d), q(0x8e), q(0x8f),\ q 75 crypto/gf128mul.c q(0x90), q(0x91), q(0x92), q(0x93), q(0x94), q(0x95), q(0x96), q(0x97),\ 
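The gf128mul_dat(q) entries above and below are a classic X-macro: the table body applies a caller-supplied macro q to every byte value 0x00..0xff, so one skeleton can stamp out several precomputed 256-entry tables. Here is a compilable sketch of the same technique; BYTE_TABLE, ROW, and the per-entry macro GF_XTIME (doubling in GF(2^8) modulo the AES polynomial 0x11b) are illustrative choices of mine, not the kernel's xda_* definitions.

/* table_gen.c - compile-time table generation by macro expansion,
 * in the style of gf128mul_dat(q). Userspace sketch. */
#include <stdio.h>
#include <stdint.h>

/* Illustrative per-entry macro: xtime in GF(2^8), not the kernel's. */
#define GF_XTIME(i) (uint8_t)(((i) & 0x80) ? (((i) << 1) ^ 0x1b) : ((i) << 1))

/* Expand a per-entry macro q over one 16-value row... */
#define ROW(q, b) \
	q((b)+ 0), q((b)+ 1), q((b)+ 2), q((b)+ 3), \
	q((b)+ 4), q((b)+ 5), q((b)+ 6), q((b)+ 7), \
	q((b)+ 8), q((b)+ 9), q((b)+10), q((b)+11), \
	q((b)+12), q((b)+13), q((b)+14), q((b)+15)

/* ...and over all 256 byte values, like gf128mul_dat(q). */
#define BYTE_TABLE(q) { \
	ROW(q, 0x00), ROW(q, 0x10), ROW(q, 0x20), ROW(q, 0x30), \
	ROW(q, 0x40), ROW(q, 0x50), ROW(q, 0x60), ROW(q, 0x70), \
	ROW(q, 0x80), ROW(q, 0x90), ROW(q, 0xa0), ROW(q, 0xb0), \
	ROW(q, 0xc0), ROW(q, 0xd0), ROW(q, 0xe0), ROW(q, 0xf0) }

static const uint8_t gf_double[256] = BYTE_TABLE(GF_XTIME);

int main(void)
{
	/* 0x80 doubled overflows and is reduced by 0x1b. */
	printf("2*0x80 = 0x%02x\n", gf_double[0x80]); /* 0x1b */
	printf("2*0x53 = 0x%02x\n", gf_double[0x53]); /* 0xa6 */
	return 0;
}

Passing a different per-entry macro to BYTE_TABLE() yields a different constant table from the same skeleton, which is how gf128mul.c derives its big-endian and little-endian multiplication tables from the one gf128mul_dat() body.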
q 76 crypto/gf128mul.c q(0x98), q(0x99), q(0x9a), q(0x9b), q(0x9c), q(0x9d), q(0x9e), q(0x9f),\ q 77 crypto/gf128mul.c q(0xa0), q(0xa1), q(0xa2), q(0xa3), q(0xa4), q(0xa5), q(0xa6), q(0xa7),\ q 78 crypto/gf128mul.c q(0xa8), q(0xa9), q(0xaa), q(0xab), q(0xac), q(0xad), q(0xae), q(0xaf),\ q 79 crypto/gf128mul.c q(0xb0), q(0xb1), q(0xb2), q(0xb3), q(0xb4), q(0xb5), q(0xb6), q(0xb7),\ q 80 crypto/gf128mul.c q(0xb8), q(0xb9), q(0xba), q(0xbb), q(0xbc), q(0xbd), q(0xbe), q(0xbf),\ q 81 crypto/gf128mul.c q(0xc0), q(0xc1), q(0xc2), q(0xc3), q(0xc4), q(0xc5), q(0xc6), q(0xc7),\ q 82 crypto/gf128mul.c q(0xc8), q(0xc9), q(0xca), q(0xcb), q(0xcc), q(0xcd), q(0xce), q(0xcf),\ q 83 crypto/gf128mul.c q(0xd0), q(0xd1), q(0xd2), q(0xd3), q(0xd4), q(0xd5), q(0xd6), q(0xd7),\ q 84 crypto/gf128mul.c q(0xd8), q(0xd9), q(0xda), q(0xdb), q(0xdc), q(0xdd), q(0xde), q(0xdf),\ q 85 crypto/gf128mul.c q(0xe0), q(0xe1), q(0xe2), q(0xe3), q(0xe4), q(0xe5), q(0xe6), q(0xe7),\ q 86 crypto/gf128mul.c q(0xe8), q(0xe9), q(0xea), q(0xeb), q(0xec), q(0xed), q(0xee), q(0xef),\ q 87 crypto/gf128mul.c q(0xf0), q(0xf1), q(0xf2), q(0xf3), q(0xf4), q(0xf5), q(0xf6), q(0xf7),\ q 88 crypto/gf128mul.c q(0xf8), q(0xf9), q(0xfa), q(0xfb), q(0xfc), q(0xfd), q(0xfe), q(0xff) \ q 100 crypto/rsa_helper.c key->q = value; q 2356 crypto/testmgr.c char *q; q 2419 crypto/testmgr.c q = data; q 2420 crypto/testmgr.c if (memcmp(q, result, template[i].len)) { q 2423 crypto/testmgr.c hexdump(q, template[i].len); q 286 crypto/vmac.c u64 p, q, t; q 305 crypto/vmac.c q = MUL32(a1, k3); q 306 crypto/vmac.c q += MUL32(a2, k2); q 307 crypto/vmac.c q += MUL32(a3, k1); q 308 crypto/vmac.c q += q; q 309 crypto/vmac.c p += q; q 315 crypto/vmac.c q = MUL32(a2, k3); q 316 crypto/vmac.c q += MUL32(a3, k2); q 317 crypto/vmac.c q += q; q 318 crypto/vmac.c p += q; q 1132 drivers/acpi/ec.c struct acpi_ec_query *q; q 1135 drivers/acpi/ec.c q = kzalloc(sizeof (struct acpi_ec_query), GFP_KERNEL); q 1136 drivers/acpi/ec.c if (!q) q 1138 drivers/acpi/ec.c INIT_WORK(&q->work, acpi_ec_event_processor); q 1139 drivers/acpi/ec.c t = &q->transaction; q 1143 drivers/acpi/ec.c return q; q 1146 drivers/acpi/ec.c static void acpi_ec_delete_query(struct acpi_ec_query *q) q 1148 drivers/acpi/ec.c if (q) { q 1149 drivers/acpi/ec.c if (q->handler) q 1150 drivers/acpi/ec.c acpi_ec_put_query_handler(q->handler); q 1151 drivers/acpi/ec.c kfree(q); q 1157 drivers/acpi/ec.c struct acpi_ec_query *q = container_of(work, struct acpi_ec_query, work); q 1158 drivers/acpi/ec.c struct acpi_ec_query_handler *handler = q->handler; q 1166 drivers/acpi/ec.c acpi_ec_delete_query(q); q 1173 drivers/acpi/ec.c struct acpi_ec_query *q; q 1175 drivers/acpi/ec.c q = acpi_ec_create_query(&value); q 1176 drivers/acpi/ec.c if (!q) q 1184 drivers/acpi/ec.c result = acpi_ec_transaction(ec, &q->transaction); q 1190 drivers/acpi/ec.c q->handler = acpi_ec_get_query_handler_by_value(ec, value); q 1191 drivers/acpi/ec.c if (!q->handler) { q 1206 drivers/acpi/ec.c if (!queue_work(ec_query_wq, &q->work)) { q 1213 drivers/acpi/ec.c acpi_ec_delete_query(q); q 3225 drivers/ata/libata-core.c static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT) q 3227 drivers/ata/libata-core.c q->setup = EZ(t->setup, T); q 3228 drivers/ata/libata-core.c q->act8b = EZ(t->act8b, T); q 3229 drivers/ata/libata-core.c q->rec8b = EZ(t->rec8b, T); q 3230 drivers/ata/libata-core.c q->cyc8b = EZ(t->cyc8b, T); q 3231 drivers/ata/libata-core.c q->active = EZ(t->active, T); q 3232 drivers/ata/libata-core.c 
q->recover = EZ(t->recover, T); q 3233 drivers/ata/libata-core.c q->dmack_hold = EZ(t->dmack_hold, T); q 3234 drivers/ata/libata-core.c q->cycle = EZ(t->cycle, T); q 3235 drivers/ata/libata-core.c q->udma = EZ(t->udma, UT); q 1252 drivers/ata/libata-scsi.c struct request_queue *q = sdev->request_queue; q 1258 drivers/ata/libata-scsi.c blk_queue_max_hw_sectors(q, dev->max_sectors); q 1266 drivers/ata/libata-scsi.c blk_queue_update_dma_pad(q, ATA_DMA_PAD_SZ - 1); q 1269 drivers/ata/libata-scsi.c buf = kmalloc(ATAPI_MAX_DRAIN, q->bounce_gfp | GFP_KERNEL); q 1275 drivers/ata/libata-scsi.c blk_queue_dma_drain(q, atapi_drain_needed, buf, ATAPI_MAX_DRAIN); q 1293 drivers/ata/libata-scsi.c blk_queue_update_dma_alignment(q, sdev->sector_size - 1); q 1356 drivers/ata/libata-scsi.c struct request_queue *q = sdev->request_queue; q 1373 drivers/ata/libata-scsi.c kfree(q->dma_drain_buffer); q 1374 drivers/ata/libata-scsi.c q->dma_drain_buffer = NULL; q 1375 drivers/ata/libata-scsi.c q->dma_drain_size = 0; q 577 drivers/atm/firestream.c static inline struct FS_QENTRY *get_qentry (struct fs_dev *dev, struct queue *q) q 579 drivers/atm/firestream.c return bus_to_virt (read_fs (dev, Q_WP(q->offset)) & Q_ADDR_MASK); q 583 drivers/atm/firestream.c static void submit_qentry (struct fs_dev *dev, struct queue *q, struct FS_QENTRY *qe) q 591 drivers/atm/firestream.c while ((wp = read_fs (dev, Q_WP (q->offset))) & Q_FULL) { q 593 drivers/atm/firestream.c q->offset); q 603 drivers/atm/firestream.c write_fs (dev, Q_WP(q->offset), Q_INCWRAP); q 610 drivers/atm/firestream.c rp = read_fs (dev, Q_RP(q->offset)); q 611 drivers/atm/firestream.c wp = read_fs (dev, Q_WP(q->offset)); q 613 drivers/atm/firestream.c q->offset, rp, wp, wp-rp); q 627 drivers/atm/firestream.c static void submit_queue (struct fs_dev *dev, struct queue *q, q 632 drivers/atm/firestream.c qe = get_qentry (dev, q); q 637 drivers/atm/firestream.c submit_qentry (dev, q, qe); q 654 drivers/atm/firestream.c static void submit_command (struct fs_dev *dev, struct queue *q, q 666 drivers/atm/firestream.c static void process_return_queue (struct fs_dev *dev, struct queue *q) q 672 drivers/atm/firestream.c while (!((rq = read_fs (dev, Q_RP(q->offset))) & Q_EMPTY)) { q 687 drivers/atm/firestream.c write_fs (dev, Q_RP(q->offset), Q_INCWRAP); q 692 drivers/atm/firestream.c static void process_txdone_queue (struct fs_dev *dev, struct queue *q) q 700 drivers/atm/firestream.c while (!((rq = read_fs (dev, Q_RP(q->offset))) & Q_EMPTY)) { q 757 drivers/atm/firestream.c write_fs (dev, Q_RP(q->offset), Q_INCWRAP); q 762 drivers/atm/firestream.c static void process_incoming (struct fs_dev *dev, struct queue *q) q 771 drivers/atm/firestream.c while (!((rq = read_fs (dev, Q_RP(q->offset))) & Q_EMPTY)) { q 840 drivers/atm/firestream.c write_fs (dev, Q_RP(q->offset), Q_INCWRAP); q 347 drivers/block/aoe/aoeblk.c struct request_queue *q; q 394 drivers/block/aoe/aoeblk.c q = blk_mq_init_queue(set); q 395 drivers/block/aoe/aoeblk.c if (IS_ERR(q)) { q 408 drivers/block/aoe/aoeblk.c blk_queue_max_hw_sectors(q, BLK_DEF_MAX_SECTORS); q 409 drivers/block/aoe/aoeblk.c q->backing_dev_info->name = "aoe"; q 410 drivers/block/aoe/aoeblk.c q->backing_dev_info->ra_pages = READ_AHEAD / PAGE_SIZE; q 412 drivers/block/aoe/aoeblk.c d->blkq = gd->queue = q; q 413 drivers/block/aoe/aoeblk.c q->queuedata = d; q 416 drivers/block/aoe/aoeblk.c blk_queue_max_hw_sectors(q, aoe_maxsectors); q 838 drivers/block/aoe/aoecmd.c struct request_queue *q; q 843 drivers/block/aoe/aoecmd.c q = d->blkq; q 844 
drivers/block/aoe/aoecmd.c if (q == NULL) q 1043 drivers/block/aoe/aoecmd.c struct request_queue *q; q 1046 drivers/block/aoe/aoecmd.c q = d->blkq; q 1060 drivers/block/aoe/aoecmd.c blk_mq_run_hw_queues(q, true); q 75 drivers/block/aoe/aoenet.c register char *p, *q; q 82 drivers/block/aoe/aoenet.c for (; *p; p = q + strspn(q, WHITESPACE)) { q 83 drivers/block/aoe/aoenet.c q = p + strcspn(p, WHITESPACE); q 84 drivers/block/aoe/aoenet.c if (q != p) q 85 drivers/block/aoe/aoenet.c len = q - p; q 91 drivers/block/aoe/aoenet.c if (q == p) q 726 drivers/block/ataflop.c struct request_queue *q = unit[drive].disk->queue; q 732 drivers/block/ataflop.c blk_mq_freeze_queue(q); q 733 drivers/block/ataflop.c blk_mq_quiesce_queue(q); q 793 drivers/block/ataflop.c blk_mq_unquiesce_queue(q); q 794 drivers/block/ataflop.c blk_mq_unfreeze_queue(q); q 285 drivers/block/brd.c static blk_qc_t brd_make_request(struct request_queue *q, struct bio *bio) q 557 drivers/block/drbd/drbd_int.h struct list_head q; q 1454 drivers/block/drbd/drbd_int.h extern blk_qc_t drbd_make_request(struct request_queue *q, struct bio *bio); q 1921 drivers/block/drbd/drbd_int.h drbd_queue_work(struct drbd_work_queue *q, struct drbd_work *w) q 1924 drivers/block/drbd/drbd_int.h spin_lock_irqsave(&q->q_lock, flags); q 1925 drivers/block/drbd/drbd_int.h list_add_tail(&w->list, &q->q); q 1926 drivers/block/drbd/drbd_int.h spin_unlock_irqrestore(&q->q_lock, flags); q 1927 drivers/block/drbd/drbd_int.h wake_up(&q->q_wait); q 1931 drivers/block/drbd/drbd_int.h drbd_queue_work_if_unqueued(struct drbd_work_queue *q, struct drbd_work *w) q 1934 drivers/block/drbd/drbd_int.h spin_lock_irqsave(&q->q_lock, flags); q 1936 drivers/block/drbd/drbd_int.h list_add_tail(&w->list, &q->q); q 1937 drivers/block/drbd/drbd_int.h spin_unlock_irqrestore(&q->q_lock, flags); q 1938 drivers/block/drbd/drbd_int.h wake_up(&q->q_wait); q 1947 drivers/block/drbd/drbd_int.h struct drbd_work_queue *q = &connection->sender_work; q 1949 drivers/block/drbd/drbd_int.h wake_up(&q->q_wait); q 918 drivers/block/drbd/drbd_main.c struct request_queue *q) q 920 drivers/block/drbd/drbd_main.c if (q) { q 921 drivers/block/drbd/drbd_main.c p->qlim->physical_block_size = cpu_to_be32(queue_physical_block_size(q)); q 922 drivers/block/drbd/drbd_main.c p->qlim->logical_block_size = cpu_to_be32(queue_logical_block_size(q)); q 923 drivers/block/drbd/drbd_main.c p->qlim->alignment_offset = cpu_to_be32(queue_alignment_offset(q)); q 924 drivers/block/drbd/drbd_main.c p->qlim->io_min = cpu_to_be32(queue_io_min(q)); q 925 drivers/block/drbd/drbd_main.c p->qlim->io_opt = cpu_to_be32(queue_io_opt(q)); q 926 drivers/block/drbd/drbd_main.c p->qlim->discard_enabled = blk_queue_discard(q); q 927 drivers/block/drbd/drbd_main.c p->qlim->write_same_capable = !!q->limits.max_write_same_sectors; q 929 drivers/block/drbd/drbd_main.c q = device->rq_queue; q 930 drivers/block/drbd/drbd_main.c p->qlim->physical_block_size = cpu_to_be32(queue_physical_block_size(q)); q 931 drivers/block/drbd/drbd_main.c p->qlim->logical_block_size = cpu_to_be32(queue_logical_block_size(q)); q 933 drivers/block/drbd/drbd_main.c p->qlim->io_min = cpu_to_be32(queue_io_min(q)); q 934 drivers/block/drbd/drbd_main.c p->qlim->io_opt = cpu_to_be32(queue_io_opt(q)); q 961 drivers/block/drbd/drbd_main.c struct request_queue *q = bdev_get_queue(device->ldev->backing_bdev); q 967 drivers/block/drbd/drbd_main.c max_bio_size = queue_max_hw_sectors(q) << 9; q 969 drivers/block/drbd/drbd_main.c assign_p_sizes_qlim(device, p, q); q 2089 
drivers/block/drbd/drbd_main.c D_ASSERT(device, list_empty(&first_peer_device(device)->connection->sender_work.q)); q 2427 drivers/block/drbd/drbd_main.c struct request_queue *q; q 2455 drivers/block/drbd/drbd_main.c q = bdev_get_queue(device->ldev->backing_bdev); q 2456 drivers/block/drbd/drbd_main.c r = bdi_congested(q->backing_dev_info, bdi_bits); q 2476 drivers/block/drbd/drbd_main.c INIT_LIST_HEAD(&wq->q); q 2782 drivers/block/drbd/drbd_main.c struct request_queue *q; q 2804 drivers/block/drbd/drbd_main.c q = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE); q 2805 drivers/block/drbd/drbd_main.c if (!q) q 2807 drivers/block/drbd/drbd_main.c device->rq_queue = q; q 2808 drivers/block/drbd/drbd_main.c q->queuedata = device; q 2817 drivers/block/drbd/drbd_main.c disk->queue = q; q 2828 drivers/block/drbd/drbd_main.c q->backing_dev_info->congested_fn = drbd_congested; q 2829 drivers/block/drbd/drbd_main.c q->backing_dev_info->congested_data = device; q 2831 drivers/block/drbd/drbd_main.c blk_queue_make_request(q, drbd_make_request); q 2832 drivers/block/drbd/drbd_main.c blk_queue_write_cache(q, true, true); q 2835 drivers/block/drbd/drbd_main.c blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE_SAFE >> 8); q 2926 drivers/block/drbd/drbd_main.c blk_cleanup_queue(q); q 1190 drivers/block/drbd/drbd_nl.c static void blk_queue_discard_granularity(struct request_queue *q, unsigned int granularity) q 1192 drivers/block/drbd/drbd_nl.c q->limits.discard_granularity = granularity; q 1206 drivers/block/drbd/drbd_nl.c struct request_queue *q, q 1228 drivers/block/drbd/drbd_nl.c blk_queue_discard_granularity(q, 512); q 1229 drivers/block/drbd/drbd_nl.c q->limits.max_discard_sectors = drbd_max_discard_sectors(connection); q 1230 drivers/block/drbd/drbd_nl.c blk_queue_flag_set(QUEUE_FLAG_DISCARD, q); q 1231 drivers/block/drbd/drbd_nl.c q->limits.max_write_zeroes_sectors = drbd_max_discard_sectors(connection); q 1233 drivers/block/drbd/drbd_nl.c blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q); q 1234 drivers/block/drbd/drbd_nl.c blk_queue_discard_granularity(q, 0); q 1235 drivers/block/drbd/drbd_nl.c q->limits.max_discard_sectors = 0; q 1236 drivers/block/drbd/drbd_nl.c q->limits.max_write_zeroes_sectors = 0; q 1240 drivers/block/drbd/drbd_nl.c static void fixup_discard_if_not_supported(struct request_queue *q) q 1246 drivers/block/drbd/drbd_nl.c if (!blk_queue_discard(q)) { q 1247 drivers/block/drbd/drbd_nl.c blk_queue_max_discard_sectors(q, 0); q 1248 drivers/block/drbd/drbd_nl.c blk_queue_discard_granularity(q, 0); q 1252 drivers/block/drbd/drbd_nl.c static void fixup_write_zeroes(struct drbd_device *device, struct request_queue *q) q 1262 drivers/block/drbd/drbd_nl.c q->limits.max_write_zeroes_sectors = DRBD_MAX_BBIO_SECTORS; q 1264 drivers/block/drbd/drbd_nl.c q->limits.max_write_zeroes_sectors = 0; q 1268 drivers/block/drbd/drbd_nl.c struct request_queue *q, q 1290 drivers/block/drbd/drbd_nl.c unsigned int me_lbs = queue_logical_block_size(q); q 1312 drivers/block/drbd/drbd_nl.c blk_queue_logical_block_size(q, peer_lbs); q 1329 drivers/block/drbd/drbd_nl.c blk_queue_max_write_same_sectors(q, can_do ? DRBD_MAX_BBIO_SECTORS : 0); q 1335 drivers/block/drbd/drbd_nl.c struct request_queue * const q = device->rq_queue; q 1354 drivers/block/drbd/drbd_nl.c blk_set_stacking_limits(&q->limits); q 1357 drivers/block/drbd/drbd_nl.c blk_queue_max_hw_sectors(q, max_hw_sectors); q 1359 drivers/block/drbd/drbd_nl.c blk_queue_max_segments(q, max_segments ? 
max_segments : BLK_MAX_SEGMENTS); q 1360 drivers/block/drbd/drbd_nl.c blk_queue_segment_boundary(q, PAGE_SIZE-1); q 1361 drivers/block/drbd/drbd_nl.c decide_on_discard_support(device, q, b, discard_zeroes_if_aligned); q 1362 drivers/block/drbd/drbd_nl.c decide_on_write_same_support(device, q, b, o, disable_write_same); q 1365 drivers/block/drbd/drbd_nl.c blk_queue_stack_limits(q, b); q 1367 drivers/block/drbd/drbd_nl.c if (q->backing_dev_info->ra_pages != q 1370 drivers/block/drbd/drbd_nl.c q->backing_dev_info->ra_pages, q 1372 drivers/block/drbd/drbd_nl.c q->backing_dev_info->ra_pages = q 1376 drivers/block/drbd/drbd_nl.c fixup_discard_if_not_supported(q); q 1377 drivers/block/drbd/drbd_nl.c fixup_write_zeroes(device, q); q 1515 drivers/block/drbd/drbd_nl.c struct request_queue * const q = nbc->backing_bdev->bd_disk->queue; q 1522 drivers/block/drbd/drbd_nl.c if (!blk_queue_discard(q)) { q 1533 drivers/block/drbd/drbd_nl.c if (q->limits.discard_granularity > disk_conf->rs_discard_granularity) q 1534 drivers/block/drbd/drbd_nl.c disk_conf->rs_discard_granularity = q->limits.discard_granularity; q 1536 drivers/block/drbd/drbd_nl.c remainder = disk_conf->rs_discard_granularity % q->limits.discard_granularity; q 1539 drivers/block/drbd/drbd_nl.c if (disk_conf->rs_discard_granularity > q->limits.max_discard_sectors << 9) q 1540 drivers/block/drbd/drbd_nl.c disk_conf->rs_discard_granularity = q->limits.max_discard_sectors << 9; q 3373 drivers/block/drbd/drbd_nl.c struct request_queue *q; q 3387 drivers/block/drbd/drbd_nl.c q = bdev_get_queue(device->ldev->backing_bdev); q 3389 drivers/block/drbd/drbd_nl.c bdi_congested(q->backing_dev_info, q 1514 drivers/block/drbd/drbd_receiver.c struct request_queue *q = bdev_get_queue(bdev); q 1524 drivers/block/drbd/drbd_receiver.c granularity = max(q->limits.discard_granularity >> 9, 1U); q 1527 drivers/block/drbd/drbd_receiver.c max_discard_sectors = min(q->limits.max_discard_sectors, (1U << 22)); q 1578 drivers/block/drbd/drbd_receiver.c struct request_queue *q = bdev_get_queue(device->ldev->backing_bdev); q 1582 drivers/block/drbd/drbd_receiver.c if (!blk_queue_discard(q)) q 27 drivers/block/drbd/drbd_req.c struct request_queue *q = device->rq_queue; q 29 drivers/block/drbd/drbd_req.c generic_start_io_acct(q, bio_op(req->master_bio), q 36 drivers/block/drbd/drbd_req.c struct request_queue *q = device->rq_queue; q 38 drivers/block/drbd/drbd_req.c generic_end_io_acct(q, bio_op(req->master_bio), q 1615 drivers/block/drbd/drbd_req.c blk_qc_t drbd_make_request(struct request_queue *q, struct bio *bio) q 1617 drivers/block/drbd/drbd_req.c struct drbd_device *device = (struct drbd_device *) q->queuedata; q 1620 drivers/block/drbd/drbd_req.c blk_queue_split(q, &bio); q 2072 drivers/block/drbd/drbd_worker.c list_splice_tail_init(&queue->q, work_list); q 2109 drivers/block/drbd/drbd_worker.c if (!list_empty(&connection->sender_work.q)) q 2110 drivers/block/drbd/drbd_worker.c list_splice_tail_init(&connection->sender_work.q, work_list); q 430 drivers/block/loop.c struct request_queue *q = lo->lo_queue; q 435 drivers/block/loop.c if (!blk_queue_discard(q)) { q 865 drivers/block/loop.c struct request_queue *q = lo->lo_queue; q 878 drivers/block/loop.c blk_queue_max_discard_sectors(q, q 881 drivers/block/loop.c blk_queue_max_write_zeroes_sectors(q, q 891 drivers/block/loop.c q->limits.discard_granularity = 0; q 892 drivers/block/loop.c q->limits.discard_alignment = 0; q 893 drivers/block/loop.c blk_queue_max_discard_sectors(q, 0); q 894 drivers/block/loop.c 
blk_queue_max_write_zeroes_sectors(q, 0); q 897 drivers/block/loop.c q->limits.discard_granularity = inode->i_sb->s_blocksize; q 898 drivers/block/loop.c q->limits.discard_alignment = 0; q 900 drivers/block/loop.c blk_queue_max_discard_sectors(q, UINT_MAX >> 9); q 901 drivers/block/loop.c blk_queue_max_write_zeroes_sectors(q, UINT_MAX >> 9); q 904 drivers/block/loop.c if (q->limits.max_write_zeroes_sectors) q 905 drivers/block/loop.c blk_queue_flag_set(QUEUE_FLAG_DISCARD, q); q 907 drivers/block/loop.c blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q); q 938 drivers/block/loop.c struct request_queue *q = lo->lo_queue; q 946 drivers/block/loop.c blk_queue_flag_set(QUEUE_FLAG_NONROT, q); q 948 drivers/block/loop.c blk_queue_flag_clear(QUEUE_FLAG_NONROT, q); q 1926 drivers/block/loop.c struct loop_device *lo = rq->q->queuedata; q 1961 drivers/block/loop.c struct loop_device *lo = rq->q->queuedata; q 1017 drivers/block/mtip32xx/mtip32xx.c blk_execute_rq(rq->q, NULL, rq, true); q 2587 drivers/block/mtip32xx/mtip32xx.c struct driver_data *dd = rq->q->queuedata; q 3536 drivers/block/mtip32xx/mtip32xx.c struct driver_data *dd = req->q->queuedata; q 223 drivers/block/nbd.c struct request_queue *q; q 226 drivers/block/nbd.c q = disk->queue; q 228 drivers/block/nbd.c blk_cleanup_queue(q); q 1662 drivers/block/nbd.c struct request_queue *q; q 1702 drivers/block/nbd.c q = blk_mq_init_queue(&nbd->tag_set); q 1703 drivers/block/nbd.c if (IS_ERR(q)) { q 1704 drivers/block/nbd.c err = PTR_ERR(q); q 1707 drivers/block/nbd.c disk->queue = q; q 76 drivers/block/null_blk.h struct request_queue *q; q 1123 drivers/block/null_blk_main.c struct request_queue *q = nullb->q; q 1126 drivers/block/null_blk_main.c blk_mq_stop_hw_queues(q); q 1131 drivers/block/null_blk_main.c struct request_queue *q = nullb->q; q 1134 drivers/block/null_blk_main.c blk_mq_start_stopped_hw_queues(q, true); q 1284 drivers/block/null_blk_main.c static blk_qc_t null_queue_bio(struct request_queue *q, struct bio *bio) q 1288 drivers/block/null_blk_main.c struct nullb *nullb = q->queuedata; q 1406 drivers/block/null_blk_main.c blk_cleanup_queue(nullb->q); q 1422 drivers/block/null_blk_main.c nullb->q->limits.discard_granularity = nullb->dev->blocksize; q 1423 drivers/block/null_blk_main.c nullb->q->limits.discard_alignment = nullb->dev->blocksize; q 1424 drivers/block/null_blk_main.c blk_queue_max_discard_sectors(nullb->q, UINT_MAX >> 9); q 1425 drivers/block/null_blk_main.c blk_queue_flag_set(QUEUE_FLAG_DISCARD, nullb->q); q 1456 drivers/block/null_blk_main.c struct request_queue *q = nullb->q; q 1461 drivers/block/null_blk_main.c queue_for_each_hw_ctx(q, hctx, i) { q 1544 drivers/block/null_blk_main.c disk->queue = nullb->q; q 1669 drivers/block/null_blk_main.c nullb->q = blk_mq_init_queue(nullb->tag_set); q 1670 drivers/block/null_blk_main.c if (IS_ERR(nullb->q)) { q 1676 drivers/block/null_blk_main.c nullb->q = blk_alloc_queue_node(GFP_KERNEL, dev->home_node); q 1677 drivers/block/null_blk_main.c if (!nullb->q) { q 1681 drivers/block/null_blk_main.c blk_queue_make_request(nullb->q, null_queue_bio); q 1694 drivers/block/null_blk_main.c blk_queue_write_cache(nullb->q, true, true); q 1702 drivers/block/null_blk_main.c blk_queue_chunk_sectors(nullb->q, dev->zone_size_sects); q 1703 drivers/block/null_blk_main.c nullb->q->limits.zoned = BLK_ZONED_HM; q 1704 drivers/block/null_blk_main.c blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, nullb->q); q 1705 drivers/block/null_blk_main.c blk_queue_required_elevator_features(nullb->q, q 1709 
drivers/block/null_blk_main.c nullb->q->queuedata = nullb; q 1710 drivers/block/null_blk_main.c blk_queue_flag_set(QUEUE_FLAG_NONROT, nullb->q); q 1711 drivers/block/null_blk_main.c blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, nullb->q); q 1718 drivers/block/null_blk_main.c blk_queue_logical_block_size(nullb->q, dev->blocksize); q 1719 drivers/block/null_blk_main.c blk_queue_physical_block_size(nullb->q, dev->blocksize); q 1738 drivers/block/null_blk_main.c blk_cleanup_queue(nullb->q); q 400 drivers/block/paride/pd.c struct request_queue *q; q 405 drivers/block/paride/pd.c q = disk ? disk->queue : NULL; q 408 drivers/block/paride/pd.c if (q) { q 409 drivers/block/paride/pd.c struct pd_unit *disk = q->queuedata; q 702 drivers/block/pktcdvd.c struct request_queue *q = bdev_get_queue(pd->bdev); q 706 drivers/block/pktcdvd.c rq = blk_get_request(q, (cgc->data_direction == CGC_DATA_WRITE) ? q 712 drivers/block/pktcdvd.c ret = blk_rq_map_kern(q, rq, cgc->buffer, cgc->buflen, q 725 drivers/block/pktcdvd.c blk_execute_rq(rq->q, pd->bdev->bd_disk, rq, 0); q 924 drivers/block/pktcdvd.c static int pkt_set_segment_merging(struct pktcdvd_device *pd, struct request_queue *q) q 927 drivers/block/pktcdvd.c <= queue_max_segments(q)) { q 934 drivers/block/pktcdvd.c <= queue_max_segments(q)) { q 2175 drivers/block/pktcdvd.c struct request_queue *q; q 2197 drivers/block/pktcdvd.c q = bdev_get_queue(pd->bdev); q 2206 drivers/block/pktcdvd.c blk_queue_max_hw_sectors(q, pd->settings.size); q 2213 drivers/block/pktcdvd.c ret = pkt_set_segment_merging(pd, q); q 2347 drivers/block/pktcdvd.c static void pkt_make_request_write(struct request_queue *q, struct bio *bio) q 2349 drivers/block/pktcdvd.c struct pktcdvd_device *pd = q->queuedata; q 2395 drivers/block/pktcdvd.c set_bdi_congested(q->backing_dev_info, BLK_RW_ASYNC); q 2431 drivers/block/pktcdvd.c static blk_qc_t pkt_make_request(struct request_queue *q, struct bio *bio) q 2437 drivers/block/pktcdvd.c blk_queue_split(q, &bio); q 2439 drivers/block/pktcdvd.c pd = q->queuedata; q 2483 drivers/block/pktcdvd.c pkt_make_request_write(q, split); q 2494 drivers/block/pktcdvd.c struct request_queue *q = pd->disk->queue; q 2496 drivers/block/pktcdvd.c blk_queue_make_request(q, pkt_make_request); q 2497 drivers/block/pktcdvd.c blk_queue_logical_block_size(q, CD_FRAMESIZE); q 2498 drivers/block/pktcdvd.c blk_queue_max_hw_sectors(q, PACKET_MAX_SECTORS); q 2499 drivers/block/pktcdvd.c q->queuedata = pd; q 198 drivers/block/ps3disk.c struct request_queue *q = hctx->queue; q 199 drivers/block/ps3disk.c struct ps3_storage_device *dev = q->queuedata; q 588 drivers/block/ps3vram.c static blk_qc_t ps3vram_make_request(struct request_queue *q, struct bio *bio) q 590 drivers/block/ps3vram.c struct ps3_system_bus_device *dev = q->queuedata; q 596 drivers/block/ps3vram.c blk_queue_split(q, &bio); q 4797 drivers/block/rbd.c struct rbd_device *rbd_dev = rq->q->queuedata; q 5123 drivers/block/rbd.c struct request_queue *q; q 5156 drivers/block/rbd.c q = blk_mq_init_queue(&rbd_dev->tag_set); q 5157 drivers/block/rbd.c if (IS_ERR(q)) { q 5158 drivers/block/rbd.c err = PTR_ERR(q); q 5162 drivers/block/rbd.c blk_queue_flag_set(QUEUE_FLAG_NONROT, q); q 5165 drivers/block/rbd.c blk_queue_max_hw_sectors(q, objset_bytes >> SECTOR_SHIFT); q 5166 drivers/block/rbd.c q->limits.max_sectors = queue_max_hw_sectors(q); q 5167 drivers/block/rbd.c blk_queue_max_segments(q, USHRT_MAX); q 5168 drivers/block/rbd.c blk_queue_max_segment_size(q, UINT_MAX); q 5169 drivers/block/rbd.c blk_queue_io_min(q, 
rbd_dev->opts->alloc_size); q 5170 drivers/block/rbd.c blk_queue_io_opt(q, rbd_dev->opts->alloc_size); q 5173 drivers/block/rbd.c blk_queue_flag_set(QUEUE_FLAG_DISCARD, q); q 5174 drivers/block/rbd.c q->limits.discard_granularity = rbd_dev->opts->alloc_size; q 5175 drivers/block/rbd.c blk_queue_max_discard_sectors(q, objset_bytes >> SECTOR_SHIFT); q 5176 drivers/block/rbd.c blk_queue_max_write_zeroes_sectors(q, objset_bytes >> SECTOR_SHIFT); q 5180 drivers/block/rbd.c q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES; q 5186 drivers/block/rbd.c WARN_ON(!blk_get_queue(q)); q 5187 drivers/block/rbd.c disk->queue = q; q 5188 drivers/block/rbd.c q->queuedata = rbd_dev; q 134 drivers/block/rsxx/dev.c static blk_qc_t rsxx_make_request(struct request_queue *q, struct bio *bio) q 136 drivers/block/rsxx/dev.c struct rsxx_cardinfo *card = q->queuedata; q 140 drivers/block/rsxx/dev.c blk_queue_split(q, &bio); q 243 drivers/block/rsxx/dma.c struct list_head *q, unsigned int done) q 249 drivers/block/rsxx/dma.c list_for_each_entry_safe(dma, tmp, q, list) { q 601 drivers/block/rsxx/dma.c struct list_head *q, q 624 drivers/block/rsxx/dma.c list_add_tail(&dma->list, q); q 630 drivers/block/rsxx/dma.c struct list_head *q, q 661 drivers/block/rsxx/dma.c list_add_tail(&dma->list, q); q 374 drivers/block/rsxx/rsxx_priv.h struct list_head *q, q 443 drivers/block/skd_main.c static bool skd_fail_all(struct request_queue *q) q 445 drivers/block/skd_main.c struct skd_device *skdev = q->queuedata; q 482 drivers/block/skd_main.c struct request_queue *const q = req->q; q 483 drivers/block/skd_main.c struct skd_device *skdev = q->queuedata; q 495 drivers/block/skd_main.c return skd_fail_all(q) ? BLK_STS_IOERR : BLK_STS_RESOURCE; q 505 drivers/block/skd_main.c tag, skd_max_queue_depth, q->nr_requests); q 604 drivers/block/skd_main.c struct skd_device *skdev = req->q->queuedata; q 2818 drivers/block/skd_main.c struct request_queue *q; q 2848 drivers/block/skd_main.c q = blk_mq_init_queue(&skdev->tag_set); q 2849 drivers/block/skd_main.c if (IS_ERR(q)) { q 2851 drivers/block/skd_main.c rc = PTR_ERR(q); q 2854 drivers/block/skd_main.c q->queuedata = skdev; q 2856 drivers/block/skd_main.c skdev->queue = q; q 2857 drivers/block/skd_main.c disk->queue = q; q 2859 drivers/block/skd_main.c blk_queue_write_cache(q, true, true); q 2860 drivers/block/skd_main.c blk_queue_max_segments(q, skdev->sgs_per_request); q 2861 drivers/block/skd_main.c blk_queue_max_hw_sectors(q, SKD_N_MAX_SECTORS); q 2864 drivers/block/skd_main.c blk_queue_io_opt(q, 8192); q 2866 drivers/block/skd_main.c blk_queue_flag_set(QUEUE_FLAG_NONROT, q); q 2867 drivers/block/skd_main.c blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q); q 2869 drivers/block/skd_main.c blk_queue_rq_timeout(q, 8 * HZ); q 485 drivers/block/sunvdc.c nsg = blk_rq_map_sg(req->q, req, sg); q 782 drivers/block/sunvdc.c static void cleanup_queue(struct request_queue *q) q 784 drivers/block/sunvdc.c struct vdc_port *port = q->queuedata; q 786 drivers/block/sunvdc.c blk_cleanup_queue(q); q 792 drivers/block/sunvdc.c struct request_queue *q; q 794 drivers/block/sunvdc.c q = blk_mq_init_sq_queue(&port->tag_set, &vdc_mq_ops, VDC_TX_RING_SIZE, q 796 drivers/block/sunvdc.c if (IS_ERR(q)) q 797 drivers/block/sunvdc.c return q; q 799 drivers/block/sunvdc.c q->queuedata = port; q 800 drivers/block/sunvdc.c return q; q 805 drivers/block/sunvdc.c struct request_queue *q; q 840 drivers/block/sunvdc.c q = init_queue(port); q 841 drivers/block/sunvdc.c if (IS_ERR(q)) { q 844 drivers/block/sunvdc.c 
return PTR_ERR(q); q 850 drivers/block/sunvdc.c cleanup_queue(q); q 857 drivers/block/sunvdc.c blk_queue_segment_boundary(q, PAGE_SIZE - 1); q 858 drivers/block/sunvdc.c blk_queue_max_segment_size(q, PAGE_SIZE); q 860 drivers/block/sunvdc.c blk_queue_max_segments(q, port->ring_cookies); q 861 drivers/block/sunvdc.c blk_queue_max_hw_sectors(q, port->max_xfer_size); q 867 drivers/block/sunvdc.c g->queue = q; q 894 drivers/block/sunvdc.c blk_queue_physical_block_size(q, port->vdisk_phys_blksz); q 1126 drivers/block/sunvdc.c struct request_queue *q = port->disk->queue; q 1135 drivers/block/sunvdc.c blk_mq_freeze_queue(q); q 1136 drivers/block/sunvdc.c blk_mq_quiesce_queue(q); q 1140 drivers/block/sunvdc.c blk_mq_unquiesce_queue(q); q 1141 drivers/block/sunvdc.c blk_mq_unfreeze_queue(q); q 833 drivers/block/swim.c struct request_queue *q; q 841 drivers/block/swim.c q = blk_mq_init_sq_queue(&swd->unit[drive].tag_set, &swim_mq_ops, q 843 drivers/block/swim.c if (IS_ERR(q)) { q 844 drivers/block/swim.c err = PTR_ERR(q); q 848 drivers/block/swim.c swd->unit[drive].disk->queue = q; q 825 drivers/block/swim3.c struct request_queue *q = disks[fs->index]->queue; q 834 drivers/block/swim3.c blk_mq_freeze_queue(q); q 835 drivers/block/swim3.c blk_mq_quiesce_queue(q); q 836 drivers/block/swim3.c blk_mq_unquiesce_queue(q); q 837 drivers/block/swim3.c blk_mq_unfreeze_queue(q); q 663 drivers/block/sx8.c static inline void carm_push_q (struct carm_host *host, struct request_queue *q) q 667 drivers/block/sx8.c blk_mq_stop_hw_queues(q); q 668 drivers/block/sx8.c VPRINTK("STOPPED QUEUE %p\n", q); q 670 drivers/block/sx8.c host->wait_q[idx] = q; q 690 drivers/block/sx8.c struct request_queue *q = carm_pop_q(host); q 691 drivers/block/sx8.c if (q) { q 692 drivers/block/sx8.c blk_mq_start_hw_queues(q); q 693 drivers/block/sx8.c VPRINTK("STARTED QUEUE %p\n", q); q 705 drivers/block/sx8.c struct request_queue *q = hctx->queue; q 707 drivers/block/sx8.c struct carm_port *port = q->queuedata; q 727 drivers/block/sx8.c n_elem = blk_rq_map_sg(q, rq, sg); q 793 drivers/block/sx8.c carm_push_q(host, q); q 1346 drivers/block/sx8.c struct request_queue *q; q 1363 drivers/block/sx8.c q = blk_mq_init_queue(&host->tag_set); q 1364 drivers/block/sx8.c if (IS_ERR(q)) q 1365 drivers/block/sx8.c return PTR_ERR(q); q 1367 drivers/block/sx8.c blk_queue_max_segments(q, CARM_MAX_REQ_SG); q 1368 drivers/block/sx8.c blk_queue_segment_boundary(q, CARM_SG_BOUNDARY); q 1370 drivers/block/sx8.c q->queuedata = port; q 1371 drivers/block/sx8.c disk->queue = q; q 1410 drivers/block/sx8.c struct request_queue *q; q 1472 drivers/block/sx8.c q = blk_mq_init_queue(&host->tag_set); q 1473 drivers/block/sx8.c if (IS_ERR(q)) { q 1474 drivers/block/sx8.c rc = PTR_ERR(q); q 1479 drivers/block/sx8.c host->oob_q = q; q 1480 drivers/block/sx8.c q->queuedata = host; q 522 drivers/block/umem.c static blk_qc_t mm_make_request(struct request_queue *q, struct bio *bio) q 524 drivers/block/umem.c struct cardinfo *card = q->queuedata; q 529 drivers/block/umem.c blk_queue_split(q, &bio); q 135 drivers/block/virtio_blk.c struct virtio_blk *vblk = req->q->queuedata; q 388 drivers/block/virtio_blk.c struct request_queue *q = vblk->disk->queue; q 392 drivers/block/virtio_blk.c req = blk_get_request(q, REQ_OP_DRV_IN, 0); q 396 drivers/block/virtio_blk.c err = blk_rq_map_kern(q, req, id_str, VIRTIO_BLK_ID_BYTES, GFP_KERNEL); q 520 drivers/block/virtio_blk.c struct request_queue *q = vblk->disk->queue; q 535 drivers/block/virtio_blk.c nblocks = DIV_ROUND_UP_ULL(capacity, 
queue_logical_block_size(q) >> 9); q 537 drivers/block/virtio_blk.c string_get_size(nblocks, queue_logical_block_size(q), q 539 drivers/block/virtio_blk.c string_get_size(nblocks, queue_logical_block_size(q), q 547 drivers/block/virtio_blk.c queue_logical_block_size(q), q 799 drivers/block/virtio_blk.c struct request_queue *q; q 878 drivers/block/virtio_blk.c q = blk_mq_init_queue(&vblk->tag_set); q 879 drivers/block/virtio_blk.c if (IS_ERR(q)) { q 883 drivers/block/virtio_blk.c vblk->disk->queue = q; q 885 drivers/block/virtio_blk.c q->queuedata = vblk; q 904 drivers/block/virtio_blk.c blk_queue_max_segments(q, vblk->sg_elems-2); q 907 drivers/block/virtio_blk.c blk_queue_max_hw_sectors(q, -1U); q 918 drivers/block/virtio_blk.c blk_queue_max_segment_size(q, max_size); q 925 drivers/block/virtio_blk.c blk_queue_logical_block_size(q, blk_size); q 927 drivers/block/virtio_blk.c blk_size = queue_logical_block_size(q); q 934 drivers/block/virtio_blk.c blk_queue_physical_block_size(q, q 941 drivers/block/virtio_blk.c blk_queue_alignment_offset(q, blk_size * alignment_offset); q 947 drivers/block/virtio_blk.c blk_queue_io_min(q, blk_size * min_io_size); q 953 drivers/block/virtio_blk.c blk_queue_io_opt(q, blk_size * opt_io_size); q 956 drivers/block/virtio_blk.c q->limits.discard_granularity = blk_size; q 960 drivers/block/virtio_blk.c q->limits.discard_alignment = v ? v << SECTOR_SHIFT : 0; q 964 drivers/block/virtio_blk.c blk_queue_max_discard_sectors(q, v ? v : UINT_MAX); q 968 drivers/block/virtio_blk.c blk_queue_max_discard_segments(q, q 972 drivers/block/virtio_blk.c blk_queue_flag_set(QUEUE_FLAG_DISCARD, q); q 978 drivers/block/virtio_blk.c blk_queue_max_write_zeroes_sectors(q, v ? v : UINT_MAX); q 459 drivers/block/xen-blkback/xenbus.c struct request_queue *q; q 491 drivers/block/xen-blkback/xenbus.c q = bdev_get_queue(bdev); q 492 drivers/block/xen-blkback/xenbus.c if (q && test_bit(QUEUE_FLAG_WC, &q->queue_flags)) q 495 drivers/block/xen-blkback/xenbus.c if (q && blk_queue_secure_erase(q)) q 550 drivers/block/xen-blkback/xenbus.c struct request_queue *q = bdev_get_queue(bdev); q 555 drivers/block/xen-blkback/xenbus.c if (blk_queue_discard(q)) { q 558 drivers/block/xen-blkback/xenbus.c q->limits.discard_granularity); q 565 drivers/block/xen-blkback/xenbus.c q->limits.discard_alignment); q 742 drivers/block/xen-blkfront.c num_sg = blk_rq_map_sg(req->q, req, rinfo->shadow[id].sg); q 464 drivers/block/xsysace.c static bool ace_has_next_request(struct request_queue *q) q 466 drivers/block/xsysace.c struct ace_device *ace = q->queuedata; q 472 drivers/block/xsysace.c static struct request *ace_get_next_request(struct request_queue *q) q 474 drivers/block/xsysace.c struct ace_device *ace = q->queuedata; q 1510 drivers/block/zram/zram_drv.c struct request_queue *q = zram->disk->queue; q 1513 drivers/block/zram/zram_drv.c generic_start_io_acct(q, op, bvec->bv_len >> SECTOR_SHIFT, q 1525 drivers/block/zram/zram_drv.c generic_end_io_acct(q, op, &zram->disk->part0, start_time); q 1346 drivers/bus/ti-sysc.c const struct sysc_revision_quirk *q; q 1350 drivers/bus/ti-sysc.c q = &sysc_revision_quirks[i]; q 1352 drivers/bus/ti-sysc.c if (!q->base) q 1355 drivers/bus/ti-sysc.c if (q->base != ddata->module_pa) q 1358 drivers/bus/ti-sysc.c if (q->rev_offset >= 0 && q 1359 drivers/bus/ti-sysc.c q->rev_offset != ddata->offsets[SYSC_REVISION]) q 1362 drivers/bus/ti-sysc.c if (q->sysc_offset >= 0 && q 1363 drivers/bus/ti-sysc.c q->sysc_offset != ddata->offsets[SYSC_SYSCONFIG]) q 1366 drivers/bus/ti-sysc.c if 
(q->syss_offset >= 0 && q 1367 drivers/bus/ti-sysc.c q->syss_offset != ddata->offsets[SYSC_SYSSTATUS]) q 1370 drivers/bus/ti-sysc.c ddata->name = q->name; q 1371 drivers/bus/ti-sysc.c ddata->cfg.quirks |= q->quirks; q 1378 drivers/bus/ti-sysc.c const struct sysc_revision_quirk *q; q 1382 drivers/bus/ti-sysc.c q = &sysc_revision_quirks[i]; q 1384 drivers/bus/ti-sysc.c if (q->base && q->base != ddata->module_pa) q 1387 drivers/bus/ti-sysc.c if (q->rev_offset >= 0 && q 1388 drivers/bus/ti-sysc.c q->rev_offset != ddata->offsets[SYSC_REVISION]) q 1391 drivers/bus/ti-sysc.c if (q->sysc_offset >= 0 && q 1392 drivers/bus/ti-sysc.c q->sysc_offset != ddata->offsets[SYSC_SYSCONFIG]) q 1395 drivers/bus/ti-sysc.c if (q->syss_offset >= 0 && q 1396 drivers/bus/ti-sysc.c q->syss_offset != ddata->offsets[SYSC_SYSSTATUS]) q 1399 drivers/bus/ti-sysc.c if (q->revision == ddata->revision || q 1400 drivers/bus/ti-sysc.c (q->revision & q->revision_mask) == q 1401 drivers/bus/ti-sysc.c (ddata->revision & q->revision_mask)) { q 1402 drivers/bus/ti-sysc.c ddata->name = q->name; q 1403 drivers/bus/ti-sysc.c ddata->cfg.quirks |= q->quirks; q 2176 drivers/cdrom/cdrom.c struct request_queue *q = cdi->disk->queue; q 2183 drivers/cdrom/cdrom.c if (!q) q 2186 drivers/cdrom/cdrom.c if (!blk_queue_scsi_passthrough(q)) { q 2198 drivers/cdrom/cdrom.c if (nr * CD_FRAMESIZE_RAW > (queue_max_sectors(q) << 9)) q 2199 drivers/cdrom/cdrom.c nr = (queue_max_sectors(q) << 9) / CD_FRAMESIZE_RAW; q 2203 drivers/cdrom/cdrom.c rq = blk_get_request(q, REQ_OP_SCSI_IN, 0); q 2210 drivers/cdrom/cdrom.c ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL); q 2231 drivers/cdrom/cdrom.c blk_execute_rq(q, cdi->disk, rq, 0); q 2617 drivers/cdrom/cdrom.c struct cdrom_subchnl q; q 2623 drivers/cdrom/cdrom.c if (copy_from_user(&q, argp, sizeof(q))) q 2626 drivers/cdrom/cdrom.c requested = q.cdsc_format; q 2629 drivers/cdrom/cdrom.c q.cdsc_format = CDROM_MSF; q 2631 drivers/cdrom/cdrom.c ret = cdi->ops->audio_ioctl(cdi, CDROMSUBCHNL, &q); q 2635 drivers/cdrom/cdrom.c back = q.cdsc_format; /* local copy */ q 2636 drivers/cdrom/cdrom.c sanitize_format(&q.cdsc_absaddr, &back, requested); q 2637 drivers/cdrom/cdrom.c sanitize_format(&q.cdsc_reladdr, &q.cdsc_format, requested); q 2639 drivers/cdrom/cdrom.c if (copy_to_user(argp, &q, sizeof(q))) q 3044 drivers/cdrom/cdrom.c struct cdrom_subchnl q; q 3046 drivers/cdrom/cdrom.c if (copy_from_user(&q, (struct cdrom_subchnl __user *)arg, sizeof(q))) q 3048 drivers/cdrom/cdrom.c requested = q.cdsc_format; q 3053 drivers/cdrom/cdrom.c ret = cdrom_read_subchannel(cdi, &q, 0); q 3056 drivers/cdrom/cdrom.c back = q.cdsc_format; /* local copy */ q 3057 drivers/cdrom/cdrom.c sanitize_format(&q.cdsc_absaddr, &back, requested); q 3058 drivers/cdrom/cdrom.c sanitize_format(&q.cdsc_reladdr, &q.cdsc_format, requested); q 3059 drivers/cdrom/cdrom.c if (copy_to_user((struct cdrom_subchnl __user *)arg, &q, sizeof(q))) q 166 drivers/char/apm-emulation.c static inline int queue_empty(struct apm_queue *q) q 168 drivers/char/apm-emulation.c return q->event_head == q->event_tail; q 171 drivers/char/apm-emulation.c static inline apm_event_t queue_get_event(struct apm_queue *q) q 173 drivers/char/apm-emulation.c q->event_tail = (q->event_tail + 1) % APM_MAX_EVENTS; q 174 drivers/char/apm-emulation.c return q->events[q->event_tail]; q 177 drivers/char/apm-emulation.c static void queue_add_event(struct apm_queue *q, apm_event_t event) q 179 drivers/char/apm-emulation.c q->event_head = (q->event_head + 1) % APM_MAX_EVENTS; q 180 
drivers/char/apm-emulation.c if (q->event_head == q->event_tail) { q 185 drivers/char/apm-emulation.c q->event_tail = (q->event_tail + 1) % APM_MAX_EVENTS; q 187 drivers/char/apm-emulation.c q->events[q->event_head] = event; q 681 drivers/char/ipmi/ipmi_msghandler.c static void free_recv_msg_list(struct list_head *q) q 685 drivers/char/ipmi/ipmi_msghandler.c list_for_each_entry_safe(msg, msg2, q, link) { q 691 drivers/char/ipmi/ipmi_msghandler.c static void free_smi_msg_list(struct list_head *q) q 695 drivers/char/ipmi/ipmi_msghandler.c list_for_each_entry_safe(msg, msg2, q, link) { q 223 drivers/clk/clk-cdce925.c u8 q; q 241 drivers/clk/clk-cdce925.c q = nn / m; q 242 drivers/clk/clk-cdce925.c if ((q < 16) || (q > 63)) { q 243 drivers/clk/clk-cdce925.c pr_debug("%s invalid q=%d\n", __func__, q); q 246 drivers/clk/clk-cdce925.c r = nn - (m*q); q 252 drivers/clk/clk-cdce925.c n, m, p, q, r); q 256 drivers/clk/clk-cdce925.c pll[2] = ((r & 0x1F) << 3) | ((q >> 3) & 0x07); q 257 drivers/clk/clk-cdce925.c pll[3] = ((q & 0x07) << 5) | (p << 2) | q 2860 drivers/clk/clk.c bool clk_is_match(const struct clk *p, const struct clk *q) q 2863 drivers/clk/clk.c if (p == q) q 2867 drivers/clk/clk.c if (!IS_ERR_OR_NULL(p) && !IS_ERR_OR_NULL(q)) q 2868 drivers/clk/clk.c if (p->core == q->core) q 61 drivers/cpufreq/amd_freq_sensitivity.c if (actual.q < data->actual || reference.q < data->reference) { q 66 drivers/cpufreq/amd_freq_sensitivity.c d_actual = actual.q - data->actual; q 67 drivers/cpufreq/amd_freq_sensitivity.c d_reference = reference.q - data->reference; q 102 drivers/cpufreq/amd_freq_sensitivity.c data->actual = actual.q; q 103 drivers/cpufreq/amd_freq_sensitivity.c data->reference = reference.q; q 478 drivers/crypto/caam/caampkc.c pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE); q 549 drivers/crypto/caam/caampkc.c pdb->q_dma = dma_map_single(dev, key->q, q_sz, DMA_TO_DEVICE); q 796 drivers/crypto/caam/caampkc.c kzfree(key->q); q 925 drivers/crypto/caam/caampkc.c rsa_key->q = caam_read_raw_data(raw_key->q, &q_sz); q 926 drivers/crypto/caam/caampkc.c if (!rsa_key->q) q 966 drivers/crypto/caam/caampkc.c kzfree(rsa_key->q); q 74 drivers/crypto/caam/caampkc.h u8 *q; q 457 drivers/crypto/caam/pdb.h u8 *q; q 470 drivers/crypto/caam/pdb.h u8 *q; q 59 drivers/crypto/cavium/cpt/cptpf_mbox.c static int cpt_bind_vq_to_grp(struct cpt_device *cpt, u8 q, u8 grp) q 65 drivers/crypto/cavium/cpt/cptpf_mbox.c if (q >= CPT_MAX_VF_NUM) { q 77 drivers/crypto/cavium/cpt/cptpf_mbox.c pf_qx_ctl.u = cpt_read_csr64(cpt->reg_base, CPTX_PF_QX_CTL(0, q)); q 79 drivers/crypto/cavium/cpt/cptpf_mbox.c cpt_write_csr64(cpt->reg_base, CPTX_PF_QX_CTL(0, q), pf_qx_ctl.u); q 80 drivers/crypto/cavium/cpt/cptpf_mbox.c dev_dbg(dev, "VF %d TYPE %s", q, (mcode[grp].is_ae ? 
"AE" : "SE")); q 90 drivers/crypto/cavium/cpt/cptvf.h #define for_each_pending_queue(qinfo, q, i) \ q 91 drivers/crypto/cavium/cpt/cptvf.h for (i = 0, q = &qinfo->queue[i]; i < qinfo->nr_queues; i++, \ q 92 drivers/crypto/cavium/cpt/cptvf.h q = &qinfo->queue[i]) q 14 drivers/crypto/cavium/cpt/cptvf_reqmanager.c static struct pending_entry *get_free_pending_entry(struct pending_queue *q, q 19 drivers/crypto/cavium/cpt/cptvf_reqmanager.c ent = &q->head[q->rear]; q 25 drivers/crypto/cavium/cpt/cptvf_reqmanager.c q->rear++; q 26 drivers/crypto/cavium/cpt/cptvf_reqmanager.c if (unlikely(q->rear == qlen)) q 27 drivers/crypto/cavium/cpt/cptvf_reqmanager.c q->rear = 0; q 131 drivers/crypto/cavium/zip/zip_main.c int q = 0; q 154 drivers/crypto/cavium/zip/zip_main.c for (q = 0; q < ZIP_NUM_QUEUES; q++) { q 160 drivers/crypto/cavium/zip/zip_main.c (zip->reg_base + ZIP_QUEX_SBUF_CTL(q))); q 162 drivers/crypto/cavium/zip/zip_main.c zip_msg("QUEX_SBUF_CTL[%d]: 0x%016llx", q, q 163 drivers/crypto/cavium/zip/zip_main.c zip_reg_read(zip->reg_base + ZIP_QUEX_SBUF_CTL(q))); q 166 drivers/crypto/cavium/zip/zip_main.c for (q = 0; q < ZIP_NUM_QUEUES; q++) { q 167 drivers/crypto/cavium/zip/zip_main.c memset(&zip->iq[q], 0x0, sizeof(struct zip_iq)); q 169 drivers/crypto/cavium/zip/zip_main.c spin_lock_init(&zip->iq[q].lock); q 171 drivers/crypto/cavium/zip/zip_main.c if (zip_cmd_qbuf_alloc(zip, q)) { q 172 drivers/crypto/cavium/zip/zip_main.c while (q != 0) { q 173 drivers/crypto/cavium/zip/zip_main.c q--; q 174 drivers/crypto/cavium/zip/zip_main.c zip_cmd_qbuf_free(zip, q); q 180 drivers/crypto/cavium/zip/zip_main.c zip->iq[q].sw_tail = zip->iq[q].sw_head; q 181 drivers/crypto/cavium/zip/zip_main.c zip->iq[q].hw_tail = zip->iq[q].sw_head; q 185 drivers/crypto/cavium/zip/zip_main.c que_sbuf_addr.s.ptr = (__pa(zip->iq[q].sw_head) >> q 188 drivers/crypto/cavium/zip/zip_main.c zip_msg("QUE[%d]_PTR(PHYS): 0x%016llx", q, q 192 drivers/crypto/cavium/zip/zip_main.c (zip->reg_base + ZIP_QUEX_SBUF_ADDR(q))); q 194 drivers/crypto/cavium/zip/zip_main.c zip_msg("QUEX_SBUF_ADDR[%d]: 0x%016llx", q, q 195 drivers/crypto/cavium/zip/zip_main.c zip_reg_read(zip->reg_base + ZIP_QUEX_SBUF_ADDR(q))); q 198 drivers/crypto/cavium/zip/zip_main.c zip->iq[q].sw_head, zip->iq[q].sw_tail, q 199 drivers/crypto/cavium/zip/zip_main.c zip->iq[q].hw_tail); q 210 drivers/crypto/cavium/zip/zip_main.c for (q = 0; q < ZIP_NUM_QUEUES; q++) q 211 drivers/crypto/cavium/zip/zip_main.c que_ena.s.ena |= (0x1 << q); q 217 drivers/crypto/cavium/zip/zip_main.c for (q = 0; q < ZIP_NUM_QUEUES; q++) { q 222 drivers/crypto/cavium/zip/zip_main.c (zip->reg_base + ZIP_QUEX_MAP(q))); q 224 drivers/crypto/cavium/zip/zip_main.c zip_msg("QUE_MAP(%d) : 0x%016llx", q, q 225 drivers/crypto/cavium/zip/zip_main.c zip_reg_read(zip->reg_base + ZIP_QUEX_MAP(q))); q 229 drivers/crypto/cavium/zip/zip_main.c for (q = 0; q < ZIP_NUM_QUEUES; q++) q 230 drivers/crypto/cavium/zip/zip_main.c que_pri.s.pri |= (0x1 << q); /* Higher Priority RR */ q 315 drivers/crypto/cavium/zip/zip_main.c int q = 0; q 334 drivers/crypto/cavium/zip/zip_main.c for (q = 0; q < ZIP_NUM_QUEUES; q++) q 335 drivers/crypto/cavium/zip/zip_main.c zip_cmd_qbuf_free(zip, q); q 467 drivers/crypto/cavium/zip/zip_main.c u32 q = 0; q 481 drivers/crypto/cavium/zip/zip_main.c for (q = 0; q < ZIP_NUM_QUEUES; q++) { q 483 drivers/crypto/cavium/zip/zip_main.c ZIP_DBG_QUEX_STA(q))); q 57 drivers/crypto/cavium/zip/zip_mem.c int zip_cmd_qbuf_alloc(struct zip_device *zip, int q) q 59 drivers/crypto/cavium/zip/zip_mem.c 
zip->iq[q].sw_head = (u64 *)__get_free_pages((GFP_KERNEL | GFP_DMA), q 62 drivers/crypto/cavium/zip/zip_mem.c if (!zip->iq[q].sw_head) q 65 drivers/crypto/cavium/zip/zip_mem.c memset(zip->iq[q].sw_head, 0, ZIP_CMD_QBUF_SIZE); q 67 drivers/crypto/cavium/zip/zip_mem.c zip_dbg("cmd_qbuf_alloc[%d] Success : %p\n", q, zip->iq[q].sw_head); q 76 drivers/crypto/cavium/zip/zip_mem.c void zip_cmd_qbuf_free(struct zip_device *zip, int q) q 78 drivers/crypto/cavium/zip/zip_mem.c zip_dbg("Freeing cmd_qbuf 0x%lx\n", zip->iq[q].sw_tail); q 80 drivers/crypto/cavium/zip/zip_mem.c free_pages((u64)zip->iq[q].sw_tail, get_order(ZIP_CMD_QBUF_SIZE)); q 54 drivers/crypto/cavium/zip/zip_mem.h void zip_cmd_qbuf_free(struct zip_device *zip, int q); q 62 drivers/crypto/cavium/zip/zip_mem.h int zip_cmd_qbuf_alloc(struct zip_device *zip, int q); q 206 drivers/crypto/ccp/ccp-ops.c u8 *p, *q; q 214 drivers/crypto/ccp/ccp-ops.c q = p + len - 1; q 215 drivers/crypto/ccp/ccp-ops.c while (p < q) { q 216 drivers/crypto/ccp/ccp-ops.c *p = *p ^ *q; q 217 drivers/crypto/ccp/ccp-ops.c *q = *p ^ *q; q 218 drivers/crypto/ccp/ccp-ops.c *p = *p ^ *q; q 220 drivers/crypto/ccp/ccp-ops.c q--; q 231 drivers/crypto/ccp/ccp-ops.c u8 *p, *q; q 234 drivers/crypto/ccp/ccp-ops.c q = p + len - 1; q 235 drivers/crypto/ccp/ccp-ops.c while (p < q) { q 236 drivers/crypto/ccp/ccp-ops.c *p = *p ^ *q; q 237 drivers/crypto/ccp/ccp-ops.c *q = *p ^ *q; q 238 drivers/crypto/ccp/ccp-ops.c *p = *p ^ *q; q 240 drivers/crypto/ccp/ccp-ops.c q--; q 383 drivers/crypto/chelsio/chcr_ipsec.c struct sge_eth_txq *q; q 396 drivers/crypto/chelsio/chcr_ipsec.c q = &adap->sge.ethtxq[qidx + pi->first_qset]; q 399 drivers/crypto/chelsio/chcr_ipsec.c eoq = (void *)q->q.stat - pos; q 401 drivers/crypto/chelsio/chcr_ipsec.c pos = q->q.desc; q 434 drivers/crypto/chelsio/chcr_ipsec.c struct sge_eth_txq *q; q 444 drivers/crypto/chelsio/chcr_ipsec.c q = &adap->sge.ethtxq[qidx + pi->first_qset]; q 446 drivers/crypto/chelsio/chcr_ipsec.c left = (void *)q->q.stat - pos; q 448 drivers/crypto/chelsio/chcr_ipsec.c pos = q->q.desc; q 456 drivers/crypto/chelsio/chcr_ipsec.c q->vlan_ins++; q 479 drivers/crypto/chelsio/chcr_ipsec.c struct sge_eth_txq *q; q 487 drivers/crypto/chelsio/chcr_ipsec.c q = &adap->sge.ethtxq[qidx + pi->first_qset]; q 491 drivers/crypto/chelsio/chcr_ipsec.c eoq = (void *)q->q.stat - pos; q 494 drivers/crypto/chelsio/chcr_ipsec.c pos = q->q.desc; q 495 drivers/crypto/chelsio/chcr_ipsec.c left = 64 * q->q.size; q 510 drivers/crypto/chelsio/chcr_ipsec.c memcpy(q->q.desc, sa_entry->key + left, q 512 drivers/crypto/chelsio/chcr_ipsec.c pos = (u8 *)q->q.desc + (key_len - left); q 543 drivers/crypto/chelsio/chcr_ipsec.c struct sge_eth_txq *q = &adap->sge.ethtxq[qidx + pi->first_qset]; q 545 drivers/crypto/chelsio/chcr_ipsec.c int qid = q->q.cntxt_id; q 569 drivers/crypto/chelsio/chcr_ipsec.c netif_tx_stop_queue(q->txq); q 570 drivers/crypto/chelsio/chcr_ipsec.c q->q.stops++; q 571 drivers/crypto/chelsio/chcr_ipsec.c if (!q->dbqt) q 651 drivers/crypto/chelsio/chcr_ipsec.c static inline unsigned int txq_avail(const struct sge_txq *q) q 653 drivers/crypto/chelsio/chcr_ipsec.c return q->size - 1 - q->in_use; q 656 drivers/crypto/chelsio/chcr_ipsec.c static void eth_txq_stop(struct sge_eth_txq *q) q 658 drivers/crypto/chelsio/chcr_ipsec.c netif_tx_stop_queue(q->txq); q 659 drivers/crypto/chelsio/chcr_ipsec.c q->q.stops++; q 662 drivers/crypto/chelsio/chcr_ipsec.c static inline void txq_advance(struct sge_txq *q, unsigned int n) q 664 drivers/crypto/chelsio/chcr_ipsec.c 
q 664 drivers/crypto/chelsio/chcr_ipsec.c q->in_use += n;
q 665 drivers/crypto/chelsio/chcr_ipsec.c q->pidx += n;
q 666 drivers/crypto/chelsio/chcr_ipsec.c if (q->pidx >= q->size)
q 667 drivers/crypto/chelsio/chcr_ipsec.c q->pidx -= q->size;
q 681 drivers/crypto/chelsio/chcr_ipsec.c struct sge_eth_txq *q;
q 701 drivers/crypto/chelsio/chcr_ipsec.c q = &adap->sge.ethtxq[qidx + pi->first_qset];
q 703 drivers/crypto/chelsio/chcr_ipsec.c cxgb4_reclaim_completed_tx(adap, &q->q, true);
q 707 drivers/crypto/chelsio/chcr_ipsec.c credits = txq_avail(&q->q) - ndesc;
q 710 drivers/crypto/chelsio/chcr_ipsec.c eth_txq_stop(q);
q 713 drivers/crypto/chelsio/chcr_ipsec.c dev->name, qidx, credits, ndesc, txq_avail(&q->q),
q 720 drivers/crypto/chelsio/chcr_ipsec.c q->mapping_err++;
q 724 drivers/crypto/chelsio/chcr_ipsec.c pos = (u64 *)&q->q.desc[q->q.pidx];
q 731 drivers/crypto/chelsio/chcr_ipsec.c left = (u8 *)end - (u8 *)q->q.stat;
q 732 drivers/crypto/chelsio/chcr_ipsec.c end = (void *)q->q.desc + left;
q 734 drivers/crypto/chelsio/chcr_ipsec.c if (pos == (u64 *)q->q.stat) {
q 735 drivers/crypto/chelsio/chcr_ipsec.c left = (u8 *)end - (u8 *)q->q.stat;
q 736 drivers/crypto/chelsio/chcr_ipsec.c end = (void *)q->q.desc + left;
q 737 drivers/crypto/chelsio/chcr_ipsec.c pos = (void *)q->q.desc;
q 742 drivers/crypto/chelsio/chcr_ipsec.c cxgb4_inline_tx_skb(skb, &q->q, sgl);
q 747 drivers/crypto/chelsio/chcr_ipsec.c cxgb4_write_sgl(skb, &q->q, (void *)sgl, end,
q 751 drivers/crypto/chelsio/chcr_ipsec.c last_desc = q->q.pidx + ndesc - 1;
q 752 drivers/crypto/chelsio/chcr_ipsec.c if (last_desc >= q->q.size)
q 753 drivers/crypto/chelsio/chcr_ipsec.c last_desc -= q->q.size;
q 754 drivers/crypto/chelsio/chcr_ipsec.c q->q.sdesc[last_desc].skb = skb;
q 755 drivers/crypto/chelsio/chcr_ipsec.c q->q.sdesc[last_desc].sgl = (struct ulptx_sgl *)sgl;
q 757 drivers/crypto/chelsio/chcr_ipsec.c txq_advance(&q->q, ndesc);
q 759 drivers/crypto/chelsio/chcr_ipsec.c cxgb4_ring_tx_db(adap, &q->q, ndesc);
q 105 drivers/crypto/hisilicon/sec/sec_drv.c #define SEC_Q_VMID_CFG_REG(q) (0x0100 + (q) * 4)
q 106 drivers/crypto/hisilicon/sec/sec_drv.c #define SEC_Q_WEIGHT_CFG_REG(q) (0x200 + (q) * 4)
q 673 drivers/crypto/hisilicon/sec/sec_drv.c static irqreturn_t sec_isr_handle_th(int irq, void *q)
q 675 drivers/crypto/hisilicon/sec/sec_drv.c sec_queue_irq_disable(q);
q 679 drivers/crypto/hisilicon/sec/sec_drv.c static irqreturn_t sec_isr_handle(int irq, void *q)
q 681 drivers/crypto/hisilicon/sec/sec_drv.c struct sec_queue *queue = q;
q 58 drivers/crypto/hisilicon/zip/zip_crypto.c struct hisi_zip_req *q;
q 233 drivers/crypto/hisilicon/zip/zip_crypto.c req_q->q = kcalloc(req_q->size, sizeof(struct hisi_zip_req),
q 235 drivers/crypto/hisilicon/zip/zip_crypto.c if (!req_q->q) {
q 249 drivers/crypto/hisilicon/zip/zip_crypto.c kfree(ctx->qp_ctx[QPC_COMP].req_q.q);
q 260 drivers/crypto/hisilicon/zip/zip_crypto.c kfree(ctx->qp_ctx[i].req_q.q);
q 315 drivers/crypto/hisilicon/zip/zip_crypto.c struct hisi_zip_req *req = req_q->q + sqe->tag;
q 431 drivers/crypto/hisilicon/zip/zip_crypto.c struct hisi_zip_req *q = req_q->q;
q 445 drivers/crypto/hisilicon/zip/zip_crypto.c req_cache = q + req_id;
q 56 drivers/crypto/n2_core.c void *q;
q 77 drivers/crypto/n2_core.c static unsigned long spu_next_offset(struct spu_queue *q, unsigned long off)
q 79 drivers/crypto/n2_core.c if (q->q_type == HV_NCS_QTYPE_MAU) {
q 101 drivers/crypto/n2_core.c static inline bool job_finished(struct spu_queue *q, unsigned int offset,
q 122 drivers/crypto/n2_core.c struct spu_queue *q = dev_id;
q 125 drivers/crypto/n2_core.c smp_processor_id(), q->qhandle);
q 127 drivers/crypto/n2_core.c spin_lock(&q->lock);
q 129 drivers/crypto/n2_core.c hv_ret = sun4v_ncs_gethead(q->qhandle, &new_head);
q 134 drivers/crypto/n2_core.c for (off = q->head; off != new_head; off = spu_next_offset(q, off)) {
q 138 drivers/crypto/n2_core.c hv_ret = sun4v_ncs_sethead_marker(q->qhandle, new_head);
q 140 drivers/crypto/n2_core.c q->head = new_head;
q 142 drivers/crypto/n2_core.c spin_unlock(&q->lock);
q 149 drivers/crypto/n2_core.c struct spu_queue *q = dev_id;
q 152 drivers/crypto/n2_core.c spin_lock(&q->lock);
q 155 drivers/crypto/n2_core.c smp_processor_id(), q->qhandle);
q 157 drivers/crypto/n2_core.c hv_ret = sun4v_ncs_gethead(q->qhandle, &head);
q 162 drivers/crypto/n2_core.c sun4v_ncs_sethead_marker(q->qhandle, head);
q 164 drivers/crypto/n2_core.c spin_unlock(&q->lock);
q 169 drivers/crypto/n2_core.c static void *spu_queue_next(struct spu_queue *q, void *cur)
q 171 drivers/crypto/n2_core.c return q->q + spu_next_offset(q, cur - q->q);
q 174 drivers/crypto/n2_core.c static int spu_queue_num_free(struct spu_queue *q)
q 176 drivers/crypto/n2_core.c unsigned long head = q->head;
q 177 drivers/crypto/n2_core.c unsigned long tail = q->tail;
q 189 drivers/crypto/n2_core.c static void *spu_queue_alloc(struct spu_queue *q, int num_entries)
q 191 drivers/crypto/n2_core.c int avail = spu_queue_num_free(q);
q 194 drivers/crypto/n2_core.c return q->q + q->tail;
q 199 drivers/crypto/n2_core.c static unsigned long spu_queue_submit(struct spu_queue *q, void *last)
q 203 drivers/crypto/n2_core.c new_tail = spu_next_offset(q, last - q->q);
q 205 drivers/crypto/n2_core.c hv_ret = sun4v_ncs_settail(q->qhandle, new_tail);
q 207 drivers/crypto/n2_core.c q->tail = new_tail;
q 562 drivers/crypto/n2_core.c ent = qp->q + qp->tail;
q 1652 drivers/crypto/n2_core.c hv_ret = sun4v_ncs_qconf(q_type, __pa(p->q),
q 1672 drivers/crypto/n2_core.c p->q = new_queue(p->q_type);
q 1673 drivers/crypto/n2_core.c if (!p->q)
q 1678 drivers/crypto/n2_core.c free_queue(p->q, p->q_type);
q 1679 drivers/crypto/n2_core.c p->q = NULL;
q 1689 drivers/crypto/n2_core.c if (!p->q)
q 1695 drivers/crypto/n2_core.c free_queue(p->q, p->q_type);
q 82 drivers/crypto/qat/qat_common/qat_asym_algs.c dma_addr_t q;
q 108 drivers/crypto/qat/qat_common/qat_asym_algs.c char *q;
q 857 drivers/crypto/qat/qat_common/qat_asym_algs.c qat_req->in.rsa.dec_crt.q = ctx->dma_q;
q 1086 drivers/crypto/qat/qat_common/qat_asym_algs.c ptr = rsa_key->q;
q 1091 drivers/crypto/qat/qat_common/qat_asym_algs.c ctx->q = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_q, GFP_KERNEL);
q 1092 drivers/crypto/qat/qat_common/qat_asym_algs.c if (!ctx->q)
q 1094 drivers/crypto/qat/qat_common/qat_asym_algs.c memcpy(ctx->q + (half_key_sz - len), ptr, len);
q 1144 drivers/crypto/qat/qat_common/qat_asym_algs.c memset(ctx->q, '\0', half_key_sz);
q 1145 drivers/crypto/qat/qat_common/qat_asym_algs.c dma_free_coherent(dev, half_key_sz, ctx->q, ctx->dma_q);
q 1146 drivers/crypto/qat/qat_common/qat_asym_algs.c ctx->q = NULL;
q 1172 drivers/crypto/qat/qat_common/qat_asym_algs.c if (ctx->q) {
q 1173 drivers/crypto/qat/qat_common/qat_asym_algs.c memset(ctx->q, '\0', half_key_sz);
q 1174 drivers/crypto/qat/qat_common/qat_asym_algs.c dma_free_coherent(dev, half_key_sz, ctx->q, ctx->dma_q);
q 1193 drivers/crypto/qat/qat_common/qat_asym_algs.c ctx->q = NULL;
q 163 drivers/dax/super.c struct request_queue *q;
q 168 drivers/dax/super.c q = bdev_get_queue(bdev);
q 169 drivers/dax/super.c if (!q || !blk_queue_dax(q)) {
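
The chcr_ipsec.c helpers above carry the whole SGE ring-accounting story: txq_avail() keeps one descriptor slot in reserve so a full ring stays distinguishable from an empty one, and txq_advance() wraps the producer index by subtraction. They are small enough to check standalone; struct sge_txq here is trimmed to the three fields these helpers touch:

/*
 * The Chelsio SGE bookkeeping shown above, standalone. The struct is
 * reduced to the fields txq_avail()/txq_advance() actually read.
 */
#include <stdio.h>

struct sge_txq {
	unsigned int in_use;	/* descriptors currently posted */
	unsigned int size;	/* ring size in descriptors */
	unsigned int pidx;	/* producer index */
};

static unsigned int txq_avail(const struct sge_txq *q)
{
	return q->size - 1 - q->in_use;	/* one slot held in reserve */
}

static void txq_advance(struct sge_txq *q, unsigned int n)
{
	q->in_use += n;
	q->pidx += n;
	if (q->pidx >= q->size)
		q->pidx -= q->size;	/* wrap without a modulo */
}

int main(void)
{
	struct sge_txq q = { .in_use = 0, .size = 8, .pidx = 6 };

	txq_advance(&q, 4);
	printf("pidx=%u avail=%u\n", q.pidx, txq_avail(&q));	/* pidx=2 avail=3 */
	return 0;
}
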
q 625 drivers/dma/fsl_raid.c struct device_node *np, u8 q, u32 off)
q 647 drivers/dma/fsl_raid.c dev_err(dev, "Not able to create ofdev for jr %d\n", q);
q 655 drivers/dma/fsl_raid.c dev_err(dev, "Reg property not found in jr %d\n", q);
q 666 drivers/dma/fsl_raid.c dev_err(dev, "No IRQ defined for JR %d\n", q);
q 671 drivers/dma/fsl_raid.c snprintf(chan->name, sizeof(chan->name), "re_jr%02d", q);
q 678 drivers/dma/fsl_raid.c dev_err(dev, "Unable to register interrupt for JR %d\n", q);
q 683 drivers/dma/fsl_raid.c re_priv->re_jrs[q] = chan;
q 3054 drivers/edac/amd64_edac.c cpu, reg->q,
q 1107 drivers/firewire/core-device.c u32 q;
q 1111 drivers/firewire/core-device.c rcode = read_rom(device, generation, i, &q);
q 1115 drivers/firewire/core-device.c if (i == 0 && q == 0)
q 1119 drivers/firewire/core-device.c if (q != device->config_rom[i]) {
q 24 drivers/firewire/core-topology.c #define SELF_ID_PHY_ID(q) (((q) >> 24) & 0x3f)
q 25 drivers/firewire/core-topology.c #define SELF_ID_EXTENDED(q) (((q) >> 23) & 0x01)
q 26 drivers/firewire/core-topology.c #define SELF_ID_LINK_ON(q) (((q) >> 22) & 0x01)
q 27 drivers/firewire/core-topology.c #define SELF_ID_GAP_COUNT(q) (((q) >> 16) & 0x3f)
q 28 drivers/firewire/core-topology.c #define SELF_ID_PHY_SPEED(q) (((q) >> 14) & 0x03)
q 29 drivers/firewire/core-topology.c #define SELF_ID_CONTENDER(q) (((q) >> 11) & 0x01)
q 30 drivers/firewire/core-topology.c #define SELF_ID_PHY_INITIATOR(q) (((q) >> 1) & 0x01)
q 31 drivers/firewire/core-topology.c #define SELF_ID_MORE_PACKETS(q) (((q) >> 0) & 0x01)
q 33 drivers/firewire/core-topology.c #define SELF_ID_EXT_SEQUENCE(q) (((q) >> 20) & 0x07)
q 42 drivers/firewire/core-topology.c u32 q;
q 49 drivers/firewire/core-topology.c q = *sid;
q 53 drivers/firewire/core-topology.c port_type = (q >> shift) & 0x03;
q 67 drivers/firewire/core-topology.c if (!SELF_ID_MORE_PACKETS(q))
q 72 drivers/firewire/core-topology.c q = *sid;
q 81 drivers/firewire/core-topology.c if (!SELF_ID_EXTENDED(q) ||
q 82 drivers/firewire/core-topology.c seq != SELF_ID_EXT_SEQUENCE(q))
q 175 drivers/firewire/core-topology.c u32 *next_sid, *end, q;
q 198 drivers/firewire/core-topology.c q = *sid;
q 199 drivers/firewire/core-topology.c if (phy_id != SELF_ID_PHY_ID(q)) {
q 201 drivers/firewire/core-topology.c phy_id, SELF_ID_PHY_ID(q));
q 222 drivers/firewire/core-topology.c node = fw_node_create(q, port_count, card->color);
q 231 drivers/firewire/core-topology.c if (SELF_ID_CONTENDER(q))
q 291 drivers/firewire/core-topology.c if (SELF_ID_GAP_COUNT(q) != gap_count)
q 44 drivers/firewire/core-transaction.c #define HEADER_GET_TCODE(q) (((q) >> 4) & 0x0f)
q 45 drivers/firewire/core-transaction.c #define HEADER_GET_TLABEL(q) (((q) >> 10) & 0x3f)
q 46 drivers/firewire/core-transaction.c #define HEADER_GET_RCODE(q) (((q) >> 12) & 0x0f)
q 47 drivers/firewire/core-transaction.c #define HEADER_GET_DESTINATION(q) (((q) >> 16) & 0xffff)
q 48 drivers/firewire/core-transaction.c #define HEADER_GET_SOURCE(q) (((q) >> 16) & 0xffff)
q 49 drivers/firewire/core-transaction.c #define HEADER_GET_OFFSET_HIGH(q) (((q) >> 0) & 0xffff)
q 50 drivers/firewire/core-transaction.c #define HEADER_GET_DATA_LENGTH(q) (((q) >> 16) & 0xffff)
q 51 drivers/firewire/core-transaction.c #define HEADER_GET_EXTENDED_TCODE(q) (((q) >> 0) & 0xffff)
q 53 drivers/firewire/core-transaction.c #define HEADER_DESTINATION_IS_BROADCAST(q) \
q 54 drivers/firewire/core-transaction.c (((q) & HEADER_DESTINATION(0x3f)) == HEADER_DESTINATION(0x3f))
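
The core-topology.c macros above decode IEEE 1394 self-ID quadlets by shift-and-mask. A standalone decode of one made-up quadlet (the macros are copied from the listing; the value is illustrative only):

/*
 * Self-ID field extraction as listed above; 0x02450800 is an
 * arbitrary example quadlet, not captured bus data.
 */
#include <stdio.h>
#include <stdint.h>

#define SELF_ID_PHY_ID(q)	(((q) >> 24) & 0x3f)
#define SELF_ID_LINK_ON(q)	(((q) >> 22) & 0x01)
#define SELF_ID_GAP_COUNT(q)	(((q) >> 16) & 0x3f)
#define SELF_ID_CONTENDER(q)	(((q) >> 11) & 0x01)

int main(void)
{
	uint32_t q = 0x02450800;	/* phy 2, link on, gap 5, contender */

	printf("phy_id=%u link_on=%u gap_count=%u contender=%u\n",
	       (unsigned int)SELF_ID_PHY_ID(q),
	       (unsigned int)SELF_ID_LINK_ON(q),
	       (unsigned int)SELF_ID_GAP_COUNT(q),
	       (unsigned int)SELF_ID_CONTENDER(q));
	return 0;
}
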
q 1510 drivers/firewire/ohci.c #define HEADER_GET_DESTINATION(q) (((q) >> 16) & 0xffff)
q 1511 drivers/firewire/ohci.c #define HEADER_GET_TCODE(q) (((q) >> 4) & 0x0f)
q 1512 drivers/firewire/ohci.c #define HEADER_GET_OFFSET_HIGH(q) (((q) >> 0) & 0xffff)
q 1513 drivers/firewire/ohci.c #define HEADER_GET_DATA_LENGTH(q) (((q) >> 16) & 0xffff)
q 1514 drivers/firewire/ohci.c #define HEADER_GET_EXTENDED_TCODE(q) (((q) >> 0) & 0xffff)
q 617 drivers/firmware/dmi_scan.c char __iomem *p, *q;
q 673 drivers/firmware/dmi_scan.c for (q = p + 16; q < p + 0x10000; q += 16) {
q 674 drivers/firmware/dmi_scan.c memcpy_fromio(buf + 16, q, 16);
q 691 drivers/firmware/dmi_scan.c for (q = p; q < p + 0x10000; q += 16) {
q 692 drivers/firmware/dmi_scan.c memcpy_fromio(buf + 16, q, 16);
q 334 drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h #define amdgpu_gfx_select_me_pipe_q(adev, me, pipe, q, vmid) (adev)->gfx.funcs->select_me_pipe_q((adev), (me), (pipe), (q), (vmid))
q 1179 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c u32 me, u32 pipe, u32 q, u32 vm)
q 1181 drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c nv_grbm_select(adev, me, pipe, q, vm);
q 3046 drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c u32 me, u32 pipe, u32 q, u32 vm)
q 4200 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c u32 me, u32 pipe, u32 q, u32 vm)
q 4202 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c cik_srbm_select(adev, me, pipe, q, vm);
q 3472 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c u32 me, u32 pipe, u32 q, u32 vm)
q 3474 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c vi_srbm_select(adev, me, pipe, q, vm);
q 1841 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c u32 me, u32 pipe, u32 q, u32 vm)
q 1843 drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c soc15_grbm_select(adev, me, pipe, q, vm);
q 55 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c struct queue *q);
q 58 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c struct queue *q);
q 59 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q);
q 61 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c struct queue *q);
q 135 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c static int allocate_doorbell(struct qcm_process_device *qpd, struct queue *q)
q 143 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c q->doorbell_id = q->properties.queue_id;
q 144 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
q 145 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
q 154 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c q->doorbell_id = idx_offset[q->properties.sdma_engine_id]
q 155 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c + (q->properties.sdma_queue_id & 1)
q 157 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c + (q->properties.sdma_queue_id >> 1);
q 169 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c q->doorbell_id = found;
q 172 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c q->properties.doorbell_off =
q 173 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c kfd_doorbell_id_to_offset(dev, q->process,
q 174 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c q->doorbell_id);
q 180 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c struct queue *q)
q 186 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c q->properties.type == KFD_QUEUE_TYPE_SDMA ||
q 187 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
q 190 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c old = test_and_clear_bit(q->doorbell_id, qpd->doorbell_bitmap);
q 196 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c struct queue *q)
q 209 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c q->properties.vmid = allocated_vmid;
q 211 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c set_pasid_vmid_mapping(dqm, q->process->pasid, q->properties.vmid);
q 249 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c struct queue *q)
q 254 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c if (q->device->device_info->asic_family == CHIP_HAWAII)
q 255 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c if (flush_texture_cache_nocpsch(q->device, qpd))
q 265 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c q->properties.vmid = 0;
q 269 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c struct queue *q,
q 275 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c print_queue(q);
q 287 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c retval = allocate_vmid(dqm, qpd, q);
q 291 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c q->properties.vmid = qpd->vmid;
q 297 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c q->properties.is_evicted = !!qpd->evicted;
q 299 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c q->properties.tba_addr = qpd->tba_addr;
q 300 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c q->properties.tma_addr = qpd->tma_addr;
q 303 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c q->properties.type)];
q 304 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE) {
q 305 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c retval = allocate_hqd(dqm, q);
q 309 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c q->pipe, q->queue);
q 310 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
q 311 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
q 312 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c retval = allocate_sdma_queue(dqm, q);
q 315 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c dqm->asic_ops.init_sdma_vm(dqm, q, qpd);
q 318 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c retval = allocate_doorbell(qpd, q);
q 324 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c q->mqd_mem_obj = mqd_mgr->allocate_mqd(mqd_mgr->dev, &q->properties);
q 327 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c if (!q->mqd_mem_obj) {
q 331 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c mqd_mgr->init_mqd(mqd_mgr, &q->mqd, q->mqd_mem_obj,
q 332 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c &q->gart_mqd_addr, &q->properties);
q 333 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c if (q->properties.is_active) {
q 335 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c if (WARN(q->process->mm != current->mm,
q 339 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe,
q 340 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c q->queue, &q->properties, current->mm);
q 345 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c list_add(&q->list, &qpd->queues_list);
q 347 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c if (q->properties.is_active)
q 350 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
q 352 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
q 365 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
q 367 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c deallocate_doorbell(qpd, q);
q 369 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE)
q 370 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c deallocate_hqd(dqm, q);
q 371 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c else if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
q 372 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
q 373 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c deallocate_sdma_queue(dqm, q);
q 376 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c deallocate_vmid(dqm, qpd, q);
q 382 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q)
q 399 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c q->pipe = pipe;
q 400 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c q->queue = bit;
q 409 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c pr_debug("hqd slot - pipe %d, queue %d\n", q->pipe, q->queue);
q 417 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c struct queue *q)
q 419 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c dqm->allocated_queues[q->pipe] |= (1 << q->queue);
q 427 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c struct queue *q)
q 433 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c q->properties.type)];
q 435 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE) {
q 436 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c deallocate_hqd(dqm, q);
q 437 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
q 439 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c deallocate_sdma_queue(dqm, q);
q 440 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
q 442 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c deallocate_sdma_queue(dqm, q);
q 445 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c q->properties.type);
q 450 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c deallocate_doorbell(qpd, q);
q 452 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
q 455 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c q->pipe, q->queue);
q 459 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
q 461 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c list_del(&q->list);
q 474 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c deallocate_vmid(dqm, qpd, q);
q 477 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c if (q->properties.is_active)
q 485 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c struct queue *q)
q 490 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c retval = destroy_queue_nocpsch_locked(dqm, qpd, q);
q 496 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c static int update_queue(struct device_queue_manager *dqm, struct queue *q)
q 504 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c pdd = kfd_get_process_device_data(q->device, q->process);
q 510 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c q->properties.type)];
q 513 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c prev_active = q->properties.is_active;
q 524 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
q 525 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c q->properties.type == KFD_QUEUE_TYPE_SDMA ||
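
In the kfd_device_queue_manager.c hits above, deallocate_hqd() frees a hardware queue descriptor by OR-ing its bit back into allocated_queues[pipe], which implies a set bit marks a free slot. A guessed-at sketch of the matching allocation scan (sizes are invented, and the real driver round-robins its starting pipe and holds a lock; this is not the driver's actual code):

/*
 * Hypothetical per-pipe HQD bitmask: set bit == free slot, as the
 * deallocate_hqd() line above implies.
 */
#include <stdio.h>
#include <strings.h>	/* ffs() */

#define PIPES	4
#define QUEUES	8

static unsigned int allocated_queues[PIPES];

static int allocate_hqd(unsigned int *pipe, unsigned int *queue)
{
	unsigned int p;

	for (p = 0; p < PIPES; p++) {
		int bit = ffs((int)allocated_queues[p]);

		if (bit) {
			allocated_queues[p] &= ~(1u << (bit - 1));
			*pipe = p;
			*queue = (unsigned int)(bit - 1);
			return 0;
		}
	}
	return -1;	/* no free HQD slot */
}

static void deallocate_hqd(unsigned int pipe, unsigned int queue)
{
	allocated_queues[pipe] |= (1u << queue);
}

int main(void)
{
	unsigned int p, q;

	for (p = 0; p < PIPES; p++)
		allocated_queues[p] = (1u << QUEUES) - 1;	/* all free */
	if (!allocate_hqd(&p, &q))
		printf("hqd slot - pipe %u, queue %u\n", p, q);
	deallocate_hqd(p, q);
	return 0;
}
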
q 526 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) {
q 527 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
q 529 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c KFD_UNMAP_LATENCY_MS, q->pipe, q->queue);
q 536 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c mqd_mgr->update_mqd(mqd_mgr, q->mqd, &q->properties);
q 544 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c if (q->properties.is_active && !prev_active)
q 546 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c else if (!q->properties.is_active && prev_active)
q 551 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c else if (q->properties.is_active &&
q 552 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
q 553 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c q->properties.type == KFD_QUEUE_TYPE_SDMA ||
q 554 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) {
q 555 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c if (WARN(q->process->mm != current->mm,
q 559 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd,
q 560 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c q->pipe, q->queue,
q 561 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c &q->properties, current->mm);
q 572 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c struct queue *q;
q 588 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c list_for_each_entry(q, &qpd->queues_list, list) {
q 589 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c q->properties.is_evicted = true;
q 590 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c if (!q->properties.is_active)
q 594 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c q->properties.type)];
q 595 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c q->properties.is_active = false;
q 596 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
q 598 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c KFD_UNMAP_LATENCY_MS, q->pipe, q->queue);
q 615 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c struct queue *q;
q 630 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c list_for_each_entry(q, &qpd->queues_list, list) {
q 631 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c q->properties.is_evicted = true;
q 632 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c if (!q->properties.is_active)
q 635 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c q->properties.is_active = false;
q 652 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c struct queue *q;
q 697 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c list_for_each_entry(q, &qpd->queues_list, list) {
q 698 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c q->properties.is_evicted = false;
q 699 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c if (!QUEUE_IS_ACTIVE(q->properties))
q 703 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c q->properties.type)];
q 704 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c q->properties.is_active = true;
q 705 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe,
q 706 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c q->queue, &q->properties, mm);
q 725 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c struct queue *q;
q 750 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c list_for_each_entry(q, &qpd->queues_list, list) {
q 751 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c q->properties.is_evicted = false;
q 752 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c if (!QUEUE_IS_ACTIVE(q->properties))
q 755 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c q->properties.is_active = true;
q 915 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c struct queue *q)
q 919 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
q 924 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c q->sdma_id = bit;
q 925 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c q->properties.sdma_engine_id = q->sdma_id %
q 927 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c q->properties.sdma_queue_id = q->sdma_id /
q 929 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
q 934 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c q->sdma_id = bit;
q 941 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c q->properties.sdma_engine_id = get_num_sdma_engines(dqm) +
q 942 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c q->sdma_id % get_num_xgmi_sdma_engines(dqm);
q 943 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c q->properties.sdma_queue_id = q->sdma_id /
q 947 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c pr_debug("SDMA engine id: %d\n", q->properties.sdma_engine_id);
q 948 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c pr_debug("SDMA queue id: %d\n", q->properties.sdma_queue_id);
q 954 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c struct queue *q)
q 956 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
q 957 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c if (q->sdma_id >= get_num_sdma_queues(dqm))
q 959 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c dqm->sdma_bitmap |= (1ULL << q->sdma_id);
q 960 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
q 961 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c if (q->sdma_id >= get_num_xgmi_sdma_queues(dqm))
q 963 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c dqm->xgmi_sdma_bitmap |= (1ULL << q->sdma_id);
q 1132 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
q 1145 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
q 1146 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
q 1148 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c retval = allocate_sdma_queue(dqm, q);
q 1154 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c retval = allocate_doorbell(qpd, q);
q 1159 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c q->properties.type)];
q 1161 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
q 1162 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
q 1163 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c dqm->asic_ops.init_sdma_vm(dqm, q, qpd);
q 1164 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c q->properties.tba_addr = qpd->tba_addr;
q 1165 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c q->properties.tma_addr = qpd->tma_addr;
q 1166 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c q->mqd_mem_obj = mqd_mgr->allocate_mqd(mqd_mgr->dev, &q->properties);
q 1167 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c if (!q->mqd_mem_obj) {
q 1178 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c q->properties.is_evicted = !!qpd->evicted;
q 1179 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c mqd_mgr->init_mqd(mqd_mgr, &q->mqd, q->mqd_mem_obj,
q 1180 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c &q->gart_mqd_addr, &q->properties);
q 1182 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c list_add(&q->list, &qpd->queues_list);
q 1185 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
q 1187 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
q 1190 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c if (q->properties.is_active) {
q 1209 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c deallocate_doorbell(qpd, q);
q 1211 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
q 1212 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
q 1214 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c deallocate_sdma_queue(dqm, q);
q 1341 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c struct queue *q)
q 1362 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c q->properties.type)];
q 1364 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c deallocate_doorbell(qpd, q);
q 1366 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
q 1368 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c deallocate_sdma_queue(dqm, q);
q 1369 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
q 1371 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c deallocate_sdma_queue(dqm, q);
q 1374 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c list_del(&q->list);
q 1376 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c if (q->properties.is_active) {
q 1395 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
q 1498 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c struct queue *q, *next;
q 1506 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c list_for_each_entry_safe(q, next, &qpd->queues_list, list) {
q 1509 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c ret = destroy_queue_nocpsch_locked(dqm, qpd, q);
q 1537 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c struct queue *q,
q 1547 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c if (q->properties.type != KFD_QUEUE_TYPE_COMPUTE ||
q 1548 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c q->properties.is_active || !q->device->cwsr_enabled) {
q 1560 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c r = mqd_mgr->get_wave_state(mqd_mgr, q->mqd, ctl_stack,
q 1572 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c struct queue *q, *next;
q 1594 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c list_for_each_entry(q, &qpd->queues_list, list) {
q 1595 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
q 1597 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c deallocate_sdma_queue(dqm, q);
q 1598 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
q 1600 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c deallocate_sdma_queue(dqm, q);
q 1603 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c if (q->properties.is_active)
q 1638 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c list_for_each_entry_safe(q, next, &qpd->queues_list, list) {
q 1640 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c q->properties.type)];
q 1641 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c list_del(&q->list);
q 1643 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
q 86 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h struct queue *q,
q 91 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h struct queue *q);
q 94 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h struct queue *q);
q 135 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h struct queue *q,
q 151 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h struct queue *q,
q 39 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q,
q 42 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c struct queue *q,
q 180 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q,
q 185 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c if (q->process->is_32bit_user_mode)
q 193 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c q->properties.sdma_vm_addr = value;
q 197 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c struct queue *q,
q 203 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c q->properties.sdma_vm_addr =
q 31 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v10.c static void init_sdma_vm_v10(struct device_queue_manager *dqm, struct queue *q,
q 83 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v10.c static void init_sdma_vm_v10(struct device_queue_manager *dqm, struct queue *q,
q 87 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v10.c q->properties.sdma_vm_addr = 0;
q 32 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c static void init_sdma_vm_v9(struct device_queue_manager *dqm, struct queue *q,
q 80 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c static void init_sdma_vm_v9(struct device_queue_manager *dqm, struct queue *q,
q 84 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c q->properties.sdma_vm_addr = 0;
q 45 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q,
q 48 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c struct queue *q,
q 228 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q,
q 233 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c if (q->process->is_32bit_user_mode)
q 241 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c q->properties.sdma_vm_addr = value;
q 245 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c struct queue *q,
q 251 drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_vi.c q->properties.sdma_vm_addr =
q 152 drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v10.c struct queue *q, bool is_static)
q 171 drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v10.c switch (q->properties.type) {
q 183 drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v10.c packet->bitfields2.engine_sel = q->properties.sdma_engine_id +
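
allocate_sdma_queue() above turns a flat sdma_id into an engine/queue pair with a modulo and a division, spreading consecutive ids round-robin across engines. The arithmetic, checked standalone with an assumed two-engine configuration:

/*
 * The engine/queue split used by allocate_sdma_queue() above;
 * NUM_SDMA_ENGINES is an assumed configuration value.
 */
#include <stdio.h>

#define NUM_SDMA_ENGINES 2u

int main(void)
{
	unsigned int sdma_id;

	for (sdma_id = 0; sdma_id < 6; sdma_id++)
		printf("sdma_id %u -> engine %u, queue %u\n", sdma_id,
		       sdma_id % NUM_SDMA_ENGINES,	/* round-robin engine */
		       sdma_id / NUM_SDMA_ENGINES);	/* slot on that engine */
	return 0;
}
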
q 188 drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v10.c WARN(1, "queue type %d\n", q->properties.type);
q 192 drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v10.c q->properties.doorbell_off;
q 195 drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v10.c lower_32_bits(q->gart_mqd_addr);
q 198 drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v10.c upper_32_bits(q->gart_mqd_addr);
q 201 drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v10.c lower_32_bits((uint64_t)q->properties.write_ptr);
q 204 drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v10.c upper_32_bits((uint64_t)q->properties.write_ptr);
q 176 drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c struct queue *q, bool is_static)
q 192 drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c packet->bitfields2.gws_control_queue = q->gws ? 1 : 0;
q 198 drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c switch (q->properties.type) {
q 211 drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c if (q->properties.sdma_engine_id < 2)
q 212 drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c packet->bitfields2.engine_sel = q->properties.sdma_engine_id +
q 217 drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c packet->bitfields2.engine_sel = q->properties.sdma_engine_id;
q 221 drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c WARN(1, "queue type %d", q->properties.type);
q 225 drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c q->properties.doorbell_off;
q 228 drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c lower_32_bits(q->gart_mqd_addr);
q 231 drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c upper_32_bits(q->gart_mqd_addr);
q 234 drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c lower_32_bits((uint64_t)q->properties.write_ptr);
q 237 drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c upper_32_bits((uint64_t)q->properties.write_ptr);
q 183 drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c struct queue *q, bool is_static)
q 202 drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c switch (q->properties.type) {
q 214 drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c packet->bitfields2.engine_sel = q->properties.sdma_engine_id +
q 219 drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c WARN(1, "queue type %d", q->properties.type);
q 223 drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c q->properties.doorbell_off;
q 226 drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c lower_32_bits(q->gart_mqd_addr);
q 229 drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c upper_32_bits(q->gart_mqd_addr);
q 232 drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c lower_32_bits((uint64_t)q->properties.write_ptr);
q 235 drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c upper_32_bits((uint64_t)q->properties.write_ptr);
q 48 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c struct kfd_mem_obj *allocate_hiq_mqd(struct kfd_dev *dev, struct queue_properties *q)
q 64 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c struct queue_properties *q)
q 73 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c offset = (q->sdma_engine_id *
q 75 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c q->sdma_queue_id) *
q 70 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h struct queue_properties *q);
q 74 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h struct queue_properties *q);
q 82 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h struct queue_properties *q);
q 111 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h struct queue_properties *q);
q 114 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h struct queue_properties *q);
q 45 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c struct queue_properties *q)
q 50 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c if (q->cu_mask_count == 0)
q 54 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c q->cu_mask, q->cu_mask_count, se_mask);
q 69 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c static void set_priority(struct cik_mqd *m, struct queue_properties *q)
q 71 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c m->cp_hqd_pipe_priority = pipe_priority_map[q->priority];
q 72 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c m->cp_hqd_queue_priority = q->priority;
q 76 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c struct queue_properties *q)
q 89 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c struct queue_properties *q)
q 130 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c set_priority(m, q);
q 132 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c if (q->format == KFD_QUEUE_FORMAT_AQL)
q 138 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c mm->update_mqd(mm, m, q);
q 143 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c struct queue_properties *q)
q 155 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c mm->update_mqd(mm, m, q);
q 188 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c struct queue_properties *q, unsigned int atc_bit)
q 205 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c m->cp_hqd_pq_control |= order_base_2(q->queue_size / 4) - 1;
q 206 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);
q 207 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c m->cp_hqd_pq_base_hi = upper_32_bits((uint64_t)q->queue_address >> 8);
q 208 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c m->cp_hqd_pq_rptr_report_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
q 209 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c m->cp_hqd_pq_rptr_report_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
q 210 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c m->cp_hqd_pq_doorbell_control = DOORBELL_OFFSET(q->doorbell_off);
q 212 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c m->cp_hqd_vmid = q->vmid;
q 214 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c if (q->format == KFD_QUEUE_FORMAT_AQL)
q 217 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c update_cu_mask(mm, mqd, q);
q 218 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c set_priority(m, q);
q 220 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c q->is_active = QUEUE_IS_ACTIVE(*q);
q 224 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c struct queue_properties *q)
q 226 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c __update_mqd(mm, mqd, q, 1);
q 230 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c struct queue_properties *q)
q 232 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c __update_mqd(mm, mqd, q, 0);
q 236 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c struct queue_properties *q)
q 241 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c m->sdma_rlc_rb_cntl = order_base_2(q->queue_size / 4)
q 243 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c q->vmid << SDMA0_RLC0_RB_CNTL__RB_VMID__SHIFT |
q 247 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c m->sdma_rlc_rb_base = lower_32_bits(q->queue_address >> 8);
q 248 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c m->sdma_rlc_rb_base_hi = upper_32_bits(q->queue_address >> 8);
q 249 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c m->sdma_rlc_rb_rptr_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
q 250 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c m->sdma_rlc_rb_rptr_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
q 252 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c q->doorbell_off << SDMA0_RLC0_DOORBELL__OFFSET__SHIFT;
q 254 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c m->sdma_rlc_virtual_addr = q->sdma_vm_addr;
q 256 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c m->sdma_engine_id = q->sdma_engine_id;
q 257 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c m->sdma_queue_id = q->sdma_queue_id;
q 259 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c q->is_active = QUEUE_IS_ACTIVE(*q);
q 308 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c struct queue_properties *q)
q 310 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c init_mqd(mm, mqd, mqd_mem_obj, gart_addr, q);
q 314 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c struct queue_properties *q)
q 328 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c m->cp_hqd_pq_control |= order_base_2(q->queue_size / 4) - 1;
q 329 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);
q 330 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c m->cp_hqd_pq_base_hi = upper_32_bits((uint64_t)q->queue_address >> 8);
q 331 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c m->cp_hqd_pq_rptr_report_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
q 332 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c m->cp_hqd_pq_rptr_report_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
q 333 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c m->cp_hqd_pq_doorbell_control = DOORBELL_OFFSET(q->doorbell_off);
q 335 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c m->cp_hqd_vmid = q->vmid;
q 337 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c q->is_active = QUEUE_IS_ACTIVE(*q);
q 339 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c set_priority(m, q);
q 45 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c struct queue_properties *q)
q 50 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c if (q->cu_mask_count == 0)
q 54 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c q->cu_mask, q->cu_mask_count, se_mask);
q 70 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c struct queue_properties *q)
q 79 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c if (kfd->cwsr_enabled && (q->type == KFD_QUEUE_TYPE_COMPUTE)) {
q 84 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c ALIGN(q->ctl_stack_size, PAGE_SIZE) +
q 105 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c struct queue_properties *q)
q 137 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c if (q->format == KFD_QUEUE_FORMAT_AQL) {
q 146 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c lower_32_bits(q->ctx_save_restore_area_address);
q 148 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c upper_32_bits(q->ctx_save_restore_area_address);
q 149 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c m->cp_hqd_ctx_save_size = q->ctx_save_restore_area_size;
q 150 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c m->cp_hqd_cntl_stack_size = q->ctl_stack_size;
q 151 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c m->cp_hqd_cntl_stack_offset = q->ctl_stack_size;
q 152 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c m->cp_hqd_wg_state_offset = q->ctl_stack_size;
q 158 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c mm->update_mqd(mm, m, q);
q 176 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c struct queue_properties *q)
q 184 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c ffs(q->queue_size / sizeof(unsigned int)) - 1 - 1;
q 187 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);
q 188 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c m->cp_hqd_pq_base_hi = upper_32_bits((uint64_t)q->queue_address >> 8);
q 190 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c m->cp_hqd_pq_rptr_report_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
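
Two encodings of the ring-size field appear above: order_base_2(q->queue_size / 4) - 1 (cik/v9/vi) and ffs(q->queue_size / sizeof(unsigned int)) - 1 - 1 (v10). For the power-of-two queue sizes these MQDs use, both compute log2(size in dwords) - 1; a standalone check (order_base_2 re-derived here for power-of-two inputs only):

/*
 * Equivalence of the two ring-size encodings above for power-of-two
 * sizes: order_base_2(n) - 1 == ffs(n) - 1 - 1 when n is 2^k.
 */
#include <stdio.h>
#include <strings.h>	/* ffs() */

static int order_base_2_pow2(unsigned int n)
{
	return ffs((int)n) - 1;	/* log2(n) for power-of-two n */
}

int main(void)
{
	unsigned int qsize;

	for (qsize = 256; qsize <= 4096; qsize <<= 1) {
		unsigned int n = qsize / 4;	/* ring size in dwords */

		printf("queue_size %4u: order_base_2-1=%d ffs-2=%d\n",
		       qsize, order_base_2_pow2(n) - 1, ffs((int)n) - 1 - 1);
	}
	return 0;
}
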
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c m->cp_hqd_pq_rptr_report_addr_lo = lower_32_bits((uint64_t)q->read_ptr); q 191 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c m->cp_hqd_pq_rptr_report_addr_hi = upper_32_bits((uint64_t)q->read_ptr); q 192 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c m->cp_hqd_pq_wptr_poll_addr_lo = lower_32_bits((uint64_t)q->write_ptr); q 193 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c m->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits((uint64_t)q->write_ptr); q 196 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c q->doorbell_off << q 211 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c ffs(q->eop_ring_buffer_size / sizeof(unsigned int)) - 1 - 1); q 213 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c lower_32_bits(q->eop_ring_buffer_address >> 8); q 215 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c upper_32_bits(q->eop_ring_buffer_address >> 8); q 219 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c m->cp_hqd_vmid = q->vmid; q 221 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c if (q->format == KFD_QUEUE_FORMAT_AQL) { q 232 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c update_cu_mask(mm, mqd, q); q 234 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c q->is_active = (q->queue_size > 0 && q 235 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c q->queue_address != 0 && q 236 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c q->queue_percent > 0 && q 237 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c !q->is_evicted); q 297 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c struct queue_properties *q) q 301 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c init_mqd(mm, mqd, mqd_mem_obj, gart_addr, q); q 310 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c struct queue_properties *q) q 314 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c update_mqd(mm, mqd, q); q 318 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c m->cp_hqd_vmid = q->vmid; q 323 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c struct queue_properties *q) q 335 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c mm->update_mqd(mm, m, q); q 350 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c struct queue_properties *q) q 355 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c m->sdmax_rlcx_rb_cntl = (ffs(q->queue_size / sizeof(unsigned int)) - 1) q 357 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c q->vmid << SDMA0_RLC0_RB_CNTL__RB_VMID__SHIFT | q 361 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c m->sdmax_rlcx_rb_base = lower_32_bits(q->queue_address >> 8); q 362 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c m->sdmax_rlcx_rb_base_hi = upper_32_bits(q->queue_address >> 8); q 363 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c m->sdmax_rlcx_rb_rptr_addr_lo = lower_32_bits((uint64_t)q->read_ptr); q 364 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c m->sdmax_rlcx_rb_rptr_addr_hi = upper_32_bits((uint64_t)q->read_ptr); q 366 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c q->doorbell_off << SDMA0_RLC0_DOORBELL_OFFSET__OFFSET__SHIFT; q 368 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c m->sdma_engine_id = q->sdma_engine_id; q 369 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c m->sdma_queue_id = q->sdma_queue_id; q 373 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c q->is_active = (q->queue_size > 0 && q 374 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c q->queue_address != 0 && q 375 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c q->queue_percent > 0 && q 376 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c !q->is_evicted); q 46 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c 
struct queue_properties *q) q 51 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c if (q->cu_mask_count == 0) q 55 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c q->cu_mask, q->cu_mask_count, se_mask); q 78 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c static void set_priority(struct v9_mqd *m, struct queue_properties *q) q 80 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c m->cp_hqd_pipe_priority = pipe_priority_map[q->priority]; q 81 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c m->cp_hqd_queue_priority = q->priority; q 85 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c struct queue_properties *q) q 94 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c if (kfd->cwsr_enabled && (q->type == KFD_QUEUE_TYPE_COMPUTE)) { q 99 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c ALIGN(q->ctl_stack_size, PAGE_SIZE) + q 120 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c struct queue_properties *q) q 153 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c if (q->format == KFD_QUEUE_FORMAT_AQL) { q 158 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c if (q->tba_addr) { q 163 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c if (mm->dev->cwsr_enabled && q->ctx_save_restore_area_address) { q 167 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c lower_32_bits(q->ctx_save_restore_area_address); q 169 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c upper_32_bits(q->ctx_save_restore_area_address); q 170 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c m->cp_hqd_ctx_save_size = q->ctx_save_restore_area_size; q 171 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c m->cp_hqd_cntl_stack_size = q->ctl_stack_size; q 172 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c m->cp_hqd_cntl_stack_offset = q->ctl_stack_size; q 173 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c m->cp_hqd_wg_state_offset = q->ctl_stack_size; q 179 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c mm->update_mqd(mm, m, q); q 195 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c struct queue_properties *q) q 202 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c m->cp_hqd_pq_control |= order_base_2(q->queue_size / 4) - 1; q 205 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8); q 206 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c m->cp_hqd_pq_base_hi = upper_32_bits((uint64_t)q->queue_address >> 8); q 208 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c m->cp_hqd_pq_rptr_report_addr_lo = lower_32_bits((uint64_t)q->read_ptr); q 209 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c m->cp_hqd_pq_rptr_report_addr_hi = upper_32_bits((uint64_t)q->read_ptr); q 210 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c m->cp_hqd_pq_wptr_poll_addr_lo = lower_32_bits((uint64_t)q->write_ptr); q 211 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c m->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits((uint64_t)q->write_ptr); q 214 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c q->doorbell_off << q 231 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c order_base_2(q->eop_ring_buffer_size / 4) - 1); q 233 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c lower_32_bits(q->eop_ring_buffer_address >> 8); q 235 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c upper_32_bits(q->eop_ring_buffer_address >> 8); q 239 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c m->cp_hqd_vmid = q->vmid; q 241 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c if (q->format == KFD_QUEUE_FORMAT_AQL) { q 249 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c if (mm->dev->cwsr_enabled && q->ctx_save_restore_area_address) q 252 
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c update_cu_mask(mm, mqd, q); q 253 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c set_priority(m, q); q 255 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c q->is_active = QUEUE_IS_ACTIVE(*q); q 315 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c struct queue_properties *q) q 319 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c init_mqd(mm, mqd, mqd_mem_obj, gart_addr, q); q 328 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c struct queue_properties *q) q 332 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c update_mqd(mm, mqd, q); q 336 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c m->cp_hqd_vmid = q->vmid; q 341 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c struct queue_properties *q) q 353 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c mm->update_mqd(mm, m, q); q 368 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c struct queue_properties *q) q 373 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c m->sdmax_rlcx_rb_cntl = order_base_2(q->queue_size / 4) q 375 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c q->vmid << SDMA0_RLC0_RB_CNTL__RB_VMID__SHIFT | q 379 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c m->sdmax_rlcx_rb_base = lower_32_bits(q->queue_address >> 8); q 380 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c m->sdmax_rlcx_rb_base_hi = upper_32_bits(q->queue_address >> 8); q 381 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c m->sdmax_rlcx_rb_rptr_addr_lo = lower_32_bits((uint64_t)q->read_ptr); q 382 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c m->sdmax_rlcx_rb_rptr_addr_hi = upper_32_bits((uint64_t)q->read_ptr); q 384 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c q->doorbell_off << SDMA0_RLC0_DOORBELL_OFFSET__OFFSET__SHIFT; q 386 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c m->sdma_engine_id = q->sdma_engine_id; q 387 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c m->sdma_queue_id = q->sdma_queue_id; q 390 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c q->is_active = QUEUE_IS_ACTIVE(*q); q 48 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c struct queue_properties *q) q 53 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c if (q->cu_mask_count == 0) q 57 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c q->cu_mask, q->cu_mask_count, se_mask); q 72 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c static void set_priority(struct vi_mqd *m, struct queue_properties *q) q 74 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c m->cp_hqd_pipe_priority = pipe_priority_map[q->priority]; q 75 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c m->cp_hqd_queue_priority = q->priority; q 79 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c struct queue_properties *q) q 92 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c struct queue_properties *q) q 122 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c set_priority(m, q); q 125 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c if (q->format == KFD_QUEUE_FORMAT_AQL) q 128 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c if (q->tba_addr) { q 129 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c m->compute_tba_lo = lower_32_bits(q->tba_addr >> 8); q 130 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c m->compute_tba_hi = upper_32_bits(q->tba_addr >> 8); q 131 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c m->compute_tma_lo = lower_32_bits(q->tma_addr >> 8); q 132 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c m->compute_tma_hi = upper_32_bits(q->tma_addr >> 8); q 137 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c if (mm->dev->cwsr_enabled && q->ctx_save_restore_area_address) { q 141 
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c lower_32_bits(q->ctx_save_restore_area_address); q 143 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c upper_32_bits(q->ctx_save_restore_area_address); q 144 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c m->cp_hqd_ctx_save_size = q->ctx_save_restore_area_size; q 145 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c m->cp_hqd_cntl_stack_size = q->ctl_stack_size; q 146 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c m->cp_hqd_cntl_stack_offset = q->ctl_stack_size; q 147 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c m->cp_hqd_wg_state_offset = q->ctl_stack_size; q 153 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c mm->update_mqd(mm, m, q); q 170 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c struct queue_properties *q, unsigned int mtype, q 180 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c m->cp_hqd_pq_control |= order_base_2(q->queue_size / 4) - 1; q 183 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8); q 184 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c m->cp_hqd_pq_base_hi = upper_32_bits((uint64_t)q->queue_address >> 8); q 186 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c m->cp_hqd_pq_rptr_report_addr_lo = lower_32_bits((uint64_t)q->read_ptr); q 187 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c m->cp_hqd_pq_rptr_report_addr_hi = upper_32_bits((uint64_t)q->read_ptr); q 188 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c m->cp_hqd_pq_wptr_poll_addr_lo = lower_32_bits((uint64_t)q->write_ptr); q 189 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c m->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits((uint64_t)q->write_ptr); q 192 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c q->doorbell_off << q 212 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c order_base_2(q->eop_ring_buffer_size / 4) - 1); q 214 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c lower_32_bits(q->eop_ring_buffer_address >> 8); q 216 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c upper_32_bits(q->eop_ring_buffer_address >> 8); q 221 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c m->cp_hqd_vmid = q->vmid; q 223 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c if (q->format == KFD_QUEUE_FORMAT_AQL) { q 228 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c if (mm->dev->cwsr_enabled && q->ctx_save_restore_area_address) q 233 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c update_cu_mask(mm, mqd, q); q 234 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c set_priority(m, q); q 236 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c q->is_active = QUEUE_IS_ACTIVE(*q); q 241 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c struct queue_properties *q) q 243 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c __update_mqd(mm, mqd, q, MTYPE_CC, 1); q 247 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c struct queue_properties *q) q 249 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c __update_mqd(mm, mqd, q, MTYPE_UC, 0); q 301 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c struct queue_properties *q) q 304 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c init_mqd(mm, mqd, mqd_mem_obj, gart_addr, q); q 313 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c struct queue_properties *q) q 316 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c __update_mqd(mm, mqd, q, MTYPE_UC, 0); q 319 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c m->cp_hqd_vmid = q->vmid; q 324 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c struct queue_properties *q) q 336 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c mm->update_mqd(mm, m, q); q 349 
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c struct queue_properties *q) q 354 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c m->sdmax_rlcx_rb_cntl = order_base_2(q->queue_size / 4) q 356 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c q->vmid << SDMA0_RLC0_RB_CNTL__RB_VMID__SHIFT | q 360 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c m->sdmax_rlcx_rb_base = lower_32_bits(q->queue_address >> 8); q 361 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c m->sdmax_rlcx_rb_base_hi = upper_32_bits(q->queue_address >> 8); q 362 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c m->sdmax_rlcx_rb_rptr_addr_lo = lower_32_bits((uint64_t)q->read_ptr); q 363 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c m->sdmax_rlcx_rb_rptr_addr_hi = upper_32_bits((uint64_t)q->read_ptr); q 365 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c q->doorbell_off << SDMA0_RLC0_DOORBELL__OFFSET__SHIFT; q 367 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c m->sdmax_rlcx_virtual_addr = q->sdma_vm_addr; q 369 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c m->sdma_engine_id = q->sdma_engine_id; q 370 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c m->sdma_queue_id = q->sdma_queue_id; q 372 drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c q->is_active = QUEUE_IS_ACTIVE(*q); q 129 drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c struct queue *q; q 183 drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c list_for_each_entry(q, &qpd->queues_list, list) { q 184 drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c if (!q->properties.is_active) q 188 drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c q->queue, qpd->is_debug); q 192 drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c q, q 451 drivers/gpu/drm/amd/amdkfd/kfd_priv.h #define QUEUE_IS_ACTIVE(q) ((q).queue_size > 0 && \ q 452 drivers/gpu/drm/amd/amdkfd/kfd_priv.h (q).queue_address != 0 && \ q 453 drivers/gpu/drm/amd/amdkfd/kfd_priv.h (q).queue_percent > 0 && \ q 454 drivers/gpu/drm/amd/amdkfd/kfd_priv.h !(q).is_evicted) q 867 drivers/gpu/drm/amd/amdkfd/kfd_priv.h int init_queue(struct queue **q, const struct queue_properties *properties); q 868 drivers/gpu/drm/amd/amdkfd/kfd_priv.h void uninit_queue(struct queue *q); q 869 drivers/gpu/drm/amd/amdkfd/kfd_priv.h void print_queue_properties(struct queue_properties *q); q 870 drivers/gpu/drm/amd/amdkfd/kfd_priv.h void print_queue(struct queue *q); q 893 drivers/gpu/drm/amd/amdkfd/kfd_priv.h struct queue *q; q 952 drivers/gpu/drm/amd/amdkfd/kfd_priv.h struct queue *q, bool is_static); q 37 drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c if ((pqn->q && pqn->q->properties.queue_id == qid) || q 93 drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c if (pqn->q) q 94 drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c dev = pqn->q->device; q 116 drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c pqn->q->gws); q 120 drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c pqn->q->gws = mem; q 123 drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c return pqn->q->device->dqm->ops.update_queue(pqn->q->device->dqm, q 124 drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c pqn->q); q 153 drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c if (pqn->q && pqn->q->gws) q 155 drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c pqn->q->gws); q 156 drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c uninit_queue(pqn->q); q 166 drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c struct kfd_dev *dev, struct queue **q, q 179 drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c retval = init_queue(q, q_properties); q 183 
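[note] Both the v9 and vi MQD entries above program ring sizes as order_base_2(q->queue_size / 4) - 1. A minimal sketch of that encoding, assuming queue_size is a power-of-two byte count of 4-byte (dword) entries; order_base_2() here is a local stand-in for the kernel helper of the same name:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static unsigned order_base_2(uint64_t n)        /* ceil(log2(n)) */
{
        unsigned order = 0;

        while ((1ULL << order) < n)
                order++;
        return order;
}

int main(void)
{
        uint64_t queue_size = 64 * 1024;        /* ring size in bytes */
        /* the hardware field stores log2(#dwords) - 1 */
        unsigned rb_size = order_base_2(queue_size / 4) - 1;

        printf("%llu bytes -> RB_SIZE=%u\n",
               (unsigned long long)queue_size, rb_size);
        assert((4ULL << (rb_size + 1)) == queue_size);  /* round-trips */
        return 0;
}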
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c (*q)->device = dev; q 184 drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c (*q)->process = pqm->process; q 199 drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c struct queue *q; q 205 drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c q = NULL; q 253 drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c retval = create_cp_queue(pqm, dev, &q, properties, f, *qid); q 256 drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c pqn->q = q; q 258 drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd); q 260 drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c print_queue(q); q 274 drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c retval = create_cp_queue(pqm, dev, &q, properties, f, *qid); q 277 drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c pqn->q = q; q 279 drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd); q 281 drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c print_queue(q); q 291 drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c pqn->q = NULL; q 306 drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c if (q) q 312 drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c (q->properties.doorbell_off * sizeof(uint32_t)) & q 319 drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c if (q) { q 321 drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c print_queue_properties(&q->properties); q 358 drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c if (pqn->q) q 359 drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c dev = pqn->q->device; q 376 drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c if (pqn->q) { q 377 drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c dqm = pqn->q->device->dqm; q 378 drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c retval = dqm->ops.destroy_queue(dqm, &pdd->qpd, pqn->q); q 382 drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c pqn->q->properties.queue_id, retval); q 387 drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c if (pqn->q->gws) { q 389 drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c pqn->q->gws); q 393 drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c kfree(pqn->q->properties.cu_mask); q 394 drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c pqn->q->properties.cu_mask = NULL; q 395 drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c uninit_queue(pqn->q); q 422 drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c pqn->q->properties.queue_address = p->queue_address; q 423 drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c pqn->q->properties.queue_size = p->queue_size; q 424 drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c pqn->q->properties.queue_percent = p->queue_percent; q 425 drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c pqn->q->properties.priority = p->priority; q 427 drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c retval = pqn->q->device->dqm->ops.update_queue(pqn->q->device->dqm, q 428 drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c pqn->q); q 450 drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c kfree(pqn->q->properties.cu_mask); q 452 drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c pqn->q->properties.cu_mask_count = p->cu_mask_count; q 453 drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c pqn->q->properties.cu_mask = p->cu_mask; q 455 drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c retval = 
pqn->q->device->dqm->ops.update_queue(pqn->q->device->dqm, q 456 drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c pqn->q); q 491 drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c return pqn->q->device->dqm->ops.get_wave_state(pqn->q->device->dqm, q 492 drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c pqn->q, q 504 drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c struct queue *q; q 510 drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c if (pqn->q) { q 511 drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c q = pqn->q; q 512 drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c switch (q->properties.type) { q 516 drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c q->device->id); q 521 drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c q->device->id); q 527 drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c q->properties.type, q->device->id); q 530 drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c mqd_mgr = q->device->dqm->mqd_mgrs[mqd_type]; q 532 drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c q = pqn->kq->queue; q 534 drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c switch (q->properties.type) { q 542 drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c q->properties.type, q 552 drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c r = mqd_mgr->debugfs_show_mqd(m, q->mqd); q 27 drivers/gpu/drm/amd/amdkfd/kfd_queue.c void print_queue_properties(struct queue_properties *q) q 29 drivers/gpu/drm/amd/amdkfd/kfd_queue.c if (!q) q 33 drivers/gpu/drm/amd/amdkfd/kfd_queue.c pr_debug("Queue Type: %u\n", q->type); q 34 drivers/gpu/drm/amd/amdkfd/kfd_queue.c pr_debug("Queue Size: %llu\n", q->queue_size); q 35 drivers/gpu/drm/amd/amdkfd/kfd_queue.c pr_debug("Queue percent: %u\n", q->queue_percent); q 36 drivers/gpu/drm/amd/amdkfd/kfd_queue.c pr_debug("Queue Address: 0x%llX\n", q->queue_address); q 37 drivers/gpu/drm/amd/amdkfd/kfd_queue.c pr_debug("Queue Id: %u\n", q->queue_id); q 38 drivers/gpu/drm/amd/amdkfd/kfd_queue.c pr_debug("Queue Process Vmid: %u\n", q->vmid); q 39 drivers/gpu/drm/amd/amdkfd/kfd_queue.c pr_debug("Queue Read Pointer: 0x%px\n", q->read_ptr); q 40 drivers/gpu/drm/amd/amdkfd/kfd_queue.c pr_debug("Queue Write Pointer: 0x%px\n", q->write_ptr); q 41 drivers/gpu/drm/amd/amdkfd/kfd_queue.c pr_debug("Queue Doorbell Pointer: 0x%p\n", q->doorbell_ptr); q 42 drivers/gpu/drm/amd/amdkfd/kfd_queue.c pr_debug("Queue Doorbell Offset: %u\n", q->doorbell_off); q 45 drivers/gpu/drm/amd/amdkfd/kfd_queue.c void print_queue(struct queue *q) q 47 drivers/gpu/drm/amd/amdkfd/kfd_queue.c if (!q) q 50 drivers/gpu/drm/amd/amdkfd/kfd_queue.c pr_debug("Queue Type: %u\n", q->properties.type); q 51 drivers/gpu/drm/amd/amdkfd/kfd_queue.c pr_debug("Queue Size: %llu\n", q->properties.queue_size); q 52 drivers/gpu/drm/amd/amdkfd/kfd_queue.c pr_debug("Queue percent: %u\n", q->properties.queue_percent); q 53 drivers/gpu/drm/amd/amdkfd/kfd_queue.c pr_debug("Queue Address: 0x%llX\n", q->properties.queue_address); q 54 drivers/gpu/drm/amd/amdkfd/kfd_queue.c pr_debug("Queue Id: %u\n", q->properties.queue_id); q 55 drivers/gpu/drm/amd/amdkfd/kfd_queue.c pr_debug("Queue Process Vmid: %u\n", q->properties.vmid); q 56 drivers/gpu/drm/amd/amdkfd/kfd_queue.c pr_debug("Queue Read Pointer: 0x%px\n", q->properties.read_ptr); q 57 drivers/gpu/drm/amd/amdkfd/kfd_queue.c pr_debug("Queue Write Pointer: 0x%px\n", q->properties.write_ptr); q 58 drivers/gpu/drm/amd/amdkfd/kfd_queue.c pr_debug("Queue Doorbell Pointer: 0x%p\n", q->properties.doorbell_ptr); q 59 
drivers/gpu/drm/amd/amdkfd/kfd_queue.c pr_debug("Queue Doorbell Offset: %u\n", q->properties.doorbell_off); q 60 drivers/gpu/drm/amd/amdkfd/kfd_queue.c pr_debug("Queue MQD Address: 0x%p\n", q->mqd); q 61 drivers/gpu/drm/amd/amdkfd/kfd_queue.c pr_debug("Queue MQD Gart: 0x%llX\n", q->gart_mqd_addr); q 62 drivers/gpu/drm/amd/amdkfd/kfd_queue.c pr_debug("Queue Process Address: 0x%p\n", q->process); q 63 drivers/gpu/drm/amd/amdkfd/kfd_queue.c pr_debug("Queue Device Address: 0x%p\n", q->device); q 66 drivers/gpu/drm/amd/amdkfd/kfd_queue.c int init_queue(struct queue **q, const struct queue_properties *properties) q 76 drivers/gpu/drm/amd/amdkfd/kfd_queue.c *q = tmp_q; q 80 drivers/gpu/drm/amd/amdkfd/kfd_queue.c void uninit_queue(struct queue *q) q 82 drivers/gpu/drm/amd/amdkfd/kfd_queue.c kfree(q); q 265 drivers/gpu/drm/drm_debugfs.c struct list_head *pos, *q; q 271 drivers/gpu/drm/drm_debugfs.c list_for_each_safe(pos, q, &minor->debugfs_list) { q 157 drivers/gpu/drm/i915/display/intel_quirks.c struct intel_quirk *q = &intel_quirks[i]; q 159 drivers/gpu/drm/i915/display/intel_quirks.c if (d->device == q->device && q 160 drivers/gpu/drm/i915/display/intel_quirks.c (d->subsystem_vendor == q->subsystem_vendor || q 161 drivers/gpu/drm/i915/display/intel_quirks.c q->subsystem_vendor == PCI_ANY_ID) && q 162 drivers/gpu/drm/i915/display/intel_quirks.c (d->subsystem_device == q->subsystem_device || q 163 drivers/gpu/drm/i915/display/intel_quirks.c q->subsystem_device == PCI_ANY_ID)) q 164 drivers/gpu/drm/i915/display/intel_quirks.c q->hook(i915); q 71 drivers/gpu/drm/i915/gvt/reg.h typeof(_plane) (q) = (_plane); \ q 72 drivers/gpu/drm/i915/gvt/reg.h (((p) == PIPE_A) ? (((q) == PLANE_PRIMARY) ? (_MMIO(0x50080)) : \ q 74 drivers/gpu/drm/i915/gvt/reg.h (((p) == PIPE_B) ? (((q) == PLANE_PRIMARY) ? (_MMIO(0x50088)) : \ q 76 drivers/gpu/drm/i915/gvt/reg.h (((p) == PIPE_C) ? (((q) == PLANE_PRIMARY) ? 
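[note] The intel_quirks.c entries above show the classic quirk-table pattern: an entry fires when the device id matches and each subsystem id either matches or is the PCI_ANY_ID wildcard. A standalone sketch of the same matching logic; the struct layout and table contents are illustrative, not the i915 ones:

#include <stdio.h>

#define PCI_ANY_ID (~0u)        /* wildcard, as in the PCI core */

struct quirk {
        unsigned device, subsystem_vendor, subsystem_device;
        void (*hook)(void);
};

static void apply_workaround(void) { printf("quirk hook ran\n"); }

static const struct quirk quirks[] = {
        /* hypothetical entry: any subsystem of device 0x1234 */
        { 0x1234, PCI_ANY_ID, PCI_ANY_ID, apply_workaround },
};

int main(void)
{
        unsigned dev = 0x1234, sub_ven = 0xabcd, sub_dev = 0x0001;
        unsigned i;

        for (i = 0; i < sizeof(quirks) / sizeof(quirks[0]); i++) {
                const struct quirk *q = &quirks[i];

                if (dev == q->device &&
                    (sub_ven == q->subsystem_vendor ||
                     q->subsystem_vendor == PCI_ANY_ID) &&
                    (sub_dev == q->subsystem_device ||
                     q->subsystem_device == PCI_ANY_ID))
                        q->hook();
        }
        return 0;
}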
(_MMIO(0x5008C)) : \ q 1480 drivers/gpu/drm/i915/gvt/scheduler.c struct list_head *q = workload_q_head(vgpu, ring_id); q 1507 drivers/gpu/drm/i915/gvt/scheduler.c list_for_each_entry_reverse(last_workload, q, list) { q 276 drivers/gpu/drm/i915/intel_uncore.h __raw_read(64, q) q 281 drivers/gpu/drm/i915/intel_uncore.h __raw_write(64, q) q 325 drivers/gpu/drm/i915/intel_uncore.h __uncore_read(read64, 64, q, true) q 43 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0137c63d.c #define msgqueue_0137c63d(q) \ q 44 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0137c63d.c container_of(q, struct msgqueue_0137c63d, base) q 51 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0137c63d.c #define msgqueue_0137bca5(q) \ q 52 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0137c63d.c container_of(container_of(q, struct msgqueue_0137c63d, base), \ q 44 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0148cdec.c #define msgqueue_0148cdec(q) \ q 45 drivers/gpu/drm/nouveau/nvkm/falcon/msgqueue_0148cdec.c container_of(q, struct msgqueue_0148cdec, base) q 424 drivers/gpu/drm/omapdrm/dss/hdmi5_core.c unsigned int itc, ec, q, sc; q 446 drivers/gpu/drm/omapdrm/dss/hdmi5_core.c q = (ptr[2] >> 2) & 0x3; q 462 drivers/gpu/drm/omapdrm/dss/hdmi5_core.c (itc << 7) | (ec << 4) | (q << 2) | (sc << 0)); q 163 drivers/gpu/drm/v3d/v3d_drv.c enum v3d_queue q; q 165 drivers/gpu/drm/v3d/v3d_drv.c for (q = 0; q < V3D_MAX_QUEUES; q++) { q 166 drivers/gpu/drm/v3d/v3d_drv.c drm_sched_entity_destroy(&v3d_priv->sched_entity[q]); q 265 drivers/gpu/drm/v3d/v3d_sched.c enum v3d_queue q; q 270 drivers/gpu/drm/v3d/v3d_sched.c for (q = 0; q < V3D_MAX_QUEUES; q++) q 271 drivers/gpu/drm/v3d/v3d_sched.c drm_sched_stop(&v3d->queue[q].sched, sched_job); q 279 drivers/gpu/drm/v3d/v3d_sched.c for (q = 0; q < V3D_MAX_QUEUES; q++) q 280 drivers/gpu/drm/v3d/v3d_sched.c drm_sched_resubmit_jobs(&v3d->queue[q].sched); q 283 drivers/gpu/drm/v3d/v3d_sched.c for (q = 0; q < V3D_MAX_QUEUES; q++) { q 284 drivers/gpu/drm/v3d/v3d_sched.c drm_sched_start(&v3d->queue[q].sched, true); q 296 drivers/gpu/drm/v3d/v3d_sched.c v3d_cl_job_timedout(struct drm_sched_job *sched_job, enum v3d_queue q, q 301 drivers/gpu/drm/v3d/v3d_sched.c u32 ctca = V3D_CORE_READ(0, V3D_CLE_CTNCA(q)); q 302 drivers/gpu/drm/v3d/v3d_sched.c u32 ctra = V3D_CORE_READ(0, V3D_CLE_CTNRA(q)); q 466 drivers/gpu/drm/v3d/v3d_sched.c enum v3d_queue q; q 468 drivers/gpu/drm/v3d/v3d_sched.c for (q = 0; q < V3D_MAX_QUEUES; q++) { q 469 drivers/gpu/drm/v3d/v3d_sched.c if (v3d->queue[q].sched.ready) q 470 drivers/gpu/drm/v3d/v3d_sched.c drm_sched_fini(&v3d->queue[q].sched); q 54 drivers/gpu/drm/xen/xen_drm_front.c struct xen_drm_front_dbuf *buf, *q; q 56 drivers/gpu/drm/xen/xen_drm_front.c list_for_each_entry_safe(buf, q, dbuf_list, list) q 65 drivers/gpu/drm/xen/xen_drm_front.c struct xen_drm_front_dbuf *buf, *q; q 67 drivers/gpu/drm/xen/xen_drm_front.c list_for_each_entry_safe(buf, q, dbuf_list, list) q 79 drivers/gpu/drm/xen/xen_drm_front.c struct xen_drm_front_dbuf *buf, *q; q 81 drivers/gpu/drm/xen/xen_drm_front.c list_for_each_entry_safe(buf, q, dbuf_list, list) { q 13 drivers/gpu/ipu-v3/ipu-ic-csc.c #define QUANT_MAP(q) \ q 14 drivers/gpu/ipu-v3/ipu-ic-csc.c ((q) == V4L2_QUANTIZATION_FULL_RANGE || \ q 15 drivers/gpu/ipu-v3/ipu-ic-csc.c (q) == V4L2_QUANTIZATION_DEFAULT ? 
0 : 1) q 1239 drivers/gpu/ipu-v3/ipu-image-convert.c struct list_head *q) q 1246 drivers/gpu/ipu-v3/ipu-image-convert.c list_for_each_entry(run, q, list) { q 1037 drivers/hid/hid-quirks.c struct quirks_list_struct *q; q 1040 drivers/hid/hid-quirks.c list_for_each_entry(q, &dquirks_list, node) { q 1041 drivers/hid/hid-quirks.c if (hid_match_one_id(hdev, &q->hid_bl_item)) { q 1042 drivers/hid/hid-quirks.c bl_entry = &q->hid_bl_item; q 1072 drivers/hid/hid-quirks.c struct quirks_list_struct *q_new, *q; q 1094 drivers/hid/hid-quirks.c list_for_each_entry(q, &dquirks_list, node) { q 1096 drivers/hid/hid-quirks.c if (hid_match_one_id(hdev, &q->hid_bl_item)) { q 1098 drivers/hid/hid-quirks.c list_replace(&q->node, &q_new->node); q 1099 drivers/hid/hid-quirks.c kfree(q); q 1128 drivers/hid/hid-quirks.c struct quirks_list_struct *q, *temp; q 1131 drivers/hid/hid-quirks.c list_for_each_entry_safe(q, temp, &dquirks_list, node) { q 1132 drivers/hid/hid-quirks.c if (bus == HID_BUS_ANY || bus == q->hid_bl_item.bus) { q 1133 drivers/hid/hid-quirks.c list_del(&q->node); q 1134 drivers/hid/hid-quirks.c kfree(q); q 99 drivers/i2c/busses/i2c-au1550.c do_address(struct i2c_au1550_data *adap, unsigned int addr, int rd, int q) q 120 drivers/i2c/busses/i2c-au1550.c if (q) q 128 drivers/i2c/busses/i2c-au1550.c return (q) ? wait_master_done(adap) : 0; q 1902 drivers/i2c/i2c-core-base.c const struct i2c_adapter_quirks *q = adap->quirks; q 1903 drivers/i2c/i2c-core-base.c int max_num = q->max_num_msgs, i; q 1906 drivers/i2c/i2c-core-base.c if (q->flags & I2C_AQ_COMB) { q 1911 drivers/i2c/i2c-core-base.c if (q->flags & I2C_AQ_COMB_WRITE_FIRST && msgs[0].flags & I2C_M_RD) q 1914 drivers/i2c/i2c-core-base.c if (q->flags & I2C_AQ_COMB_READ_SECOND && !(msgs[1].flags & I2C_M_RD)) q 1917 drivers/i2c/i2c-core-base.c if (q->flags & I2C_AQ_COMB_SAME_ADDR && msgs[0].addr != msgs[1].addr) q 1920 drivers/i2c/i2c-core-base.c if (i2c_quirk_exceeded(msgs[0].len, q->max_comb_1st_msg_len)) q 1923 drivers/i2c/i2c-core-base.c if (i2c_quirk_exceeded(msgs[1].len, q->max_comb_2nd_msg_len)) q 1937 drivers/i2c/i2c-core-base.c if (do_len_check && i2c_quirk_exceeded(len, q->max_read_len)) q 1940 drivers/i2c/i2c-core-base.c if (q->flags & I2C_AQ_NO_ZERO_LEN_READ && len == 0) q 1943 drivers/i2c/i2c-core-base.c if (do_len_check && i2c_quirk_exceeded(len, q->max_write_len)) q 1946 drivers/i2c/i2c-core-base.c if (q->flags & I2C_AQ_NO_ZERO_LEN_WRITE && len == 0) q 531 drivers/ide/ide-cd.c static bool ide_cdrom_prep_fs(struct request_queue *q, struct request *rq) q 533 drivers/ide/ide-cd.c int hard_sect = queue_logical_block_size(q); q 803 drivers/ide/ide-cd.c struct request_queue *q = drive->queue; q 806 drivers/ide/ide-cd.c queue_logical_block_size(q) >> SECTOR_SHIFT; q 855 drivers/ide/ide-cd.c struct request_queue *q = drive->queue; q 867 drivers/ide/ide-cd.c alignment = queue_dma_alignment(q) | q->dma_pad_mask; q 869 drivers/ide/ide-cd.c || blk_rq_bytes(rq) & q->dma_pad_mask q 1517 drivers/ide/ide-cd.c struct request_queue *q = drive->queue; q 1525 drivers/ide/ide-cd.c blk_queue_dma_alignment(q, 31); q 1526 drivers/ide/ide-cd.c blk_queue_update_dma_pad(q, 15); q 1544 drivers/ide/ide-cd.c blk_queue_logical_block_size(q, CD_FRAMESIZE); q 162 drivers/ide/ide-devsets.c struct request_queue *q = drive->queue; q 169 drivers/ide/ide-devsets.c rq = blk_get_request(q, REQ_OP_DRV_IN, 0); q 176 drivers/ide/ide-devsets.c blk_execute_rq(q, NULL, rq, 0); q 655 drivers/ide/ide-disk.c struct request_queue *q = drive->queue; q 682 drivers/ide/ide-disk.c 
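[note] The i2c-core-base.c entries above validate transfers against per-adapter quirk flags (I2C_AQ_COMB, I2C_AQ_NO_ZERO_LEN_READ, max_read_len, ...). A simplified sketch of that shape check; the flag names, limits, and message struct here are stand-ins, not the kernel's i2c_adapter_quirks API:

#include <stdio.h>

#define AQ_NO_ZERO_LEN_READ  (1 << 0)   /* illustrative flag */
#define AQ_MAX_READ_LEN      32         /* illustrative limit */

struct msg { unsigned len; int is_read; };

static int quirk_check(const struct msg *m, unsigned flags)
{
        if (m->is_read && (flags & AQ_NO_ZERO_LEN_READ) && m->len == 0)
                return -1;              /* zero-length read forbidden */
        if (m->is_read && m->len > AQ_MAX_READ_LEN)
                return -1;              /* quirk exceeded */
        return 0;
}

int main(void)
{
        struct msg ok = { 16, 1 }, bad = { 0, 1 };

        printf("ok: %d, bad: %d\n",
               quirk_check(&ok, AQ_NO_ZERO_LEN_READ),
               quirk_check(&bad, AQ_NO_ZERO_LEN_READ));
        return 0;
}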
blk_queue_max_hw_sectors(q, max_s); q 686 drivers/ide/ide-disk.c queue_max_sectors(q) / 2); q 689 drivers/ide/ide-disk.c blk_queue_flag_set(QUEUE_FLAG_NONROT, q); q 690 drivers/ide/ide-disk.c blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q); q 446 drivers/ide/ide-io.c struct request_queue *q = drive->queue; q 451 drivers/ide/ide-io.c blk_mq_delay_kick_requeue_list(q, 3); q 453 drivers/ide/ide-io.c blk_mq_delay_run_hw_queue(q->queue_hw_ctx[0], 3); q 13 drivers/ide/ide-park.c struct request_queue *q = drive->queue; q 30 drivers/ide/ide-park.c blk_mq_run_hw_queues(q, true); q 35 drivers/ide/ide-park.c rq = blk_get_request(q, REQ_OP_DRV_IN, 0); q 40 drivers/ide/ide-park.c blk_execute_rq(q, NULL, rq, 1); q 50 drivers/ide/ide-park.c rq = blk_get_request(q, REQ_OP_DRV_IN, BLK_MQ_REQ_NOWAIT); q 45 drivers/ide/ide-pm.c struct request_queue *q = rq->q; q 47 drivers/ide/ide-pm.c if (unlikely(blk_queue_dying(q))) { q 53 drivers/ide/ide-pm.c blk_execute_rq(q, NULL, rq, true); q 202 drivers/ide/ide-pm.c struct request_queue *q = drive->queue; q 214 drivers/ide/ide-pm.c blk_mq_stop_hw_queues(q); q 245 drivers/ide/ide-pm.c struct request_queue *q = drive->queue; q 259 drivers/ide/ide-pm.c blk_mq_start_hw_queues(q); q 765 drivers/ide/ide-probe.c struct request_queue *q; q 790 drivers/ide/ide-probe.c q = blk_mq_init_queue(set); q 791 drivers/ide/ide-probe.c if (IS_ERR(q)) { q 796 drivers/ide/ide-probe.c blk_queue_flag_set(QUEUE_FLAG_SCSI_PASSTHROUGH, q); q 798 drivers/ide/ide-probe.c q->queuedata = drive; q 799 drivers/ide/ide-probe.c blk_queue_segment_boundary(q, 0xffff); q 803 drivers/ide/ide-probe.c blk_queue_max_hw_sectors(q, max_sectors); q 818 drivers/ide/ide-probe.c blk_queue_max_segments(q, max_sg_entries); q 821 drivers/ide/ide-probe.c drive->queue = q; q 328 drivers/ide/ide-proc.c char *q = p; q 336 drivers/ide/ide-proc.c if (p - q > MAX_LEN) q 338 drivers/ide/ide-proc.c memcpy(name, q, p - q); q 339 drivers/ide/ide-proc.c name[p - q] = 0; q 347 drivers/ide/ide-proc.c val = simple_strtoul(p, &q, 10); q 348 drivers/ide/ide-proc.c n -= q - p; q 349 drivers/ide/ide-proc.c p = q; q 96 drivers/ide/ide-timings.c static void ide_timing_quantize(struct ide_timing *t, struct ide_timing *q, q 99 drivers/ide/ide-timings.c q->setup = EZ(t->setup, T); q 100 drivers/ide/ide-timings.c q->act8b = EZ(t->act8b, T); q 101 drivers/ide/ide-timings.c q->rec8b = EZ(t->rec8b, T); q 102 drivers/ide/ide-timings.c q->cyc8b = EZ(t->cyc8b, T); q 103 drivers/ide/ide-timings.c q->active = EZ(t->active, T); q 104 drivers/ide/ide-timings.c q->recover = EZ(t->recover, T); q 105 drivers/ide/ide-timings.c q->cycle = EZ(t->cycle, T); q 106 drivers/ide/ide-timings.c q->udma = EZ(t->udma, UT); q 683 drivers/iio/common/st_sensors/st_sensors_core.c int i, len = 0, q, r; q 692 drivers/iio/common/st_sensors/st_sensors_core.c q = sdata->sensor_settings->fs.fs_avl[i].gain / 1000000; q 695 drivers/iio/common/st_sensors/st_sensors_core.c len += scnprintf(buf + len, PAGE_SIZE - len, "%u.%06u ", q, r); q 794 drivers/iio/industrialio-buffer.c struct iio_demux_table *p, *q; q 795 drivers/iio/industrialio-buffer.c list_for_each_entry_safe(p, q, &buffer->demux_list, l) { q 232 drivers/infiniband/hw/hfi1/affinity.c struct list_head *pos, *q; q 236 drivers/infiniband/hw/hfi1/affinity.c list_for_each_safe(pos, q, &node_affinity.list) { q 127 drivers/infiniband/hw/hfi1/mad.c struct trap_node *node, *q; q 142 drivers/infiniband/hw/hfi1/mad.c list_for_each_entry_safe(node, q, &trap_list, list) { q 1029 drivers/infiniband/hw/hfi1/mad.c u16 *q; q 1062 
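[note] ide_timing_quantize() above converts each nanosecond timing to clock-cycle counts via the EZ() macro. EZ()'s definition is not shown in this listing; the sketch below assumes the usual round-up division with a zero passthrough, which is the behavior the call sites imply:

#include <stdio.h>

/* assumed form: round up, but keep 0 as "timing unused" */
#define EZ(v, unit) ((v) ? ((v) - 1) / (unit) + 1 : 0)

int main(void)
{
        int setup_ns = 70;      /* required setup time (example value) */
        int T = 30;             /* bus clock period in ns (example value) */

        /* 70 ns needs 3 whole 30 ns cycles, not 2 */
        printf("setup: %d ns -> %d cycles\n", setup_ns, EZ(setup_ns, T));
        return 0;
}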
drivers/infiniband/hw/hfi1/mad.c q = (u16 *)data; q 1065 drivers/infiniband/hw/hfi1/mad.c get_pkeys(dd, port, q); q 1067 drivers/infiniband/hw/hfi1/mad.c p[i] = cpu_to_be16(q[i]); q 1728 drivers/infiniband/hw/hfi1/mad.c __be16 *q = (__be16 *)data; q 1760 drivers/infiniband/hw/hfi1/mad.c p[i] = be16_to_cpu(q[i]); q 431 drivers/infiniband/hw/hfi1/mad.h #define COUNTER_MASK(q, n) (q << ((9 - n) * 3)) q 1663 drivers/infiniband/hw/hfi1/verbs.c char *names_out, *p, **q; q 1682 drivers/infiniband/hw/hfi1/verbs.c q = (char **)names_out; q 1684 drivers/infiniband/hw/hfi1/verbs.c q[i] = p; q 1034 drivers/infiniband/hw/mlx4/mad.c int p, q; q 1040 drivers/infiniband/hw/mlx4/mad.c for (q = 0; q <= 1; ++q) { q 1043 drivers/infiniband/hw/mlx4/mad.c q ? IB_QPT_GSI : IB_QPT_SMI, q 1050 drivers/infiniband/hw/mlx4/mad.c dev->send_agent[p][q] = agent; q 1052 drivers/infiniband/hw/mlx4/mad.c dev->send_agent[p][q] = NULL; q 1060 drivers/infiniband/hw/mlx4/mad.c for (q = 0; q <= 1; ++q) q 1061 drivers/infiniband/hw/mlx4/mad.c if (dev->send_agent[p][q]) q 1062 drivers/infiniband/hw/mlx4/mad.c ib_unregister_mad_agent(dev->send_agent[p][q]); q 1070 drivers/infiniband/hw/mlx4/mad.c int p, q; q 1073 drivers/infiniband/hw/mlx4/mad.c for (q = 0; q <= 1; ++q) { q 1074 drivers/infiniband/hw/mlx4/mad.c agent = dev->send_agent[p][q]; q 1076 drivers/infiniband/hw/mlx4/mad.c dev->send_agent[p][q] = NULL; q 298 drivers/infiniband/hw/mthca/mthca_mad.c int p, q; q 304 drivers/infiniband/hw/mthca/mthca_mad.c for (q = 0; q <= 1; ++q) { q 306 drivers/infiniband/hw/mthca/mthca_mad.c q ? IB_QPT_GSI : IB_QPT_SMI, q 313 drivers/infiniband/hw/mthca/mthca_mad.c dev->send_agent[p][q] = agent; q 330 drivers/infiniband/hw/mthca/mthca_mad.c for (q = 0; q <= 1; ++q) q 331 drivers/infiniband/hw/mthca/mthca_mad.c if (dev->send_agent[p][q]) q 332 drivers/infiniband/hw/mthca/mthca_mad.c ib_unregister_mad_agent(dev->send_agent[p][q]); q 340 drivers/infiniband/hw/mthca/mthca_mad.c int p, q; q 343 drivers/infiniband/hw/mthca/mthca_mad.c for (q = 0; q <= 1; ++q) { q 344 drivers/infiniband/hw/mthca/mthca_mad.c agent = dev->send_agent[p][q]; q 345 drivers/infiniband/hw/mthca/mthca_mad.c dev->send_agent[p][q] = NULL; q 149 drivers/infiniband/hw/ocrdma/ocrdma.h struct ocrdma_queue_info q; q 561 drivers/infiniband/hw/ocrdma/ocrdma.h if (dev->eq_tbl[indx].q.id == eqid) q 113 drivers/infiniband/hw/ocrdma/ocrdma_hw.c return eq->q.va + (eq->q.tail * sizeof(struct ocrdma_eqe)); q 118 drivers/infiniband/hw/ocrdma/ocrdma_hw.c eq->q.tail = (eq->q.tail + 1) & (OCRDMA_EQ_LEN - 1); q 371 drivers/infiniband/hw/ocrdma/ocrdma_hw.c static void ocrdma_free_q(struct ocrdma_dev *dev, struct ocrdma_queue_info *q) q 373 drivers/infiniband/hw/ocrdma/ocrdma_hw.c dma_free_coherent(&dev->nic_info.pdev->dev, q->size, q->va, q->dma); q 377 drivers/infiniband/hw/ocrdma/ocrdma_hw.c struct ocrdma_queue_info *q, u16 len, u16 entry_size) q 379 drivers/infiniband/hw/ocrdma/ocrdma_hw.c memset(q, 0, sizeof(*q)); q 380 drivers/infiniband/hw/ocrdma/ocrdma_hw.c q->len = len; q 381 drivers/infiniband/hw/ocrdma/ocrdma_hw.c q->entry_size = entry_size; q 382 drivers/infiniband/hw/ocrdma/ocrdma_hw.c q->size = len * entry_size; q 383 drivers/infiniband/hw/ocrdma/ocrdma_hw.c q->va = dma_alloc_coherent(&dev->nic_info.pdev->dev, q->size, &q->dma, q 385 drivers/infiniband/hw/ocrdma/ocrdma_hw.c if (!q->va) q 403 drivers/infiniband/hw/ocrdma/ocrdma_hw.c struct ocrdma_queue_info *q, int queue_type) q 424 drivers/infiniband/hw/ocrdma/ocrdma_hw.c cmd->id = q->id; q 429 
drivers/infiniband/hw/ocrdma/ocrdma_hw.c q->created = false; q 448 drivers/infiniband/hw/ocrdma/ocrdma_hw.c ocrdma_build_q_pages(&cmd->pa[0], cmd->num_pages, eq->q.dma, q 453 drivers/infiniband/hw/ocrdma/ocrdma_hw.c eq->q.id = rsp->vector_eqid & 0xffff; q 455 drivers/infiniband/hw/ocrdma/ocrdma_hw.c eq->q.created = true; q 465 drivers/infiniband/hw/ocrdma/ocrdma_hw.c status = ocrdma_alloc_q(dev, &eq->q, OCRDMA_EQ_LEN, q 474 drivers/infiniband/hw/ocrdma/ocrdma_hw.c ocrdma_ring_eq_db(dev, eq->q.id, true, true, 0); q 478 drivers/infiniband/hw/ocrdma/ocrdma_hw.c ocrdma_free_q(dev, &eq->q); q 495 drivers/infiniband/hw/ocrdma/ocrdma_hw.c if (eq->q.created) { q 496 drivers/infiniband/hw/ocrdma/ocrdma_hw.c ocrdma_mbx_delete_q(dev, &eq->q, QTYPE_EQ); q 497 drivers/infiniband/hw/ocrdma/ocrdma_hw.c ocrdma_free_q(dev, &eq->q); q 508 drivers/infiniband/hw/ocrdma/ocrdma_hw.c ocrdma_ring_eq_db(dev, eq->q.id, false, false, 0); q 615 drivers/infiniband/hw/ocrdma/ocrdma_hw.c status = ocrdma_mbx_mq_cq_create(dev, &dev->mq.cq, &dev->eq_tbl[0].q); q 1012 drivers/infiniband/hw/ocrdma/ocrdma_hw.c eq->q.id, eqe.id_valid); q 1018 drivers/infiniband/hw/ocrdma/ocrdma_hw.c ocrdma_ring_eq_db(dev, eq->q.id, false, true, 1); q 1035 drivers/infiniband/hw/ocrdma/ocrdma_hw.c ocrdma_ring_eq_db(dev, eq->q.id, true, true, 0); q 1756 drivers/infiniband/hw/ocrdma/ocrdma_hw.c eq_id = dev->eq_tbl[0].q.id; q 1763 drivers/infiniband/hw/ocrdma/ocrdma_hw.c eq_id = dev->eq_tbl[i].q.id; q 3132 drivers/infiniband/hw/ocrdma/ocrdma_hw.c cmd->cmd.set_eqd[i].eq_id = eq[i].q.id; q 1564 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c static int ocrdma_hwq_free_cnt(struct ocrdma_qp_hwq_info *q) q 1566 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c return ((q->max_wqe_idx - q->head) + q->tail) % q->max_cnt; q 1579 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c static void *ocrdma_hwq_head(struct ocrdma_qp_hwq_info *q) q 1581 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c return q->va + (q->head * q->entry_size); q 1584 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c static void *ocrdma_hwq_head_from_idx(struct ocrdma_qp_hwq_info *q, q 1587 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c return q->va + (idx * q->entry_size); q 1590 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c static void ocrdma_hwq_inc_head(struct ocrdma_qp_hwq_info *q) q 1592 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c q->head = (q->head + 1) & q->max_wqe_idx; q 1595 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c static void ocrdma_hwq_inc_tail(struct ocrdma_qp_hwq_info *q) q 1597 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c q->tail = (q->tail + 1) & q->max_wqe_idx; q 293 drivers/infiniband/hw/qedr/qedr.h struct qedr_userq q; q 700 drivers/infiniband/hw/qedr/verbs.c struct qedr_userq *q, u64 buf_addr, q 707 drivers/infiniband/hw/qedr/verbs.c q->buf_addr = buf_addr; q 708 drivers/infiniband/hw/qedr/verbs.c q->buf_len = buf_len; q 709 drivers/infiniband/hw/qedr/verbs.c q->umem = ib_umem_get(udata, q->buf_addr, q->buf_len, access, dmasync); q 710 drivers/infiniband/hw/qedr/verbs.c if (IS_ERR(q->umem)) { q 712 drivers/infiniband/hw/qedr/verbs.c PTR_ERR(q->umem)); q 713 drivers/infiniband/hw/qedr/verbs.c return PTR_ERR(q->umem); q 716 drivers/infiniband/hw/qedr/verbs.c fw_pages = ib_umem_page_count(q->umem) << q 719 drivers/infiniband/hw/qedr/verbs.c rc = qedr_prepare_pbl_tbl(dev, &q->pbl_info, fw_pages, 0); q 724 drivers/infiniband/hw/qedr/verbs.c q->pbl_tbl = qedr_alloc_pbl_tbl(dev, &q->pbl_info, GFP_KERNEL); q 725 drivers/infiniband/hw/qedr/verbs.c if (IS_ERR(q->pbl_tbl)) { q 726 
drivers/infiniband/hw/qedr/verbs.c rc = PTR_ERR(q->pbl_tbl); q 729 drivers/infiniband/hw/qedr/verbs.c qedr_populate_pbls(dev, q->umem, q->pbl_tbl, &q->pbl_info, q 732 drivers/infiniband/hw/qedr/verbs.c q->pbl_tbl = kzalloc(sizeof(*q->pbl_tbl), GFP_KERNEL); q 733 drivers/infiniband/hw/qedr/verbs.c if (!q->pbl_tbl) { q 742 drivers/infiniband/hw/qedr/verbs.c ib_umem_release(q->umem); q 743 drivers/infiniband/hw/qedr/verbs.c q->umem = NULL; q 859 drivers/infiniband/hw/qedr/verbs.c rc = qedr_init_user_queue(udata, dev, &cq->q, ureq.addr, q 865 drivers/infiniband/hw/qedr/verbs.c pbl_ptr = cq->q.pbl_tbl->pa; q 866 drivers/infiniband/hw/qedr/verbs.c page_cnt = cq->q.pbl_info.num_pbes; q 930 drivers/infiniband/hw/qedr/verbs.c qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl); q 935 drivers/infiniband/hw/qedr/verbs.c ib_umem_release(cq->q.umem); q 974 drivers/infiniband/hw/qedr/verbs.c qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl); q 975 drivers/infiniband/hw/qedr/verbs.c ib_umem_release(cq->q.umem); q 601 drivers/infiniband/hw/qib/qib_mad.c __be16 *q = (__be16 *) smp->data; q 613 drivers/infiniband/hw/qib/qib_mad.c q[i] = cpu_to_be16(p[i]); q 1044 drivers/infiniband/hw/qib/qib_mad.c u16 *q = (u16 *) smp->data; q 1049 drivers/infiniband/hw/qib/qib_mad.c q[i] = be16_to_cpu(p[i]); q 1051 drivers/infiniband/hw/qib/qib_mad.c if (startpx != 0 || set_pkeys(dd, port, q) != 0) q 292 drivers/infiniband/hw/qib/qib_mad.h #define COUNTER_MASK(q, n) (q << ((9 - n) * 3)) q 784 drivers/infiniband/sw/rdmavt/qp.c struct rvt_qp *q; q 789 drivers/infiniband/sw/rdmavt/qp.c for (; (q = rcu_dereference_protected(*qpp, q 791 drivers/infiniband/sw/rdmavt/qp.c qpp = &q->next) { q 792 drivers/infiniband/sw/rdmavt/qp.c if (q == qp) { q 73 drivers/infiniband/sw/rxe/rxe_queue.c inline void rxe_queue_reset(struct rxe_queue *q) q 79 drivers/infiniband/sw/rxe/rxe_queue.c memset(q->buf->data, 0, q->buf_size - sizeof(struct rxe_queue_buf)); q 86 drivers/infiniband/sw/rxe/rxe_queue.c struct rxe_queue *q; q 94 drivers/infiniband/sw/rxe/rxe_queue.c q = kmalloc(sizeof(*q), GFP_KERNEL); q 95 drivers/infiniband/sw/rxe/rxe_queue.c if (!q) q 98 drivers/infiniband/sw/rxe/rxe_queue.c q->rxe = rxe; q 101 drivers/infiniband/sw/rxe/rxe_queue.c q->elem_size = elem_size; q 108 drivers/infiniband/sw/rxe/rxe_queue.c q->log2_elem_size = order_base_2(elem_size); q 112 drivers/infiniband/sw/rxe/rxe_queue.c q->index_mask = num_slots - 1; q 116 drivers/infiniband/sw/rxe/rxe_queue.c q->buf = vmalloc_user(buf_size); q 117 drivers/infiniband/sw/rxe/rxe_queue.c if (!q->buf) q 120 drivers/infiniband/sw/rxe/rxe_queue.c q->buf->log2_elem_size = q->log2_elem_size; q 121 drivers/infiniband/sw/rxe/rxe_queue.c q->buf->index_mask = q->index_mask; q 123 drivers/infiniband/sw/rxe/rxe_queue.c q->buf_size = buf_size; q 126 drivers/infiniband/sw/rxe/rxe_queue.c return q; q 129 drivers/infiniband/sw/rxe/rxe_queue.c kfree(q); q 138 drivers/infiniband/sw/rxe/rxe_queue.c static int resize_finish(struct rxe_queue *q, struct rxe_queue *new_q, q 141 drivers/infiniband/sw/rxe/rxe_queue.c if (!queue_empty(q) && (num_elem < queue_count(q))) q 144 drivers/infiniband/sw/rxe/rxe_queue.c while (!queue_empty(q)) { q 145 drivers/infiniband/sw/rxe/rxe_queue.c memcpy(producer_addr(new_q), consumer_addr(q), q 148 drivers/infiniband/sw/rxe/rxe_queue.c advance_consumer(q); q 151 drivers/infiniband/sw/rxe/rxe_queue.c swap(*q, *new_q); q 156 drivers/infiniband/sw/rxe/rxe_queue.c int rxe_queue_resize(struct rxe_queue *q, unsigned int *num_elem_p, q 166 
drivers/infiniband/sw/rxe/rxe_queue.c new_q = rxe_queue_init(q->rxe, &num_elem, elem_size); q 182 drivers/infiniband/sw/rxe/rxe_queue.c err = resize_finish(q, new_q, num_elem); q 185 drivers/infiniband/sw/rxe/rxe_queue.c err = resize_finish(q, new_q, num_elem); q 201 drivers/infiniband/sw/rxe/rxe_queue.c void rxe_queue_cleanup(struct rxe_queue *q) q 203 drivers/infiniband/sw/rxe/rxe_queue.c if (q->ip) q 204 drivers/infiniband/sw/rxe/rxe_queue.c kref_put(&q->ip->ref, rxe_mmap_release); q 206 drivers/infiniband/sw/rxe/rxe_queue.c vfree(q->buf); q 208 drivers/infiniband/sw/rxe/rxe_queue.c kfree(q); q 83 drivers/infiniband/sw/rxe/rxe_queue.h void rxe_queue_reset(struct rxe_queue *q); q 89 drivers/infiniband/sw/rxe/rxe_queue.h int rxe_queue_resize(struct rxe_queue *q, unsigned int *num_elem_p, q 99 drivers/infiniband/sw/rxe/rxe_queue.h static inline int next_index(struct rxe_queue *q, int index) q 101 drivers/infiniband/sw/rxe/rxe_queue.h return (index + 1) & q->buf->index_mask; q 104 drivers/infiniband/sw/rxe/rxe_queue.h static inline int queue_empty(struct rxe_queue *q) q 106 drivers/infiniband/sw/rxe/rxe_queue.h return ((q->buf->producer_index - q->buf->consumer_index) q 107 drivers/infiniband/sw/rxe/rxe_queue.h & q->index_mask) == 0; q 110 drivers/infiniband/sw/rxe/rxe_queue.h static inline int queue_full(struct rxe_queue *q) q 112 drivers/infiniband/sw/rxe/rxe_queue.h return ((q->buf->producer_index + 1 - q->buf->consumer_index) q 113 drivers/infiniband/sw/rxe/rxe_queue.h & q->index_mask) == 0; q 116 drivers/infiniband/sw/rxe/rxe_queue.h static inline void advance_producer(struct rxe_queue *q) q 118 drivers/infiniband/sw/rxe/rxe_queue.h q->buf->producer_index = (q->buf->producer_index + 1) q 119 drivers/infiniband/sw/rxe/rxe_queue.h & q->index_mask; q 122 drivers/infiniband/sw/rxe/rxe_queue.h static inline void advance_consumer(struct rxe_queue *q) q 124 drivers/infiniband/sw/rxe/rxe_queue.h q->buf->consumer_index = (q->buf->consumer_index + 1) q 125 drivers/infiniband/sw/rxe/rxe_queue.h & q->index_mask; q 128 drivers/infiniband/sw/rxe/rxe_queue.h static inline void *producer_addr(struct rxe_queue *q) q 130 drivers/infiniband/sw/rxe/rxe_queue.h return q->buf->data + ((q->buf->producer_index & q->index_mask) q 131 drivers/infiniband/sw/rxe/rxe_queue.h << q->log2_elem_size); q 134 drivers/infiniband/sw/rxe/rxe_queue.h static inline void *consumer_addr(struct rxe_queue *q) q 136 drivers/infiniband/sw/rxe/rxe_queue.h return q->buf->data + ((q->buf->consumer_index & q->index_mask) q 137 drivers/infiniband/sw/rxe/rxe_queue.h << q->log2_elem_size); q 140 drivers/infiniband/sw/rxe/rxe_queue.h static inline unsigned int producer_index(struct rxe_queue *q) q 142 drivers/infiniband/sw/rxe/rxe_queue.h return q->buf->producer_index; q 145 drivers/infiniband/sw/rxe/rxe_queue.h static inline unsigned int consumer_index(struct rxe_queue *q) q 147 drivers/infiniband/sw/rxe/rxe_queue.h return q->buf->consumer_index; q 150 drivers/infiniband/sw/rxe/rxe_queue.h static inline void *addr_from_index(struct rxe_queue *q, unsigned int index) q 152 drivers/infiniband/sw/rxe/rxe_queue.h return q->buf->data + ((index & q->index_mask) q 153 drivers/infiniband/sw/rxe/rxe_queue.h << q->buf->log2_elem_size); q 156 drivers/infiniband/sw/rxe/rxe_queue.h static inline unsigned int index_from_addr(const struct rxe_queue *q, q 159 drivers/infiniband/sw/rxe/rxe_queue.h return (((u8 *)addr - q->buf->data) >> q->log2_elem_size) q 160 drivers/infiniband/sw/rxe/rxe_queue.h & q->index_mask; q 163 
drivers/infiniband/sw/rxe/rxe_queue.h static inline unsigned int queue_count(const struct rxe_queue *q) q 165 drivers/infiniband/sw/rxe/rxe_queue.h return (q->buf->producer_index - q->buf->consumer_index) q 166 drivers/infiniband/sw/rxe/rxe_queue.h & q->index_mask; q 169 drivers/infiniband/sw/rxe/rxe_queue.h static inline void *queue_head(struct rxe_queue *q) q 171 drivers/infiniband/sw/rxe/rxe_queue.h return queue_empty(q) ? NULL : consumer_addr(q); q 319 drivers/infiniband/sw/rxe/rxe_resp.c struct rxe_queue *q = srq->rq.queue; q 328 drivers/infiniband/sw/rxe/rxe_resp.c wqe = queue_head(q); q 338 drivers/infiniband/sw/rxe/rxe_resp.c advance_consumer(q); q 341 drivers/infiniband/sw/rxe/rxe_resp.c (queue_count(q) < srq->limit)) { q 107 drivers/infiniband/sw/rxe/rxe_srq.c struct rxe_queue *q; q 121 drivers/infiniband/sw/rxe/rxe_srq.c q = rxe_queue_init(rxe, &srq->rq.max_wr, q 123 drivers/infiniband/sw/rxe/rxe_srq.c if (!q) { q 128 drivers/infiniband/sw/rxe/rxe_srq.c srq->rq.queue = q; q 130 drivers/infiniband/sw/rxe/rxe_srq.c err = do_mmap_info(rxe, uresp ? &uresp->mi : NULL, udata, q->buf, q 131 drivers/infiniband/sw/rxe/rxe_srq.c q->buf_size, &q->ip); q 133 drivers/infiniband/sw/rxe/rxe_srq.c vfree(q->buf); q 134 drivers/infiniband/sw/rxe/rxe_srq.c kfree(q); q 141 drivers/infiniband/sw/rxe/rxe_srq.c rxe_queue_cleanup(q); q 154 drivers/infiniband/sw/rxe/rxe_srq.c struct rxe_queue *q = srq->rq.queue; q 164 drivers/infiniband/sw/rxe/rxe_srq.c err = rxe_queue_resize(q, &attr->max_wr, q 178 drivers/infiniband/sw/rxe/rxe_srq.c rxe_queue_cleanup(q); q 3054 drivers/infiniband/ulp/srp/ib_srp.c struct request_queue *q = sdev->request_queue; q 3059 drivers/infiniband/ulp/srp/ib_srp.c blk_queue_rq_timeout(q, timeout); q 29 drivers/input/misc/hisi_powerkey.c static irqreturn_t hi65xx_power_press_isr(int irq, void *q) q 31 drivers/input/misc/hisi_powerkey.c struct input_dev *input = q; q 40 drivers/input/misc/hisi_powerkey.c static irqreturn_t hi65xx_power_release_isr(int irq, void *q) q 42 drivers/input/misc/hisi_powerkey.c struct input_dev *input = q; q 51 drivers/input/misc/hisi_powerkey.c static irqreturn_t hi65xx_restart_toggle_isr(int irq, void *q) q 53 drivers/input/misc/hisi_powerkey.c struct input_dev *input = q; q 65 drivers/input/misc/hisi_powerkey.c irqreturn_t (*handler)(int irq, void *q); q 276 drivers/input/rmi4/rmi_f54.c static int rmi_f54_queue_setup(struct vb2_queue *q, unsigned int *nbuffers, q 280 drivers/input/rmi4/rmi_f54.c struct f54_data *f54 = q->drv_priv; q 2333 drivers/input/touchscreen/atmel_mxt_ts.c static int mxt_queue_setup(struct vb2_queue *q, q 2337 drivers/input/touchscreen/atmel_mxt_ts.c struct mxt_data *data = q->drv_priv; q 832 drivers/input/touchscreen/sur40.c static int sur40_queue_setup(struct vb2_queue *q, q 836 drivers/input/touchscreen/sur40.c struct sur40_state *sur40 = vb2_get_drv_priv(q); q 838 drivers/input/touchscreen/sur40.c if (q->num_buffers + *nbuffers < 3) q 839 drivers/input/touchscreen/sur40.c *nbuffers = 3 - q->num_buffers; q 188 drivers/iommu/arm-smmu-v3.c #define Q_ENT(q, p) ((q)->base + \ q 189 drivers/iommu/arm-smmu-v3.c Q_IDX(&((q)->llq), p) * \ q 190 drivers/iommu/arm-smmu-v3.c (q)->ent_dwords) q 527 drivers/iommu/arm-smmu-v3.c struct arm_smmu_queue q; q 534 drivers/iommu/arm-smmu-v3.c struct arm_smmu_queue q; q 539 drivers/iommu/arm-smmu-v3.c struct arm_smmu_queue q; q 712 drivers/iommu/arm-smmu-v3.c static bool queue_has_space(struct arm_smmu_ll_queue *q, u32 n) q 716 drivers/iommu/arm-smmu-v3.c prod = Q_IDX(q, q->prod); q 717 
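[note] The rxe_queue.h helpers above are a textbook power-of-two ring: free-running producer/consumer indices reduced with index_mask, with one slot sacrificed so full and empty are distinguishable. A user-space sketch mirroring queue_empty()/queue_full()/advance_producer(); names echo the kernel helpers but this is an illustration, not the driver:

#include <assert.h>
#include <stdio.h>

struct ring {
        unsigned prod, cons;    /* masked indices */
        unsigned index_mask;    /* num_slots - 1, num_slots a power of two */
};

static int ring_empty(struct ring *q)
{
        return ((q->prod - q->cons) & q->index_mask) == 0;
}

static int ring_full(struct ring *q)
{
        /* one slot is sacrificed to tell full apart from empty */
        return ((q->prod + 1 - q->cons) & q->index_mask) == 0;
}

int main(void)
{
        struct ring q = { .prod = 0, .cons = 0, .index_mask = 8 - 1 };

        assert(ring_empty(&q) && !ring_full(&q));
        while (!ring_full(&q))          /* advance_producer() equivalent */
                q.prod = (q.prod + 1) & q.index_mask;
        printf("full after %u pushes (8-slot ring)\n", q.prod);   /* 7 */
        q.cons = (q.cons + 1) & q.index_mask;   /* advance_consumer() */
        assert(!ring_full(&q));
        return 0;
}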
drivers/iommu/arm-smmu-v3.c cons = Q_IDX(q, q->cons); q 719 drivers/iommu/arm-smmu-v3.c if (Q_WRP(q, q->prod) == Q_WRP(q, q->cons)) q 720 drivers/iommu/arm-smmu-v3.c space = (1 << q->max_n_shift) - (prod - cons); q 727 drivers/iommu/arm-smmu-v3.c static bool queue_full(struct arm_smmu_ll_queue *q) q 729 drivers/iommu/arm-smmu-v3.c return Q_IDX(q, q->prod) == Q_IDX(q, q->cons) && q 730 drivers/iommu/arm-smmu-v3.c Q_WRP(q, q->prod) != Q_WRP(q, q->cons); q 733 drivers/iommu/arm-smmu-v3.c static bool queue_empty(struct arm_smmu_ll_queue *q) q 735 drivers/iommu/arm-smmu-v3.c return Q_IDX(q, q->prod) == Q_IDX(q, q->cons) && q 736 drivers/iommu/arm-smmu-v3.c Q_WRP(q, q->prod) == Q_WRP(q, q->cons); q 739 drivers/iommu/arm-smmu-v3.c static bool queue_consumed(struct arm_smmu_ll_queue *q, u32 prod) q 741 drivers/iommu/arm-smmu-v3.c return ((Q_WRP(q, q->cons) == Q_WRP(q, prod)) && q 742 drivers/iommu/arm-smmu-v3.c (Q_IDX(q, q->cons) > Q_IDX(q, prod))) || q 743 drivers/iommu/arm-smmu-v3.c ((Q_WRP(q, q->cons) != Q_WRP(q, prod)) && q 744 drivers/iommu/arm-smmu-v3.c (Q_IDX(q, q->cons) <= Q_IDX(q, prod))); q 747 drivers/iommu/arm-smmu-v3.c static void queue_sync_cons_out(struct arm_smmu_queue *q) q 754 drivers/iommu/arm-smmu-v3.c writel_relaxed(q->llq.cons, q->cons_reg); q 757 drivers/iommu/arm-smmu-v3.c static void queue_inc_cons(struct arm_smmu_ll_queue *q) q 759 drivers/iommu/arm-smmu-v3.c u32 cons = (Q_WRP(q, q->cons) | Q_IDX(q, q->cons)) + 1; q 760 drivers/iommu/arm-smmu-v3.c q->cons = Q_OVF(q->cons) | Q_WRP(q, cons) | Q_IDX(q, cons); q 763 drivers/iommu/arm-smmu-v3.c static int queue_sync_prod_in(struct arm_smmu_queue *q) q 766 drivers/iommu/arm-smmu-v3.c u32 prod = readl_relaxed(q->prod_reg); q 768 drivers/iommu/arm-smmu-v3.c if (Q_OVF(prod) != Q_OVF(q->llq.prod)) q 771 drivers/iommu/arm-smmu-v3.c q->llq.prod = prod; q 775 drivers/iommu/arm-smmu-v3.c static u32 queue_inc_prod_n(struct arm_smmu_ll_queue *q, int n) q 777 drivers/iommu/arm-smmu-v3.c u32 prod = (Q_WRP(q, q->prod) | Q_IDX(q, q->prod)) + n; q 778 drivers/iommu/arm-smmu-v3.c return Q_OVF(q->prod) | Q_WRP(q, prod) | Q_IDX(q, prod); q 824 drivers/iommu/arm-smmu-v3.c static int queue_remove_raw(struct arm_smmu_queue *q, u64 *ent) q 826 drivers/iommu/arm-smmu-v3.c if (queue_empty(&q->llq)) q 829 drivers/iommu/arm-smmu-v3.c queue_read(ent, Q_ENT(q, q->llq.cons), q->ent_dwords); q 830 drivers/iommu/arm-smmu-v3.c queue_inc_cons(&q->llq); q 831 drivers/iommu/arm-smmu-v3.c queue_sync_cons_out(q); q 918 drivers/iommu/arm-smmu-v3.c struct arm_smmu_queue *q = &smmu->cmdq.q; q 929 drivers/iommu/arm-smmu-v3.c ent.sync.msiaddr = q->base_dma + Q_IDX(&q->llq, prod) * q 930 drivers/iommu/arm-smmu-v3.c q->ent_dwords * 8; q 947 drivers/iommu/arm-smmu-v3.c struct arm_smmu_queue *q = &smmu->cmdq.q; q 948 drivers/iommu/arm-smmu-v3.c u32 cons = readl_relaxed(q->cons_reg); q 980 drivers/iommu/arm-smmu-v3.c queue_read(cmd, Q_ENT(q, cons), q->ent_dwords); q 991 drivers/iommu/arm-smmu-v3.c queue_write(Q_ENT(q, cons), cmd, q->ent_dwords); q 1099 drivers/iommu/arm-smmu-v3.c .max_n_shift = cmdq->q.llq.max_n_shift, q 1168 drivers/iommu/arm-smmu-v3.c WRITE_ONCE(cmdq->q.llq.cons, readl_relaxed(cmdq->q.cons_reg)); q 1170 drivers/iommu/arm-smmu-v3.c llq->val = READ_ONCE(cmdq->q.llq.val); q 1176 drivers/iommu/arm-smmu-v3.c llq->val = READ_ONCE(smmu->cmdq.q.llq.val); q 1196 drivers/iommu/arm-smmu-v3.c u32 *cmd = (u32 *)(Q_ENT(&cmdq->q, llq->prod)); q 1223 drivers/iommu/arm-smmu-v3.c llq->val = READ_ONCE(smmu->cmdq.q.llq.val); q 1258 drivers/iommu/arm-smmu-v3.c llq->cons = 
readl(cmdq->q.cons_reg); q 1279 drivers/iommu/arm-smmu-v3.c .max_n_shift = cmdq->q.llq.max_n_shift, q 1287 drivers/iommu/arm-smmu-v3.c queue_write(Q_ENT(&cmdq->q, prod), cmd, CMDQ_ENT_DWORDS); q 1316 drivers/iommu/arm-smmu-v3.c .max_n_shift = cmdq->q.llq.max_n_shift, q 1322 drivers/iommu/arm-smmu-v3.c llq.val = READ_ONCE(cmdq->q.llq.val); q 1337 drivers/iommu/arm-smmu-v3.c old = cmpxchg_relaxed(&cmdq->q.llq.val, llq.val, head.val); q 1355 drivers/iommu/arm-smmu-v3.c queue_write(Q_ENT(&cmdq->q, prod), cmd_sync, CMDQ_ENT_DWORDS); q 1377 drivers/iommu/arm-smmu-v3.c &cmdq->q.llq.atomic.prod); q 1391 drivers/iommu/arm-smmu-v3.c writel_relaxed(prod, cmdq->q.prod_reg); q 1409 drivers/iommu/arm-smmu-v3.c readl_relaxed(cmdq->q.prod_reg), q 1410 drivers/iommu/arm-smmu-v3.c readl_relaxed(cmdq->q.cons_reg)); q 1418 drivers/iommu/arm-smmu-v3.c WRITE_ONCE(cmdq->q.llq.cons, llq.cons); q 1698 drivers/iommu/arm-smmu-v3.c struct arm_smmu_queue *q = &smmu->evtq.q; q 1699 drivers/iommu/arm-smmu-v3.c struct arm_smmu_ll_queue *llq = &q->llq; q 1703 drivers/iommu/arm-smmu-v3.c while (!queue_remove_raw(q, evt)) { q 1717 drivers/iommu/arm-smmu-v3.c if (queue_sync_prod_in(q) == -EOVERFLOW) q 1768 drivers/iommu/arm-smmu-v3.c struct arm_smmu_queue *q = &smmu->priq.q; q 1769 drivers/iommu/arm-smmu-v3.c struct arm_smmu_ll_queue *llq = &q->llq; q 1773 drivers/iommu/arm-smmu-v3.c while (!queue_remove_raw(q, evt)) q 1776 drivers/iommu/arm-smmu-v3.c if (queue_sync_prod_in(q) == -EOVERFLOW) q 1783 drivers/iommu/arm-smmu-v3.c queue_sync_cons_out(q); q 2747 drivers/iommu/arm-smmu-v3.c struct arm_smmu_queue *q, q 2755 drivers/iommu/arm-smmu-v3.c qsz = ((1 << q->llq.max_n_shift) * dwords) << 3; q 2756 drivers/iommu/arm-smmu-v3.c q->base = dmam_alloc_coherent(smmu->dev, qsz, &q->base_dma, q 2758 drivers/iommu/arm-smmu-v3.c if (q->base || qsz < PAGE_SIZE) q 2761 drivers/iommu/arm-smmu-v3.c q->llq.max_n_shift--; q 2764 drivers/iommu/arm-smmu-v3.c if (!q->base) { q 2771 drivers/iommu/arm-smmu-v3.c if (!WARN_ON(q->base_dma & (qsz - 1))) { q 2773 drivers/iommu/arm-smmu-v3.c 1 << q->llq.max_n_shift, name); q 2776 drivers/iommu/arm-smmu-v3.c q->prod_reg = arm_smmu_page1_fixup(prod_off, smmu); q 2777 drivers/iommu/arm-smmu-v3.c q->cons_reg = arm_smmu_page1_fixup(cons_off, smmu); q 2778 drivers/iommu/arm-smmu-v3.c q->ent_dwords = dwords; q 2780 drivers/iommu/arm-smmu-v3.c q->q_base = Q_BASE_RWA; q 2781 drivers/iommu/arm-smmu-v3.c q->q_base |= q->base_dma & Q_BASE_ADDR_MASK; q 2782 drivers/iommu/arm-smmu-v3.c q->q_base |= FIELD_PREP(Q_BASE_LOG2SIZE, q->llq.max_n_shift); q 2784 drivers/iommu/arm-smmu-v3.c q->llq.prod = q->llq.cons = 0; q 2798 drivers/iommu/arm-smmu-v3.c unsigned int nents = 1 << cmdq->q.llq.max_n_shift; q 2821 drivers/iommu/arm-smmu-v3.c ret = arm_smmu_init_one_queue(smmu, &smmu->cmdq.q, ARM_SMMU_CMDQ_PROD, q 2832 drivers/iommu/arm-smmu-v3.c ret = arm_smmu_init_one_queue(smmu, &smmu->evtq.q, ARM_SMMU_EVTQ_PROD, q 2842 drivers/iommu/arm-smmu-v3.c return arm_smmu_init_one_queue(smmu, &smmu->priq.q, ARM_SMMU_PRIQ_PROD, q 3054 drivers/iommu/arm-smmu-v3.c smmu->evtq.q.irq = desc->irq; q 3060 drivers/iommu/arm-smmu-v3.c smmu->priq.q.irq = desc->irq; q 3078 drivers/iommu/arm-smmu-v3.c irq = smmu->evtq.q.irq; q 3101 drivers/iommu/arm-smmu-v3.c irq = smmu->priq.q.irq; q 3207 drivers/iommu/arm-smmu-v3.c writeq_relaxed(smmu->cmdq.q.q_base, smmu->base + ARM_SMMU_CMDQ_BASE); q 3208 drivers/iommu/arm-smmu-v3.c writel_relaxed(smmu->cmdq.q.llq.prod, smmu->base + ARM_SMMU_CMDQ_PROD); q 3209 drivers/iommu/arm-smmu-v3.c 
writel_relaxed(smmu->cmdq.q.llq.cons, smmu->base + ARM_SMMU_CMDQ_CONS); q 3235 drivers/iommu/arm-smmu-v3.c writeq_relaxed(smmu->evtq.q.q_base, smmu->base + ARM_SMMU_EVTQ_BASE); q 3236 drivers/iommu/arm-smmu-v3.c writel_relaxed(smmu->evtq.q.llq.prod, q 3238 drivers/iommu/arm-smmu-v3.c writel_relaxed(smmu->evtq.q.llq.cons, q 3251 drivers/iommu/arm-smmu-v3.c writeq_relaxed(smmu->priq.q.q_base, q 3253 drivers/iommu/arm-smmu-v3.c writel_relaxed(smmu->priq.q.llq.prod, q 3255 drivers/iommu/arm-smmu-v3.c writel_relaxed(smmu->priq.q.llq.cons, q 3409 drivers/iommu/arm-smmu-v3.c smmu->cmdq.q.llq.max_n_shift = min_t(u32, CMDQ_MAX_SZ_SHIFT, q 3411 drivers/iommu/arm-smmu-v3.c if (smmu->cmdq.q.llq.max_n_shift <= ilog2(CMDQ_BATCH_ENTRIES)) { q 3423 drivers/iommu/arm-smmu-v3.c smmu->evtq.q.llq.max_n_shift = min_t(u32, EVTQ_MAX_SZ_SHIFT, q 3425 drivers/iommu/arm-smmu-v3.c smmu->priq.q.llq.max_n_shift = min_t(u32, PRIQ_MAX_SZ_SHIFT, q 3622 drivers/iommu/arm-smmu-v3.c smmu->evtq.q.irq = irq; q 3626 drivers/iommu/arm-smmu-v3.c smmu->priq.q.irq = irq; q 1314 drivers/isdn/mISDN/dsp_cmx.c u8 *d, *p, *q, *o_q; q 1369 drivers/isdn/mISDN/dsp_cmx.c q = dsp->rx_buff; /* received data */ q 1441 drivers/isdn/mISDN/dsp_cmx.c *d++ = dsp_audio_mix_law[(p[t] << 8) | q[r]]; q 1446 drivers/isdn/mISDN/dsp_cmx.c *d++ = q[r]; /* echo */ q 1493 drivers/isdn/mISDN/dsp_cmx.c dsp_audio_law_to_s32[q[r]] + q 1506 drivers/isdn/mISDN/dsp_cmx.c *d++ = dsp_audio_mix_law[(q[r] << 8) | o_q[o_r]]; q 1523 drivers/isdn/mISDN/dsp_cmx.c dsp_audio_law_to_s32[q[r]]; q 1534 drivers/isdn/mISDN/dsp_cmx.c sample = *c++ - dsp_audio_law_to_s32[q[r]]; q 1636 drivers/isdn/mISDN/dsp_cmx.c u8 *p, *q; q 1715 drivers/isdn/mISDN/dsp_cmx.c q = dsp->rx_buff; q 1720 drivers/isdn/mISDN/dsp_cmx.c *c++ += dsp_audio_law_to_s32[q[r]]; q 1739 drivers/isdn/mISDN/dsp_cmx.c q = dsp->tx_buff; q 1824 drivers/isdn/mISDN/dsp_cmx.c q[r] = dsp_silence; q 221 drivers/lightnvm/core.c tgt_dev->q = dev->q; q 383 drivers/lightnvm/core.c tqueue = blk_alloc_queue_node(GFP_KERNEL, dev->q->node); q 453 drivers/lightnvm/core.c struct request_queue *q = tdisk->queue; q 456 drivers/lightnvm/core.c blk_cleanup_queue(q); q 1183 drivers/lightnvm/core.c if (!dev->q || !dev->ops) { q 24 drivers/lightnvm/pblk-cache.c struct request_queue *q = pblk->dev->q; q 32 drivers/lightnvm/pblk-cache.c generic_start_io_acct(q, REQ_OP_WRITE, bio_sectors(bio), q 82 drivers/lightnvm/pblk-cache.c generic_end_io_acct(q, REQ_OP_WRITE, &pblk->disk->part0, start_time); q 342 drivers/lightnvm/pblk-core.c struct request_queue *q = pblk->dev->q; q 349 drivers/lightnvm/pblk-core.c ret = bio_add_pc_page(q, bio, page, PBLK_EXPOSED_PAGE_SIZE, 0); q 50 drivers/lightnvm/pblk-init.c static blk_qc_t pblk_make_rq(struct request_queue *q, struct bio *bio) q 52 drivers/lightnvm/pblk-init.c struct pblk *pblk = q->queuedata; q 66 drivers/lightnvm/pblk-init.c blk_queue_split(q, &bio); q 74 drivers/lightnvm/pblk-init.c blk_queue_split(q, &bio); q 392 drivers/lightnvm/pblk-init.c queue_max_hw_sectors(dev->q) / (geo->csecs >> SECTOR_SHIFT)); q 1148 drivers/lightnvm/pblk-init.c struct request_queue *bqueue = dev->q; q 555 drivers/lightnvm/pblk-rb.c struct request_queue *q = pblk->dev->q; q 599 drivers/lightnvm/pblk-rb.c if (bio_add_pc_page(q, bio, page, rb->seg_size, 0) != q 195 drivers/lightnvm/pblk-read.c generic_end_io_acct(dev->q, REQ_OP_READ, &pblk->disk->part0, start_time); q 267 drivers/lightnvm/pblk-read.c struct request_queue *q = dev->q; q 275 drivers/lightnvm/pblk-read.c generic_start_io_acct(q, REQ_OP_READ, 
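[note] The arm-smmu-v3 queue code above uses a different ring discipline from rxe: prod/cons carry a wrap bit just above the index bits, so "full" is equal indices with differing wrap bits and "empty" is equal indices with equal wrap bits, letting all 2^n slots be used. A sketch with Q_IDX/Q_WRP reimplemented locally for illustration:

#include <assert.h>
#include <stdio.h>

#define MAX_N_SHIFT 3                           /* 8-entry queue */
#define Q_IDX(p)    ((p) & ((1U << MAX_N_SHIFT) - 1))
#define Q_WRP(p)    ((p) & (1U << MAX_N_SHIFT))

static int q_full(unsigned prod, unsigned cons)
{
        return Q_IDX(prod) == Q_IDX(cons) && Q_WRP(prod) != Q_WRP(cons);
}

static int q_empty(unsigned prod, unsigned cons)
{
        return Q_IDX(prod) == Q_IDX(cons) && Q_WRP(prod) == Q_WRP(cons);
}

int main(void)
{
        unsigned prod = 0, cons = 0;

        assert(q_empty(prod, cons));
        prod += 8;                      /* producer laps the ring once */
        assert(q_full(prod, cons));     /* same index, different wrap bit */
        printf("prod=0x%x cons=0x%x -> full\n", prod, cons);
        return 0;
}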
bio_sectors(bio), q 1258 drivers/lightnvm/pblk.h print_ppa(tgt_dev->q->queuedata, ppa, "boundary", i); q 351 drivers/mailbox/omap-mailbox.c static void mbox_queue_free(struct omap_mbox_queue *q) q 353 drivers/mailbox/omap-mailbox.c kfifo_free(&q->fifo); q 354 drivers/mailbox/omap-mailbox.c kfree(q); q 1176 drivers/md/bcache/request.c static blk_qc_t cached_dev_make_request(struct request_queue *q, q 1206 drivers/md/bcache/request.c generic_start_io_acct(q, q 1256 drivers/md/bcache/request.c struct request_queue *q = bdev_get_queue(dc->bdev); q 1259 drivers/md/bcache/request.c if (bdi_congested(q->backing_dev_info, bits)) q 1267 drivers/md/bcache/request.c q = bdev_get_queue(ca->bdev); q 1268 drivers/md/bcache/request.c ret |= bdi_congested(q->backing_dev_info, bits); q 1316 drivers/md/bcache/request.c static blk_qc_t flash_dev_make_request(struct request_queue *q, q 1329 drivers/md/bcache/request.c generic_start_io_acct(q, bio_op(bio), bio_sectors(bio), &d->disk->part0); q 1373 drivers/md/bcache/request.c struct request_queue *q; q 1379 drivers/md/bcache/request.c q = bdev_get_queue(ca->bdev); q 1380 drivers/md/bcache/request.c ret |= bdi_congested(q->backing_dev_info, bits); q 813 drivers/md/bcache/super.c struct request_queue *q; q 861 drivers/md/bcache/super.c q = blk_alloc_queue(GFP_KERNEL); q 862 drivers/md/bcache/super.c if (!q) q 865 drivers/md/bcache/super.c blk_queue_make_request(q, NULL); q 866 drivers/md/bcache/super.c d->disk->queue = q; q 867 drivers/md/bcache/super.c q->queuedata = d; q 868 drivers/md/bcache/super.c q->backing_dev_info->congested_data = d; q 869 drivers/md/bcache/super.c q->limits.max_hw_sectors = UINT_MAX; q 870 drivers/md/bcache/super.c q->limits.max_sectors = UINT_MAX; q 871 drivers/md/bcache/super.c q->limits.max_segment_size = UINT_MAX; q 872 drivers/md/bcache/super.c q->limits.max_segments = BIO_MAX_PAGES; q 873 drivers/md/bcache/super.c blk_queue_max_discard_sectors(q, UINT_MAX); q 874 drivers/md/bcache/super.c q->limits.discard_granularity = 512; q 875 drivers/md/bcache/super.c q->limits.io_min = block_size; q 876 drivers/md/bcache/super.c q->limits.logical_block_size = block_size; q 877 drivers/md/bcache/super.c q->limits.physical_block_size = block_size; q 882 drivers/md/bcache/super.c blk_queue_write_cache(q, true, true); q 909 drivers/md/bcache/super.c struct request_queue *q; q 917 drivers/md/bcache/super.c q = bdev_get_queue(dc->bdev); q 918 drivers/md/bcache/super.c if (blk_queue_dying(q)) q 1307 drivers/md/bcache/super.c struct request_queue *q = bdev_get_queue(dc->bdev); q 1327 drivers/md/bcache/super.c dc->disk.stripe_size = q->limits.io_opt >> 9; q 1331 drivers/md/bcache/super.c q->limits.raid_partial_stripes_expensive; q 1340 drivers/md/bcache/super.c q->backing_dev_info->ra_pages); q 1022 drivers/md/bcache/sysfs.c uint16_t q[31], *p, *cached; q 1062 drivers/md/bcache/sysfs.c for (i = 0; i < ARRAY_SIZE(q); i++) q 1063 drivers/md/bcache/sysfs.c q[i] = INITIAL_PRIO - cached[n * (i + 1) / q 1064 drivers/md/bcache/sysfs.c (ARRAY_SIZE(q) + 1)]; q 1080 drivers/md/bcache/sysfs.c n * ca->sb.bucket_size / (ARRAY_SIZE(q) + 1)); q 1082 drivers/md/bcache/sysfs.c for (i = 0; i < ARRAY_SIZE(q); i++) q 1084 drivers/md/bcache/sysfs.c "%u ", q[i]); q 97 drivers/md/bcache/util.c uint64_t q; q 100 drivers/md/bcache/util.c q = -v; q 102 drivers/md/bcache/util.c q = v; q 111 drivers/md/bcache/util.c t = q & ~(~0 << 10); q 112 drivers/md/bcache/util.c q >>= 10; q 113 drivers/md/bcache/util.c } while (q >= 1000); q 119 drivers/md/bcache/util.c return 
sprintf(buf, "-%llu.%i%c", q, t * 10 / 1024, units[u]); q 121 drivers/md/bcache/util.c return sprintf(buf, "%llu.%i%c", q, t * 10 / 1024, units[u]); q 269 drivers/md/dm-cache-policy-smq.c static void q_init(struct queue *q, struct entry_space *es, unsigned nr_levels) q 273 drivers/md/dm-cache-policy-smq.c q->es = es; q 274 drivers/md/dm-cache-policy-smq.c q->nr_elts = 0; q 275 drivers/md/dm-cache-policy-smq.c q->nr_levels = nr_levels; q 277 drivers/md/dm-cache-policy-smq.c for (i = 0; i < q->nr_levels; i++) { q 278 drivers/md/dm-cache-policy-smq.c l_init(q->qs + i); q 279 drivers/md/dm-cache-policy-smq.c q->target_count[i] = 0u; q 282 drivers/md/dm-cache-policy-smq.c q->last_target_nr_elts = 0u; q 283 drivers/md/dm-cache-policy-smq.c q->nr_top_levels = 0u; q 284 drivers/md/dm-cache-policy-smq.c q->nr_in_top_levels = 0u; q 287 drivers/md/dm-cache-policy-smq.c static unsigned q_size(struct queue *q) q 289 drivers/md/dm-cache-policy-smq.c return q->nr_elts; q 295 drivers/md/dm-cache-policy-smq.c static void q_push(struct queue *q, struct entry *e) q 300 drivers/md/dm-cache-policy-smq.c q->nr_elts++; q 302 drivers/md/dm-cache-policy-smq.c l_add_tail(q->es, q->qs + e->level, e); q 305 drivers/md/dm-cache-policy-smq.c static void q_push_front(struct queue *q, struct entry *e) q 310 drivers/md/dm-cache-policy-smq.c q->nr_elts++; q 312 drivers/md/dm-cache-policy-smq.c l_add_head(q->es, q->qs + e->level, e); q 315 drivers/md/dm-cache-policy-smq.c static void q_push_before(struct queue *q, struct entry *old, struct entry *e) q 320 drivers/md/dm-cache-policy-smq.c q->nr_elts++; q 322 drivers/md/dm-cache-policy-smq.c l_add_before(q->es, q->qs + e->level, old, e); q 325 drivers/md/dm-cache-policy-smq.c static void q_del(struct queue *q, struct entry *e) q 327 drivers/md/dm-cache-policy-smq.c l_del(q->es, q->qs + e->level, e); q 329 drivers/md/dm-cache-policy-smq.c q->nr_elts--; q 335 drivers/md/dm-cache-policy-smq.c static struct entry *q_peek(struct queue *q, unsigned max_level, bool can_cross_sentinel) q 340 drivers/md/dm-cache-policy-smq.c max_level = min(max_level, q->nr_levels); q 343 drivers/md/dm-cache-policy-smq.c for (e = l_head(q->es, q->qs + level); e; e = l_next(q->es, e)) { q 357 drivers/md/dm-cache-policy-smq.c static struct entry *q_pop(struct queue *q) q 359 drivers/md/dm-cache-policy-smq.c struct entry *e = q_peek(q, q->nr_levels, true); q 362 drivers/md/dm-cache-policy-smq.c q_del(q, e); q 372 drivers/md/dm-cache-policy-smq.c static struct entry *__redist_pop_from(struct queue *q, unsigned level) q 376 drivers/md/dm-cache-policy-smq.c for (; level < q->nr_levels; level++) q 377 drivers/md/dm-cache-policy-smq.c for (e = l_head(q->es, q->qs + level); e; e = l_next(q->es, e)) q 379 drivers/md/dm-cache-policy-smq.c l_del(q->es, q->qs + e->level, e); q 386 drivers/md/dm-cache-policy-smq.c static void q_set_targets_subrange_(struct queue *q, unsigned nr_elts, unsigned lbegin, unsigned lend) q 391 drivers/md/dm-cache-policy-smq.c BUG_ON(lend > q->nr_levels); q 397 drivers/md/dm-cache-policy-smq.c q->target_count[level] = q 405 drivers/md/dm-cache-policy-smq.c static void q_set_targets(struct queue *q) q 407 drivers/md/dm-cache-policy-smq.c if (q->last_target_nr_elts == q->nr_elts) q 410 drivers/md/dm-cache-policy-smq.c q->last_target_nr_elts = q->nr_elts; q 412 drivers/md/dm-cache-policy-smq.c if (q->nr_top_levels > q->nr_levels) q 413 drivers/md/dm-cache-policy-smq.c q_set_targets_subrange_(q, q->nr_elts, 0, q->nr_levels); q 416 drivers/md/dm-cache-policy-smq.c q_set_targets_subrange_(q, 
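[note] The bcache util.c entries above format byte counts in human-readable form: divide by 1024 until the integer part is short, keep the last discarded 10 bits as a fraction, and print one decimal digit as t * 10 / 1024. A standalone re-derivation; the "?kMGTPEZY" suffix table is an assumption about the kernel's units string:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        static const char units[] = "?kMGTPEZY";  /* assumed suffix set */
        int64_t v = 1536 * 1024;                  /* example: 1.5 MiB */
        uint64_t q = (v < 0) ? -(uint64_t)v : (uint64_t)v;
        unsigned t = 0, u = 0;

        do {
                t = q & ~(~0U << 10);   /* low 10 bits become the fraction */
                q >>= 10;
                u++;
        } while (q >= 1000);

        /* one decimal digit: t/1024 scaled by 10 (prints "1.5M" here) */
        printf("%s%llu.%u%c\n", v < 0 ? "-" : "",
               (unsigned long long)q, t * 10 / 1024, units[u]);
        return 0;
}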
q->nr_in_top_levels, q 417 drivers/md/dm-cache-policy-smq.c q->nr_levels - q->nr_top_levels, q->nr_levels); q 419 drivers/md/dm-cache-policy-smq.c if (q->nr_in_top_levels < q->nr_elts) q 420 drivers/md/dm-cache-policy-smq.c q_set_targets_subrange_(q, q->nr_elts - q->nr_in_top_levels, q 421 drivers/md/dm-cache-policy-smq.c 0, q->nr_levels - q->nr_top_levels); q 423 drivers/md/dm-cache-policy-smq.c q_set_targets_subrange_(q, 0, 0, q->nr_levels - q->nr_top_levels); q 427 drivers/md/dm-cache-policy-smq.c static void q_redistribute(struct queue *q) q 433 drivers/md/dm-cache-policy-smq.c q_set_targets(q); q 435 drivers/md/dm-cache-policy-smq.c for (level = 0u; level < q->nr_levels - 1u; level++) { q 436 drivers/md/dm-cache-policy-smq.c l = q->qs + level; q 437 drivers/md/dm-cache-policy-smq.c target = q->target_count[level]; q 443 drivers/md/dm-cache-policy-smq.c e = __redist_pop_from(q, level + 1u); q 450 drivers/md/dm-cache-policy-smq.c l_add_tail(q->es, l, e); q 456 drivers/md/dm-cache-policy-smq.c l_above = q->qs + level + 1u; q 458 drivers/md/dm-cache-policy-smq.c e = l_pop_tail(q->es, l); q 465 drivers/md/dm-cache-policy-smq.c l_add_tail(q->es, l_above, e); q 470 drivers/md/dm-cache-policy-smq.c static void q_requeue(struct queue *q, struct entry *e, unsigned extra_levels, q 475 drivers/md/dm-cache-policy-smq.c unsigned new_level = min(q->nr_levels - 1u, e->level + extra_levels); q 478 drivers/md/dm-cache-policy-smq.c if (extra_levels && (e->level < q->nr_levels - 1u)) { q 479 drivers/md/dm-cache-policy-smq.c for (de = l_head(q->es, q->qs + new_level); de && de->sentinel; de = l_next(q->es, de)) q 483 drivers/md/dm-cache-policy-smq.c q_del(q, de); q 488 drivers/md/dm-cache-policy-smq.c q_push_before(q, s1, de); q 492 drivers/md/dm-cache-policy-smq.c q_push_before(q, s2, de); q 496 drivers/md/dm-cache-policy-smq.c q_push(q, de); q 499 drivers/md/dm-cache-policy-smq.c q_push(q, de); q 503 drivers/md/dm-cache-policy-smq.c q_del(q, e); q 505 drivers/md/dm-cache-policy-smq.c q_push(q, e); q 880 drivers/md/dm-cache-policy-smq.c struct queue *q = &mq->dirty; q 883 drivers/md/dm-cache-policy-smq.c for (level = 0; level < q->nr_levels; level++) { q 885 drivers/md/dm-cache-policy-smq.c q_del(q, sentinel); q 886 drivers/md/dm-cache-policy-smq.c q_push(q, sentinel); q 893 drivers/md/dm-cache-policy-smq.c struct queue *q = &mq->clean; q 896 drivers/md/dm-cache-policy-smq.c for (level = 0; level < q->nr_levels; level++) { q 898 drivers/md/dm-cache-policy-smq.c q_del(q, sentinel); q 899 drivers/md/dm-cache-policy-smq.c q_push(q, sentinel); q 2449 drivers/md/dm-cache-target.c struct request_queue *q = bdev_get_queue(dev->bdev); q 2450 drivers/md/dm-cache-target.c return bdi_congested(q->backing_dev_info, bdi_bits); q 3437 drivers/md/dm-cache-target.c struct request_queue *q = bdev_get_queue(origin_bdev); q 3439 drivers/md/dm-cache-target.c return q && blk_queue_discard(q); q 2040 drivers/md/dm-clone-target.c struct request_queue *q = bdev_get_queue(bdev); q 2042 drivers/md/dm-clone-target.c return (q && blk_queue_discard(q)); q 1380 drivers/md/dm-era-target.c struct request_queue *q = bdev_get_queue(dev->bdev); q 1381 drivers/md/dm-era-target.c return bdi_congested(q->backing_dev_info, bdi_bits); q 306 drivers/md/dm-io.c struct request_queue *q = bdev_get_queue(where->bdev); q 307 drivers/md/dm-io.c unsigned short logical_block_size = queue_logical_block_size(q); q 315 drivers/md/dm-io.c special_cmd_max_sectors = q->limits.max_discard_sectors; q 317 drivers/md/dm-io.c special_cmd_max_sectors = 
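The dm-cache-policy-smq hits above describe a multiqueue: one list per hotness level, a shared element count, and peek/pop that scan from the coldest level up. A reduced standalone model of that structure; the entry_space indirection, sentinels, target counts, and redistribution are deliberately left out, and every name below is illustrative:

#include <stdio.h>

#define NR_LEVELS 8     /* illustrative; smq sizes this per policy */

struct entry {
    unsigned level;
    struct entry *prev, *next;
};

struct level {
    struct entry *head, *tail;
};

struct queue {
    unsigned nr_elts;
    struct level qs[NR_LEVELS];
};

static void q_init(struct queue *q)
{
    q->nr_elts = 0;
    for (unsigned i = 0; i < NR_LEVELS; i++)
        q->qs[i].head = q->qs[i].tail = 0;
}

/* mirrors q_push(): append at the tail of the entry's own level */
static void q_push(struct queue *q, struct entry *e)
{
    struct level *l = &q->qs[e->level];

    e->next = 0;
    e->prev = l->tail;
    if (l->tail)
        l->tail->next = e;
    else
        l->head = e;
    l->tail = e;
    q->nr_elts++;
}

/* mirrors q_del(): unlink from the level list, drop the count */
static void q_del(struct queue *q, struct entry *e)
{
    struct level *l = &q->qs[e->level];

    if (e->prev)
        e->prev->next = e->next;
    else
        l->head = e->next;
    if (e->next)
        e->next->prev = e->prev;
    else
        l->tail = e->prev;
    q->nr_elts--;
}

/* mirrors q_peek(): coldest (lowest) levels are scanned first */
static struct entry *q_peek(struct queue *q, unsigned max_level)
{
    for (unsigned level = 0; level < max_level && level < NR_LEVELS; level++)
        if (q->qs[level].head)
            return q->qs[level].head;
    return 0;
}

static struct entry *q_pop(struct queue *q)
{
    struct entry *e = q_peek(q, NR_LEVELS);

    if (e)
        q_del(q, e);
    return e;
}

int main(void)
{
    struct queue q;
    struct entry hot = { .level = 5 }, cold = { .level = 1 };

    q_init(&q);
    q_push(&q, &hot);
    q_push(&q, &cold);
    printf("popped level %u\n", q_pop(&q)->level);   /* cold wins: level 1 */
    return 0;
}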
q 319 drivers/md/dm-io.c special_cmd_max_sectors = q->limits.max_write_same_sectors;
q 890 drivers/md/dm-log-writes.c struct request_queue *q = bdev_get_queue(lc->dev->bdev);
q 892 drivers/md/dm-log-writes.c if (!q || !blk_queue_discard(q)) {
q 492 drivers/md/dm-mpath.c struct request_queue *q;
q 515 drivers/md/dm-mpath.c q = bdev_get_queue(bdev);
q 516 drivers/md/dm-mpath.c clone = blk_get_request(q, rq->cmd_flags | REQ_NOMERGE,
q 520 drivers/md/dm-mpath.c if (blk_queue_dying(q)) {
q 783 drivers/md/dm-mpath.c struct request_queue *q = bdev_get_queue(bdev);
q 811 drivers/md/dm-mpath.c r = scsi_dh_attach(q, m->hw_handler_name);
q 825 drivers/md/dm-mpath.c r = scsi_dh_set_params(q, m->hw_handler_params);
q 842 drivers/md/dm-mpath.c struct request_queue *q;
q 862 drivers/md/dm-mpath.c q = bdev_get_queue(p->path.dev->bdev);
q 863 drivers/md/dm-mpath.c attached_handler_name = scsi_dh_attached_handler_name(q, GFP_KERNEL);
q 1505 drivers/md/dm-mpath.c struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);
q 1507 drivers/md/dm-mpath.c if (pgpath->is_active && !blk_queue_dying(q))
q 1508 drivers/md/dm-mpath.c scsi_dh_activate(q, pg_init_done, pgpath);
q 1919 drivers/md/dm-mpath.c struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);
q 1921 drivers/md/dm-mpath.c return blk_lld_busy(q);
q 2971 drivers/md/dm-raid.c struct request_queue *q;
q 2976 drivers/md/dm-raid.c q = bdev_get_queue(rs->dev[i].rdev.bdev);
q 2977 drivers/md/dm-raid.c if (!q || !blk_queue_discard(q))
q 65 drivers/md/dm-rq.c void dm_start_queue(struct request_queue *q)
q 67 drivers/md/dm-rq.c blk_mq_unquiesce_queue(q);
q 68 drivers/md/dm-rq.c blk_mq_kick_requeue_list(q);
q 71 drivers/md/dm-rq.c void dm_stop_queue(struct request_queue *q)
q 73 drivers/md/dm-rq.c if (blk_mq_queue_stopped(q))
q 76 drivers/md/dm-rq.c blk_mq_quiesce_queue(q);
q 178 drivers/md/dm-rq.c static void __dm_mq_kick_requeue_list(struct request_queue *q, unsigned long msecs)
q 180 drivers/md/dm-rq.c blk_mq_delay_kick_requeue_list(q, msecs);
q 192 drivers/md/dm-rq.c __dm_mq_kick_requeue_list(rq->q, msecs);
q 226 drivers/md/dm-rq.c !clone->q->limits.max_discard_sectors)
q 229 drivers/md/dm-rq.c !clone->q->limits.max_write_same_sectors)
q 232 drivers/md/dm-rq.c !clone->q->limits.max_write_zeroes_sectors)
q 317 drivers/md/dm-rq.c if (blk_queue_io_stat(clone->q))
q 321 drivers/md/dm-rq.c r = blk_insert_cloned_request(clone->q, clone);
q 406 drivers/md/dm-rq.c trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)),
q 539 drivers/md/dm-rq.c struct request_queue *q;
q 566 drivers/md/dm-rq.c q = blk_mq_init_allocated_queue(md->tag_set, md->queue, true);
q 567 drivers/md/dm-rq.c if (IS_ERR(q)) {
q 568 drivers/md/dm-rq.c err = PTR_ERR(q);
q 36 drivers/md/dm-rq.h void dm_start_queue(struct request_queue *q);
q 37 drivers/md/dm-rq.h void dm_stop_queue(struct request_queue *q);
q 909 drivers/md/dm-stats.c const char *q;
q 914 drivers/md/dm-stats.c for (q = h; *q; q++)
q 915 drivers/md/dm-stats.c if (*q == ',')
q 282 drivers/md/dm-table.c struct request_queue *q;
q 296 drivers/md/dm-table.c q = bdev_get_queue(bdev);
q 297 drivers/md/dm-table.c if (!q || !q->make_request_fn) {
q 472 drivers/md/dm-table.c struct request_queue *q = bdev_get_queue(bdev);
q 475 drivers/md/dm-table.c if (unlikely(!q)) {
q 486 drivers/md/dm-table.c q->limits.physical_block_size,
q 487 drivers/md/dm-table.c q->limits.logical_block_size,
q 488 drivers/md/dm-table.c q->limits.alignment_offset,
q 491 drivers/md/dm-table.c limits->zoned = blk_queue_zoned_model(q);
q 929 drivers/md/dm-table.c struct request_queue *q = bdev_get_queue(dev->bdev);
q 932 drivers/md/dm-table.c if (queue_is_mq(q))
q 937 drivers/md/dm-table.c return queue_is_mq(q);
q 1422 drivers/md/dm-table.c struct request_queue *q = bdev_get_queue(dev->bdev);
q 1425 drivers/md/dm-table.c return q && blk_queue_zoned_model(q) == *zoned_model;
q 1452 drivers/md/dm-table.c struct request_queue *q = bdev_get_queue(dev->bdev);
q 1455 drivers/md/dm-table.c return q && blk_queue_zone_sectors(q) == *zone_sectors;
q 1635 drivers/md/dm-table.c struct request_queue *q = bdev_get_queue(dev->bdev);
q 1637 drivers/md/dm-table.c return q && (q->queue_flags & flush);
q 1702 drivers/md/dm-table.c struct request_queue *q = bdev_get_queue(dev->bdev);
q 1704 drivers/md/dm-table.c return q && blk_queue_nonrot(q);
q 1710 drivers/md/dm-table.c struct request_queue *q = bdev_get_queue(dev->bdev);
q 1712 drivers/md/dm-table.c return q && !blk_queue_add_random(q);
q 1749 drivers/md/dm-table.c struct request_queue *q = bdev_get_queue(dev->bdev);
q 1751 drivers/md/dm-table.c return q && !q->limits.max_write_same_sectors;
q 1776 drivers/md/dm-table.c struct request_queue *q = bdev_get_queue(dev->bdev);
q 1778 drivers/md/dm-table.c return q && !q->limits.max_write_zeroes_sectors;
q 1803 drivers/md/dm-table.c struct request_queue *q = bdev_get_queue(dev->bdev);
q 1805 drivers/md/dm-table.c return q && !blk_queue_discard(q);
q 1837 drivers/md/dm-table.c struct request_queue *q = bdev_get_queue(dev->bdev);
q 1839 drivers/md/dm-table.c return q && !blk_queue_secure_erase(q);
q 1865 drivers/md/dm-table.c struct request_queue *q = bdev_get_queue(dev->bdev);
q 1867 drivers/md/dm-table.c return q && bdi_cap_stable_pages_required(q->backing_dev_info);
q 1891 drivers/md/dm-table.c void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
q 1900 drivers/md/dm-table.c q->limits = *limits;
q 1903 drivers/md/dm-table.c blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q);
q 1905 drivers/md/dm-table.c q->limits.max_discard_sectors = 0;
q 1906 drivers/md/dm-table.c q->limits.max_hw_discard_sectors = 0;
q 1907 drivers/md/dm-table.c q->limits.discard_granularity = 0;
q 1908 drivers/md/dm-table.c q->limits.discard_alignment = 0;
q 1909 drivers/md/dm-table.c q->limits.discard_misaligned = 0;
q 1911 drivers/md/dm-table.c blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
q 1914 drivers/md/dm-table.c blk_queue_flag_set(QUEUE_FLAG_SECERASE, q);
q 1921 drivers/md/dm-table.c blk_queue_write_cache(q, wc, fua);
q 1924 drivers/md/dm-table.c blk_queue_flag_set(QUEUE_FLAG_DAX, q);
q 1929 drivers/md/dm-table.c blk_queue_flag_clear(QUEUE_FLAG_DAX, q);
q 1936 drivers/md/dm-table.c blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
q 1938 drivers/md/dm-table.c blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
q 1941 drivers/md/dm-table.c q->limits.max_write_same_sectors = 0;
q 1943 drivers/md/dm-table.c q->limits.max_write_zeroes_sectors = 0;
q 1952 drivers/md/dm-table.c q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES;
q 1954 drivers/md/dm-table.c q->backing_dev_info->capabilities &= ~BDI_CAP_STABLE_WRITES;
q 1962 drivers/md/dm-table.c if (blk_queue_add_random(q) && dm_table_all_devices_attribute(t, device_is_not_random))
q 1963 drivers/md/dm-table.c blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
q 1972 drivers/md/dm-table.c if (blk_queue_is_zoned(q))
q 1976 drivers/md/dm-table.c q->backing_dev_info->io_pages = limits->max_sectors >> (PAGE_SHIFT - 9);
q 2095 drivers/md/dm-table.c struct request_queue *q = bdev_get_queue(dd->dm_dev->bdev);
q 2098 drivers/md/dm-table.c if (likely(q))
q 2099 drivers/md/dm-table.c r |= bdi_congested(q->backing_dev_info, bdi_bits);
q 2812 drivers/md/dm-thin.c struct request_queue *q;
q 2817 drivers/md/dm-thin.c q = bdev_get_queue(pt->data_dev->bdev);
q 2818 drivers/md/dm-thin.c return bdi_congested(q->backing_dev_info, bdi_bits);
q 2841 drivers/md/dm-thin.c struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);
q 2843 drivers/md/dm-thin.c return q && blk_queue_discard(q);
q 685 drivers/md/dm-zoned-target.c struct request_queue *q;
q 713 drivers/md/dm-zoned-target.c q = bdev_get_queue(dev->bdev);
q 716 drivers/md/dm-zoned-target.c ~((sector_t)blk_queue_zone_sectors(q) - 1);
q 724 drivers/md/dm-zoned-target.c dev->zone_nr_sectors = blk_queue_zone_sectors(q);
q 1775 drivers/md/dm.c static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio)
q 1777 drivers/md/dm.c struct mapped_device *md = q->queuedata;
q 2129 drivers/md/dm.c struct request_queue *q = md->queue;
q 2156 drivers/md/dm.c dm_stop_queue(q);
q 2178 drivers/md/dm.c dm_table_set_restrictions(t, q, limits);
q 59 drivers/md/dm.h void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
q 64 drivers/md/md-linear.c struct request_queue *q = bdev_get_queue(conf->disks[i].rdev->bdev);
q 65 drivers/md/md-linear.c ret |= bdi_congested(q->backing_dev_info, bits);
q 163 drivers/md/md-multipath.c struct request_queue *q = bdev_get_queue(rdev->bdev);
q 165 drivers/md/md-multipath.c ret |= bdi_congested(q->backing_dev_info, bits);
q 372 drivers/md/md.c static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio)
q 376 drivers/md/md.c struct mddev *mddev = q->queuedata;
q 384 drivers/md/md.c blk_queue_split(q, &bio);
q 40 drivers/md/raid0.c struct request_queue *q = bdev_get_queue(devlist[i]->bdev);
q 42 drivers/md/raid0.c ret |= bdi_congested(q->backing_dev_info, bits);
q 785 drivers/md/raid1.c struct request_queue *q = bdev_get_queue(rdev->bdev);
q 787 drivers/md/raid1.c BUG_ON(!q);
q 793 drivers/md/raid1.c ret |= bdi_congested(q->backing_dev_info, bits);
q 795 drivers/md/raid1.c ret &= bdi_congested(q->backing_dev_info, bits);
q 867 drivers/md/raid10.c struct request_queue *q = bdev_get_queue(rdev->bdev);
q 869 drivers/md/raid10.c ret |= bdi_congested(q->backing_dev_info, bits);
q 3066 drivers/md/raid5-cache.c struct request_queue *q = bdev_get_queue(rdev->bdev);
q 3097 drivers/md/raid5-cache.c log->need_cache_flush = test_bit(QUEUE_FLAG_WC, &q->queue_flags) != 0;
q 1311 drivers/md/raid5-ppl.c struct request_queue *q;
q 1326 drivers/md/raid5-ppl.c q = bdev_get_queue(rdev->bdev);
q 1327 drivers/md/raid5-ppl.c if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
q 52 drivers/media/common/saa7146/saa7146_fops.c void saa7146_dma_free(struct saa7146_dev *dev,struct videobuf_queue *q,
q 60 drivers/media/common/saa7146/saa7146_fops.c videobuf_waiton(q, &buf->vb, 0, 0);
q 61 drivers/media/common/saa7146/saa7146_fops.c videobuf_dma_unmap(q->dev, dma);
q 71 drivers/media/common/saa7146/saa7146_fops.c struct saa7146_dmaqueue *q,
q 75 drivers/media/common/saa7146/saa7146_fops.c DEB_EE("dev:%p, dmaq:%p, buf:%p\n", dev, q, buf);
q 77 drivers/media/common/saa7146/saa7146_fops.c BUG_ON(!q);
q 79 drivers/media/common/saa7146/saa7146_fops.c if (NULL == q->curr) {
q 80 drivers/media/common/saa7146/saa7146_fops.c q->curr = buf;
q 84 drivers/media/common/saa7146/saa7146_fops.c list_add_tail(&buf->vb.queue,&q->queue);
q 93 drivers/media/common/saa7146/saa7146_fops.c struct saa7146_dmaqueue *q,
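A large share of the dm-* and md/raid* hits above are a single idiom: look up each component device's queue with bdev_get_queue() and fold the per-device bdi_congested() answers together with |=, so the stacked device reports congestion if any leg does; raid1's read path is the outlier, using &= so all mirrors must be congested before the array is. A standalone model of both reductions, with stand-in types:

#include <stdbool.h>
#include <stdio.h>

/* stand-ins for request_queue / backing_dev_info state */
struct request_queue_model {
    bool congested;
};

struct component {
    struct request_queue_model *q;
};

/* any leg congested -> whole device congested (the |= pattern above) */
static bool stacked_congested(const struct component *devs, int n)
{
    bool ret = false;

    for (int i = 0; i < n; i++)
        ret |= devs[i].q->congested;
    return ret;
}

/* raid1-style read path: congested only if every mirror is (the &= case) */
static bool mirrors_congested(const struct component *devs, int n)
{
    bool ret = true;

    for (int i = 0; i < n; i++)
        ret &= devs[i].q->congested;
    return ret;
}

int main(void)
{
    struct request_queue_model busy = { true }, idle = { false };
    struct component legs[2] = { { &busy }, { &idle } };

    printf("or: %d, and: %d\n",
           stacked_congested(legs, 2), mirrors_congested(legs, 2));
    return 0;
}

The asymmetry is deliberate: writes must reach every mirror, but a read can be served by any uncongested one.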
q 97 drivers/media/common/saa7146/saa7146_fops.c DEB_EE("dev:%p, dmaq:%p, state:%d\n", dev, q, state);
q 98 drivers/media/common/saa7146/saa7146_fops.c DEB_EE("q->curr:%p\n", q->curr);
q 100 drivers/media/common/saa7146/saa7146_fops.c BUG_ON(!q->curr);
q 103 drivers/media/common/saa7146/saa7146_fops.c if (NULL == q->curr) {
q 108 drivers/media/common/saa7146/saa7146_fops.c q->curr->vb.state = state;
q 109 drivers/media/common/saa7146/saa7146_fops.c q->curr->vb.ts = ktime_get_ns();
q 110 drivers/media/common/saa7146/saa7146_fops.c wake_up(&q->curr->vb.done);
q 112 drivers/media/common/saa7146/saa7146_fops.c q->curr = NULL;
q 116 drivers/media/common/saa7146/saa7146_fops.c struct saa7146_dmaqueue *q, int vbi)
q 120 drivers/media/common/saa7146/saa7146_fops.c BUG_ON(!q);
q 122 drivers/media/common/saa7146/saa7146_fops.c DEB_INT("dev:%p, dmaq:%p, vbi:%d\n", dev, q, vbi);
q 125 drivers/media/common/saa7146/saa7146_fops.c if (!list_empty(&q->queue)) {
q 127 drivers/media/common/saa7146/saa7146_fops.c buf = list_entry(q->queue.next,struct saa7146_buf,vb.queue);
q 129 drivers/media/common/saa7146/saa7146_fops.c if (!list_empty(&q->queue))
q 130 drivers/media/common/saa7146/saa7146_fops.c next = list_entry(q->queue.next,struct saa7146_buf, vb.queue);
q 131 drivers/media/common/saa7146/saa7146_fops.c q->curr = buf;
q 133 drivers/media/common/saa7146/saa7146_fops.c buf, q->queue.prev, q->queue.next);
q 163 drivers/media/common/saa7146/saa7146_fops.c del_timer(&q->timeout);
q 169 drivers/media/common/saa7146/saa7146_fops.c struct saa7146_dmaqueue *q = from_timer(q, t, timeout);
q 170 drivers/media/common/saa7146/saa7146_fops.c struct saa7146_dev *dev = q->dev;
q 173 drivers/media/common/saa7146/saa7146_fops.c DEB_EE("dev:%p, dmaq:%p\n", dev, q);
q 176 drivers/media/common/saa7146/saa7146_fops.c if (q->curr) {
q 177 drivers/media/common/saa7146/saa7146_fops.c DEB_D("timeout on %p\n", q->curr);
q 178 drivers/media/common/saa7146/saa7146_fops.c saa7146_buffer_finish(dev,q,VIDEOBUF_ERROR);
q 295 drivers/media/common/saa7146/saa7146_fops.c struct videobuf_queue *q;
q 302 drivers/media/common/saa7146/saa7146_fops.c q = &fh->video_q;
q 310 drivers/media/common/saa7146/saa7146_fops.c q = &fh->vbi_q;
q 319 drivers/media/common/saa7146/saa7146_fops.c res = videobuf_mmap_mapper(q, vma);
q 329 drivers/media/common/saa7146/saa7146_fops.c struct videobuf_queue *q;
q 339 drivers/media/common/saa7146/saa7146_fops.c q = &fh->vbi_q;
q 342 drivers/media/common/saa7146/saa7146_fops.c q = &fh->video_q;
q 345 drivers/media/common/saa7146/saa7146_fops.c if (!list_empty(&q->stream))
q 346 drivers/media/common/saa7146/saa7146_fops.c buf = list_entry(q->stream.next, struct videobuf_buffer, stream);
q 219 drivers/media/common/saa7146/saa7146_vbi.c static int buffer_prepare(struct videobuf_queue *q, struct videobuf_buffer *vb,enum v4l2_field field)
q 221 drivers/media/common/saa7146/saa7146_vbi.c struct file *file = q->priv_data;
q 241 drivers/media/common/saa7146/saa7146_vbi.c saa7146_dma_free(dev,q,buf);
q 254 drivers/media/common/saa7146/saa7146_vbi.c err = videobuf_iolock(q,&buf->vb, NULL);
q 269 drivers/media/common/saa7146/saa7146_vbi.c saa7146_dma_free(dev,q,buf);
q 274 drivers/media/common/saa7146/saa7146_vbi.c static int buffer_setup(struct videobuf_queue *q, unsigned int *count, unsigned int *size)
q 289 drivers/media/common/saa7146/saa7146_vbi.c static void buffer_queue(struct videobuf_queue *q, struct videobuf_buffer *vb)
q 291 drivers/media/common/saa7146/saa7146_vbi.c struct file *file = q->priv_data;
q 301 drivers/media/common/saa7146/saa7146_vbi.c static void buffer_release(struct videobuf_queue *q, struct videobuf_buffer *vb)
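The saa7146_fops.c hits above sketch the usual capture-queue state machine: buffer_queue() either makes a buffer current or parks it on a pending list, buffer_finish() completes q->curr and wakes waiters, buffer_next() promotes the list head, and the timeout handler finishes a stuck q->curr with an error. A minimal single-threaded model of those transitions; DMA programming is reduced to a printout and all names are illustrative:

#include <stdio.h>

enum buf_state { BUF_QUEUED, BUF_ACTIVE, BUF_DONE, BUF_ERROR };

struct buf {
    int id;
    enum buf_state state;
    struct buf *next;
};

struct dmaq {
    struct buf *curr;           /* buffer the "hardware" is filling */
    struct buf *head, *tail;    /* pending FIFO */
};

static void start_dma(struct buf *b)    /* placeholder for programming hw */
{
    b->state = BUF_ACTIVE;
    printf("dma start on %d\n", b->id);
}

/* like saa7146_buffer_queue(): run now if idle, otherwise park in the FIFO */
static void buffer_queue(struct dmaq *q, struct buf *b)
{
    b->state = BUF_QUEUED;
    b->next = 0;
    if (!q->curr) {
        q->curr = b;
        start_dma(b);
    } else if (q->tail) {
        q->tail->next = b;
        q->tail = b;
    } else {
        q->head = q->tail = b;
    }
}

/* like saa7146_buffer_finish(): complete q->curr with a final state */
static void buffer_finish(struct dmaq *q, enum buf_state state)
{
    if (!q->curr)
        return;
    q->curr->state = state;     /* DONE on irq, ERROR from the timeout */
    printf("buffer %d finished (%d)\n", q->curr->id, state);
    q->curr = 0;
}

/* like saa7146_buffer_next(): promote the FIFO head to q->curr */
static void buffer_next(struct dmaq *q)
{
    struct buf *b = q->head;

    if (!b)
        return;
    q->head = b->next;
    if (!q->head)
        q->tail = 0;
    q->curr = b;
    start_dma(b);
}

int main(void)
{
    struct dmaq q = { 0 };
    struct buf a = { .id = 1 }, b = { .id = 2 };

    buffer_queue(&q, &a);           /* idle -> runs immediately */
    buffer_queue(&q, &b);           /* busy -> parked */
    buffer_finish(&q, BUF_DONE);    /* irq completes a */
    buffer_next(&q);                /* b takes over */
    buffer_finish(&q, BUF_ERROR);   /* what the timeout path would do */
    return 0;
}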
q 303 drivers/media/common/saa7146/saa7146_vbi.c struct file *file = q->priv_data;
q 309 drivers/media/common/saa7146/saa7146_vbi.c saa7146_dma_free(dev,q,buf);
q 382 drivers/media/common/saa7146/saa7146_video.c struct saa7146_dmaqueue *q = &vv->video_dmaq;
q 421 drivers/media/common/saa7146/saa7146_video.c if (q->curr)
q 422 drivers/media/common/saa7146/saa7146_video.c saa7146_buffer_finish(dev, q, VIDEOBUF_DONE);
q 1038 drivers/media/common/saa7146/saa7146_video.c static int buffer_prepare(struct videobuf_queue *q,
q 1041 drivers/media/common/saa7146/saa7146_video.c struct file *file = q->priv_data;
q 1076 drivers/media/common/saa7146/saa7146_video.c saa7146_dma_free(dev,q,buf);
q 1101 drivers/media/common/saa7146/saa7146_video.c err = videobuf_iolock(q,&buf->vb, &vv->ov_fb);
q 1115 drivers/media/common/saa7146/saa7146_video.c saa7146_dma_free(dev,q,buf);
q 1120 drivers/media/common/saa7146/saa7146_video.c static int buffer_setup(struct videobuf_queue *q, unsigned int *count, unsigned int *size)
q 1122 drivers/media/common/saa7146/saa7146_video.c struct file *file = q->priv_data;
q 1141 drivers/media/common/saa7146/saa7146_video.c static void buffer_queue(struct videobuf_queue *q, struct videobuf_buffer *vb)
q 1143 drivers/media/common/saa7146/saa7146_video.c struct file *file = q->priv_data;
q 1153 drivers/media/common/saa7146/saa7146_video.c static void buffer_release(struct videobuf_queue *q, struct videobuf_buffer *vb)
q 1155 drivers/media/common/saa7146/saa7146_video.c struct file *file = q->priv_data;
q 1162 drivers/media/common/saa7146/saa7146_video.c saa7146_dma_free(dev,q,buf);
q 1212 drivers/media/common/saa7146/saa7146_video.c struct videobuf_queue *q = &fh->video_q;
q 1219 drivers/media/common/saa7146/saa7146_video.c videobuf_stop(q);
q 1227 drivers/media/common/saa7146/saa7146_video.c struct saa7146_dmaqueue *q = &vv->video_dmaq;
q 1233 drivers/media/common/saa7146/saa7146_video.c if( NULL != q->curr ) {
q 1234 drivers/media/common/saa7146/saa7146_video.c saa7146_buffer_finish(dev,q,VIDEOBUF_DONE);
q 1236 drivers/media/common/saa7146/saa7146_video.c saa7146_buffer_next(dev,q,0);
q 92 drivers/media/common/videobuf2/videobuf2-core.c #define log_qop(q, op) \
q 93 drivers/media/common/videobuf2/videobuf2-core.c dprintk(2, "call_qop(%p, %s)%s\n", q, #op, \
q 94 drivers/media/common/videobuf2/videobuf2-core.c (q)->ops->op ? "" : " (nop)")
q 96 drivers/media/common/videobuf2/videobuf2-core.c #define call_qop(q, op, args...) \
q 100 drivers/media/common/videobuf2/videobuf2-core.c log_qop(q, op); \
q 101 drivers/media/common/videobuf2/videobuf2-core.c err = (q)->ops->op ? (q)->ops->op(args) : 0; \
q 103 drivers/media/common/videobuf2/videobuf2-core.c (q)->cnt_ ## op++; \
q 107 drivers/media/common/videobuf2/videobuf2-core.c #define call_void_qop(q, op, args...) \
q 109 drivers/media/common/videobuf2/videobuf2-core.c log_qop(q, op); \
q 110 drivers/media/common/videobuf2/videobuf2-core.c if ((q)->ops->op) \
q 111 drivers/media/common/videobuf2/videobuf2-core.c (q)->ops->op(args); \
q 112 drivers/media/common/videobuf2/videobuf2-core.c (q)->cnt_ ## op++; \
q 156 drivers/media/common/videobuf2/videobuf2-core.c #define call_qop(q, op, args...) \
q 157 drivers/media/common/videobuf2/videobuf2-core.c ((q)->ops->op ? (q)->ops->op(args) : 0)
q 159 drivers/media/common/videobuf2/videobuf2-core.c #define call_void_qop(q, op, args...) \
q 161 drivers/media/common/videobuf2/videobuf2-core.c if ((q)->ops->op) \
q 162 drivers/media/common/videobuf2/videobuf2-core.c (q)->ops->op(args); \
q 176 drivers/media/common/videobuf2/videobuf2-core.c #define call_bufop(q, op, args...) \
q 179 drivers/media/common/videobuf2/videobuf2-core.c if (q && q->buf_ops && q->buf_ops->op) \
q 180 drivers/media/common/videobuf2/videobuf2-core.c ret = q->buf_ops->op(args); \
q 184 drivers/media/common/videobuf2/videobuf2-core.c #define call_void_bufop(q, op, args...) \
q 186 drivers/media/common/videobuf2/videobuf2-core.c if (q && q->buf_ops && q->buf_ops->op) \
q 187 drivers/media/common/videobuf2/videobuf2-core.c q->buf_ops->op(args); \
q 190 drivers/media/common/videobuf2/videobuf2-core.c static void __vb2_queue_cancel(struct vb2_queue *q);
q 198 drivers/media/common/videobuf2/videobuf2-core.c struct vb2_queue *q = vb->vb2_queue;
q 216 drivers/media/common/videobuf2/videobuf2-core.c q->alloc_devs[plane] ? : q->dev,
q 217 drivers/media/common/videobuf2/videobuf2-core.c q->dma_attrs, size, q->dma_dir, q->gfp_flags);
q 305 drivers/media/common/videobuf2/videobuf2-core.c struct vb2_queue *q = vb->vb2_queue;
q 310 drivers/media/common/videobuf2/videobuf2-core.c struct vb2_buffer *prev = q->bufs[vb->index - 1];
q 334 drivers/media/common/videobuf2/videobuf2-core.c static int __vb2_queue_alloc(struct vb2_queue *q, enum vb2_memory memory,
q 344 drivers/media/common/videobuf2/videobuf2-core.c VB2_MAX_FRAME - q->num_buffers);
q 348 drivers/media/common/videobuf2/videobuf2-core.c vb = kzalloc(q->buf_struct_size, GFP_KERNEL);
q 355 drivers/media/common/videobuf2/videobuf2-core.c vb->vb2_queue = q;
q 357 drivers/media/common/videobuf2/videobuf2-core.c vb->index = q->num_buffers + buffer;
q 358 drivers/media/common/videobuf2/videobuf2-core.c vb->type = q->type;
q 364 drivers/media/common/videobuf2/videobuf2-core.c call_void_bufop(q, init_buffer, vb);
q 366 drivers/media/common/videobuf2/videobuf2-core.c q->bufs[vb->index] = vb;
q 374 drivers/media/common/videobuf2/videobuf2-core.c q->bufs[vb->index] = NULL;
q 389 drivers/media/common/videobuf2/videobuf2-core.c q->bufs[vb->index] = NULL;
q 405 drivers/media/common/videobuf2/videobuf2-core.c static void __vb2_free_mem(struct vb2_queue *q, unsigned int buffers)
q 410 drivers/media/common/videobuf2/videobuf2-core.c for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
q 412 drivers/media/common/videobuf2/videobuf2-core.c vb = q->bufs[buffer];
q 417 drivers/media/common/videobuf2/videobuf2-core.c if (q->memory == VB2_MEMORY_MMAP)
q 419 drivers/media/common/videobuf2/videobuf2-core.c else if (q->memory == VB2_MEMORY_DMABUF)
q 431 drivers/media/common/videobuf2/videobuf2-core.c static int __vb2_queue_free(struct vb2_queue *q, unsigned int buffers)
q 443 drivers/media/common/videobuf2/videobuf2-core.c for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
q 445 drivers/media/common/videobuf2/videobuf2-core.c if (q->bufs[buffer] == NULL)
q 447 drivers/media/common/videobuf2/videobuf2-core.c if (q->bufs[buffer]->state == VB2_BUF_STATE_PREPARING) {
q 454 drivers/media/common/videobuf2/videobuf2-core.c for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
q 456 drivers/media/common/videobuf2/videobuf2-core.c struct vb2_buffer *vb = q->bufs[buffer];
q 463 drivers/media/common/videobuf2/videobuf2-core.c __vb2_free_mem(q, buffers);
q 471 drivers/media/common/videobuf2/videobuf2-core.c if (q->num_buffers) {
q 472 drivers/media/common/videobuf2/videobuf2-core.c bool unbalanced = q->cnt_start_streaming != q->cnt_stop_streaming ||
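The videobuf2-core.c macro hits above show two builds of the same dispatcher: a debug call_qop()/call_void_qop() that logs and bumps a per-op counter, and a lean build that only guards against a missing callback (call_bufop()/call_void_bufop() add a NULL check on the ops table itself). The kernel spells the variadic part with the GNU `args...` extension; here is the same guarded-call idea in standard C99, as an illustrative reconstruction rather than the vb2 API:

#include <stdio.h>

struct ops {
    int (*queue_setup)(unsigned *count);
    void (*stop_streaming)(int last);
};

struct queue {
    const struct ops *ops;
    unsigned cnt_queue_setup;   /* debug counter, as in the logging build */
};

/* a missing optional op counts as success, mirroring `op ? op(args) : 0` */
#define call_qop(q, op, ...) \
    ((q)->ops->op ? (q)->ops->op(__VA_ARGS__) : 0)

#define call_void_qop(q, op, ...)              \
    do {                                       \
        if ((q)->ops->op)                      \
            (q)->ops->op(__VA_ARGS__);         \
    } while (0)

static int my_setup(unsigned *count)
{
    if (*count > 4)
        *count = 4;     /* driver trims the request */
    return 0;
}

int main(void)
{
    const struct ops ops = { .queue_setup = my_setup }; /* no stop_streaming */
    struct queue q = { .ops = &ops };
    unsigned count = 8;

    if (!call_qop(&q, queue_setup, &count))
        q.cnt_queue_setup++;
    call_void_qop(&q, stop_streaming, 0);   /* NULL op: silently skipped */
    printf("count=%u setups=%u\n", count, q.cnt_queue_setup);
    return 0;
}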
q 473 drivers/media/common/videobuf2/videobuf2-core.c q->cnt_wait_prepare != q->cnt_wait_finish;
q 476 drivers/media/common/videobuf2/videobuf2-core.c pr_info("counters for queue %p:%s\n", q,
q 479 drivers/media/common/videobuf2/videobuf2-core.c q->cnt_queue_setup, q->cnt_start_streaming,
q 480 drivers/media/common/videobuf2/videobuf2-core.c q->cnt_stop_streaming);
q 482 drivers/media/common/videobuf2/videobuf2-core.c q->cnt_wait_prepare, q->cnt_wait_finish);
q 484 drivers/media/common/videobuf2/videobuf2-core.c q->cnt_queue_setup = 0;
q 485 drivers/media/common/videobuf2/videobuf2-core.c q->cnt_wait_prepare = 0;
q 486 drivers/media/common/videobuf2/videobuf2-core.c q->cnt_wait_finish = 0;
q 487 drivers/media/common/videobuf2/videobuf2-core.c q->cnt_start_streaming = 0;
q 488 drivers/media/common/videobuf2/videobuf2-core.c q->cnt_stop_streaming = 0;
q 490 drivers/media/common/videobuf2/videobuf2-core.c for (buffer = 0; buffer < q->num_buffers; ++buffer) {
q 491 drivers/media/common/videobuf2/videobuf2-core.c struct vb2_buffer *vb = q->bufs[buffer];
q 503 drivers/media/common/videobuf2/videobuf2-core.c q, buffer, unbalanced ? " UNBALANCED!" : "");
q 529 drivers/media/common/videobuf2/videobuf2-core.c for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
q 531 drivers/media/common/videobuf2/videobuf2-core.c kfree(q->bufs[buffer]);
q 532 drivers/media/common/videobuf2/videobuf2-core.c q->bufs[buffer] = NULL;
q 535 drivers/media/common/videobuf2/videobuf2-core.c q->num_buffers -= buffers;
q 536 drivers/media/common/videobuf2/videobuf2-core.c if (!q->num_buffers) {
q 537 drivers/media/common/videobuf2/videobuf2-core.c q->memory = VB2_MEMORY_UNKNOWN;
q 538 drivers/media/common/videobuf2/videobuf2-core.c INIT_LIST_HEAD(&q->queued_list);
q 543 drivers/media/common/videobuf2/videobuf2-core.c bool vb2_buffer_in_use(struct vb2_queue *q, struct vb2_buffer *vb)
q 565 drivers/media/common/videobuf2/videobuf2-core.c static bool __buffers_in_use(struct vb2_queue *q)
q 568 drivers/media/common/videobuf2/videobuf2-core.c for (buffer = 0; buffer < q->num_buffers; ++buffer) {
q 569 drivers/media/common/videobuf2/videobuf2-core.c if (vb2_buffer_in_use(q, q->bufs[buffer]))
q 575 drivers/media/common/videobuf2/videobuf2-core.c void vb2_core_querybuf(struct vb2_queue *q, unsigned int index, void *pb)
q 577 drivers/media/common/videobuf2/videobuf2-core.c call_void_bufop(q, fill_user_buffer, q->bufs[index], pb);
q 585 drivers/media/common/videobuf2/videobuf2-core.c static int __verify_userptr_ops(struct vb2_queue *q)
q 587 drivers/media/common/videobuf2/videobuf2-core.c if (!(q->io_modes & VB2_USERPTR) || !q->mem_ops->get_userptr ||
q 588 drivers/media/common/videobuf2/videobuf2-core.c !q->mem_ops->put_userptr)
q 598 drivers/media/common/videobuf2/videobuf2-core.c static int __verify_mmap_ops(struct vb2_queue *q)
q 600 drivers/media/common/videobuf2/videobuf2-core.c if (!(q->io_modes & VB2_MMAP) || !q->mem_ops->alloc ||
q 601 drivers/media/common/videobuf2/videobuf2-core.c !q->mem_ops->put || !q->mem_ops->mmap)
q 611 drivers/media/common/videobuf2/videobuf2-core.c static int __verify_dmabuf_ops(struct vb2_queue *q)
q 613 drivers/media/common/videobuf2/videobuf2-core.c if (!(q->io_modes & VB2_DMABUF) || !q->mem_ops->attach_dmabuf ||
q 614 drivers/media/common/videobuf2/videobuf2-core.c !q->mem_ops->detach_dmabuf || !q->mem_ops->map_dmabuf ||
q 615 drivers/media/common/videobuf2/videobuf2-core.c !q->mem_ops->unmap_dmabuf)
q 621 drivers/media/common/videobuf2/videobuf2-core.c int vb2_verify_memory_type(struct vb2_queue *q,
q 630 drivers/media/common/videobuf2/videobuf2-core.c if (type != q->type) {
q 639 drivers/media/common/videobuf2/videobuf2-core.c if (memory == VB2_MEMORY_MMAP && __verify_mmap_ops(q)) {
q 644 drivers/media/common/videobuf2/videobuf2-core.c if (memory == VB2_MEMORY_USERPTR && __verify_userptr_ops(q)) {
q 649 drivers/media/common/videobuf2/videobuf2-core.c if (memory == VB2_MEMORY_DMABUF && __verify_dmabuf_ops(q)) {
q 659 drivers/media/common/videobuf2/videobuf2-core.c if (vb2_fileio_is_active(q)) {
q 667 drivers/media/common/videobuf2/videobuf2-core.c int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory,
q 675 drivers/media/common/videobuf2/videobuf2-core.c if (q->streaming) {
q 680 drivers/media/common/videobuf2/videobuf2-core.c if (q->waiting_in_dqbuf && *count) {
q 685 drivers/media/common/videobuf2/videobuf2-core.c if (*count == 0 || q->num_buffers != 0 ||
q 686 drivers/media/common/videobuf2/videobuf2-core.c (q->memory != VB2_MEMORY_UNKNOWN && q->memory != memory)) {
q 691 drivers/media/common/videobuf2/videobuf2-core.c mutex_lock(&q->mmap_lock);
q 692 drivers/media/common/videobuf2/videobuf2-core.c if (debug && q->memory == VB2_MEMORY_MMAP &&
q 693 drivers/media/common/videobuf2/videobuf2-core.c __buffers_in_use(q))
q 701 drivers/media/common/videobuf2/videobuf2-core.c __vb2_queue_cancel(q);
q 702 drivers/media/common/videobuf2/videobuf2-core.c ret = __vb2_queue_free(q, q->num_buffers);
q 703 drivers/media/common/videobuf2/videobuf2-core.c mutex_unlock(&q->mmap_lock);
q 718 drivers/media/common/videobuf2/videobuf2-core.c WARN_ON(q->min_buffers_needed > VB2_MAX_FRAME);
q 719 drivers/media/common/videobuf2/videobuf2-core.c num_buffers = max_t(unsigned int, *count, q->min_buffers_needed);
q 721 drivers/media/common/videobuf2/videobuf2-core.c memset(q->alloc_devs, 0, sizeof(q->alloc_devs));
q 722 drivers/media/common/videobuf2/videobuf2-core.c q->memory = memory;
q 728 drivers/media/common/videobuf2/videobuf2-core.c ret = call_qop(q, queue_setup, q, &num_buffers, &num_planes,
q 729 drivers/media/common/videobuf2/videobuf2-core.c plane_sizes, q->alloc_devs);
q 743 drivers/media/common/videobuf2/videobuf2-core.c __vb2_queue_alloc(q, memory, num_buffers, num_planes, plane_sizes);
q 753 drivers/media/common/videobuf2/videobuf2-core.c if (allocated_buffers < q->min_buffers_needed)
q 769 drivers/media/common/videobuf2/videobuf2-core.c ret = call_qop(q, queue_setup, q, &num_buffers,
q 770 drivers/media/common/videobuf2/videobuf2-core.c &num_planes, plane_sizes, q->alloc_devs);
q 781 drivers/media/common/videobuf2/videobuf2-core.c mutex_lock(&q->mmap_lock);
q 782 drivers/media/common/videobuf2/videobuf2-core.c q->num_buffers = allocated_buffers;
q 789 drivers/media/common/videobuf2/videobuf2-core.c __vb2_queue_free(q, allocated_buffers);
q 790 drivers/media/common/videobuf2/videobuf2-core.c mutex_unlock(&q->mmap_lock);
q 793 drivers/media/common/videobuf2/videobuf2-core.c mutex_unlock(&q->mmap_lock);
q 800 drivers/media/common/videobuf2/videobuf2-core.c q->waiting_for_buffers = !q->is_output;
q 806 drivers/media/common/videobuf2/videobuf2-core.c int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory,
q 814 drivers/media/common/videobuf2/videobuf2-core.c if (q->num_buffers == VB2_MAX_FRAME) {
q 819 drivers/media/common/videobuf2/videobuf2-core.c if (!q->num_buffers) {
q 820 drivers/media/common/videobuf2/videobuf2-core.c if (q->waiting_in_dqbuf && *count) {
q 824 drivers/media/common/videobuf2/videobuf2-core.c memset(q->alloc_devs, 0, sizeof(q->alloc_devs));
q 825 drivers/media/common/videobuf2/videobuf2-core.c q->memory = memory;
q 826 drivers/media/common/videobuf2/videobuf2-core.c q->waiting_for_buffers = !q->is_output;
q 827 drivers/media/common/videobuf2/videobuf2-core.c } else if (q->memory != memory) {
q 832 drivers/media/common/videobuf2/videobuf2-core.c num_buffers = min(*count, VB2_MAX_FRAME - q->num_buffers);
q 843 drivers/media/common/videobuf2/videobuf2-core.c ret = call_qop(q, queue_setup, q, &num_buffers,
q 844 drivers/media/common/videobuf2/videobuf2-core.c &num_planes, plane_sizes, q->alloc_devs);
q 849 drivers/media/common/videobuf2/videobuf2-core.c allocated_buffers = __vb2_queue_alloc(q, memory, num_buffers,
q 866 drivers/media/common/videobuf2/videobuf2-core.c ret = call_qop(q, queue_setup, q, &num_buffers,
q 867 drivers/media/common/videobuf2/videobuf2-core.c &num_planes, plane_sizes, q->alloc_devs);
q 878 drivers/media/common/videobuf2/videobuf2-core.c mutex_lock(&q->mmap_lock);
q 879 drivers/media/common/videobuf2/videobuf2-core.c q->num_buffers += allocated_buffers;
q 886 drivers/media/common/videobuf2/videobuf2-core.c __vb2_queue_free(q, allocated_buffers);
q 887 drivers/media/common/videobuf2/videobuf2-core.c mutex_unlock(&q->mmap_lock);
q 890 drivers/media/common/videobuf2/videobuf2-core.c mutex_unlock(&q->mmap_lock);
q 923 drivers/media/common/videobuf2/videobuf2-core.c struct vb2_queue *q = vb->vb2_queue;
q 952 drivers/media/common/videobuf2/videobuf2-core.c spin_lock_irqsave(&q->done_lock, flags);
q 957 drivers/media/common/videobuf2/videobuf2-core.c list_add_tail(&vb->done_entry, &q->done_list);
q 960 drivers/media/common/videobuf2/videobuf2-core.c atomic_dec(&q->owned_by_drv_count);
q 967 drivers/media/common/videobuf2/videobuf2-core.c spin_unlock_irqrestore(&q->done_lock, flags);
q 969 drivers/media/common/videobuf2/videobuf2-core.c trace_vb2_buf_done(q, vb);
q 976 drivers/media/common/videobuf2/videobuf2-core.c wake_up(&q->done_wq);
q 982 drivers/media/common/videobuf2/videobuf2-core.c void vb2_discard_done(struct vb2_queue *q)
q 987 drivers/media/common/videobuf2/videobuf2-core.c spin_lock_irqsave(&q->done_lock, flags);
q 988 drivers/media/common/videobuf2/videobuf2-core.c list_for_each_entry(vb, &q->done_list, done_entry)
q 990 drivers/media/common/videobuf2/videobuf2-core.c spin_unlock_irqrestore(&q->done_lock, flags);
q 1012 drivers/media/common/videobuf2/videobuf2-core.c struct vb2_queue *q = vb->vb2_queue;
q 1063 drivers/media/common/videobuf2/videobuf2-core.c q->alloc_devs[plane] ? : q->dev,
q 1065 drivers/media/common/videobuf2/videobuf2-core.c planes[plane].length, q->dma_dir);
q 1127 drivers/media/common/videobuf2/videobuf2-core.c struct vb2_queue *q = vb->vb2_queue;
q 1187 drivers/media/common/videobuf2/videobuf2-core.c q->alloc_devs[plane] ? : q->dev,
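vb2_core_reqbufs() and vb2_core_create_bufs() above negotiate in two passes: clamp the requested count, let the driver's queue_setup callback trim it, allocate, and if fewer buffers materialize than agreed, run queue_setup once more so the driver can accept or veto the shorter count. A compressed standalone model of that handshake; the allocator is faked and the names are stand-ins for the vb2 entry points:

#include <stdio.h>

#define MAX_FRAME 32

typedef int (*setup_fn)(unsigned *count);

/* fake allocator: pretend only 3 buffers' worth of memory exists */
static unsigned fake_alloc(unsigned wanted)
{
    return wanted < 3 ? wanted : 3;
}

static int reqbufs(setup_fn queue_setup, unsigned requested,
                   unsigned min_needed, unsigned *num_buffers)
{
    unsigned num = requested > min_needed ? requested : min_needed;
    unsigned allocated;

    if (num > MAX_FRAME)
        num = MAX_FRAME;
    if (queue_setup(&num))          /* first pass: driver may lower num */
        return -1;
    allocated = fake_alloc(num);
    if (allocated < min_needed)     /* hard floor: not enough memory */
        return -1;
    if (allocated < num) {
        num = allocated;
        if (queue_setup(&num))      /* second pass: confirm short count */
            return -1;
    }
    *num_buffers = allocated;
    return 0;
}

static int driver_setup(unsigned *count)
{
    if (*count > 8)
        *count = 8;     /* this driver never wants more than 8 */
    return 0;
}

int main(void)
{
    unsigned n = 0;

    if (!reqbufs(driver_setup, 16, 2, &n))
        printf("got %u buffers\n", n);  /* got 3 */
    return 0;
}

The second queue_setup pass is what lets a driver distinguish "I asked for eight and got three" from a fresh request for three.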
q 1188 drivers/media/common/videobuf2/videobuf2-core.c dbuf, planes[plane].length, q->dma_dir);
q 1261 drivers/media/common/videobuf2/videobuf2-core.c struct vb2_queue *q = vb->vb2_queue;
q 1264 drivers/media/common/videobuf2/videobuf2-core.c atomic_inc(&q->owned_by_drv_count);
q 1266 drivers/media/common/videobuf2/videobuf2-core.c trace_vb2_buf_queue(q, vb);
q 1273 drivers/media/common/videobuf2/videobuf2-core.c struct vb2_queue *q = vb->vb2_queue;
q 1278 drivers/media/common/videobuf2/videobuf2-core.c if (q->error) {
q 1287 drivers/media/common/videobuf2/videobuf2-core.c if (q->is_output) {
q 1297 drivers/media/common/videobuf2/videobuf2-core.c switch (q->memory) {
q 1357 drivers/media/common/videobuf2/videobuf2-core.c int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb,
q 1419 drivers/media/common/videobuf2/videobuf2-core.c int vb2_core_prepare_buf(struct vb2_queue *q, unsigned int index, void *pb)
q 1424 drivers/media/common/videobuf2/videobuf2-core.c vb = q->bufs[index];
q 1440 drivers/media/common/videobuf2/videobuf2-core.c call_void_bufop(q, fill_user_buffer, vb, pb);
q 1459 drivers/media/common/videobuf2/videobuf2-core.c static int vb2_start_streaming(struct vb2_queue *q)
q 1468 drivers/media/common/videobuf2/videobuf2-core.c list_for_each_entry(vb, &q->queued_list, queued_entry)
q 1472 drivers/media/common/videobuf2/videobuf2-core.c q->start_streaming_called = 1;
q 1473 drivers/media/common/videobuf2/videobuf2-core.c ret = call_qop(q, start_streaming, q,
q 1474 drivers/media/common/videobuf2/videobuf2-core.c atomic_read(&q->owned_by_drv_count));
q 1478 drivers/media/common/videobuf2/videobuf2-core.c q->start_streaming_called = 0;
q 1487 drivers/media/common/videobuf2/videobuf2-core.c if (WARN_ON(atomic_read(&q->owned_by_drv_count))) {
q 1494 drivers/media/common/videobuf2/videobuf2-core.c for (i = 0; i < q->num_buffers; ++i) {
q 1495 drivers/media/common/videobuf2/videobuf2-core.c vb = q->bufs[i];
q 1500 drivers/media/common/videobuf2/videobuf2-core.c WARN_ON(atomic_read(&q->owned_by_drv_count));
q 1507 drivers/media/common/videobuf2/videobuf2-core.c WARN_ON(!list_empty(&q->done_list));
q 1511 drivers/media/common/videobuf2/videobuf2-core.c int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb,
q 1517 drivers/media/common/videobuf2/videobuf2-core.c if (q->error) {
q 1522 drivers/media/common/videobuf2/videobuf2-core.c vb = q->bufs[index];
q 1525 drivers/media/common/videobuf2/videobuf2-core.c q->requires_requests) {
q 1530 drivers/media/common/videobuf2/videobuf2-core.c if ((req && q->uses_qbuf) ||
q 1532 drivers/media/common/videobuf2/videobuf2-core.c q->uses_requests)) {
q 1540 drivers/media/common/videobuf2/videobuf2-core.c q->uses_requests = 1;
q 1547 drivers/media/common/videobuf2/videobuf2-core.c if (q->is_output && !vb->prepared) {
q 1562 drivers/media/common/videobuf2/videobuf2-core.c q, true, &vb->req_obj);
q 1582 drivers/media/common/videobuf2/videobuf2-core.c call_void_bufop(q, copy_timestamp, vb, pb);
q 1583 drivers/media/common/videobuf2/videobuf2-core.c call_void_bufop(q, fill_user_buffer, vb, pb);
q 1591 drivers/media/common/videobuf2/videobuf2-core.c q->uses_qbuf = 1;
q 1614 drivers/media/common/videobuf2/videobuf2-core.c list_add_tail(&vb->queued_entry, &q->queued_list);
q 1615 drivers/media/common/videobuf2/videobuf2-core.c q->queued_count++;
q 1616 drivers/media/common/videobuf2/videobuf2-core.c q->waiting_for_buffers = false;
q 1620 drivers/media/common/videobuf2/videobuf2-core.c call_void_bufop(q, copy_timestamp, vb, pb);
q 1622 drivers/media/common/videobuf2/videobuf2-core.c trace_vb2_qbuf(q, vb);
q 1628 drivers/media/common/videobuf2/videobuf2-core.c if (q->start_streaming_called)
q 1633 drivers/media/common/videobuf2/videobuf2-core.c call_void_bufop(q, fill_user_buffer, vb, pb);
q 1641 drivers/media/common/videobuf2/videobuf2-core.c if (q->streaming && !q->start_streaming_called &&
q 1642 drivers/media/common/videobuf2/videobuf2-core.c q->queued_count >= q->min_buffers_needed) {
q 1643 drivers/media/common/videobuf2/videobuf2-core.c ret = vb2_start_streaming(q);
q 1659 drivers/media/common/videobuf2/videobuf2-core.c static int __vb2_wait_for_done_vb(struct vb2_queue *q, int nonblocking)
q 1673 drivers/media/common/videobuf2/videobuf2-core.c if (q->waiting_in_dqbuf) {
q 1678 drivers/media/common/videobuf2/videobuf2-core.c if (!q->streaming) {
q 1683 drivers/media/common/videobuf2/videobuf2-core.c if (q->error) {
q 1688 drivers/media/common/videobuf2/videobuf2-core.c if (q->last_buffer_dequeued) {
q 1693 drivers/media/common/videobuf2/videobuf2-core.c if (!list_empty(&q->done_list)) {
q 1705 drivers/media/common/videobuf2/videobuf2-core.c q->waiting_in_dqbuf = 1;
q 1711 drivers/media/common/videobuf2/videobuf2-core.c call_void_qop(q, wait_prepare, q);
q 1717 drivers/media/common/videobuf2/videobuf2-core.c ret = wait_event_interruptible(q->done_wq,
q 1718 drivers/media/common/videobuf2/videobuf2-core.c !list_empty(&q->done_list) || !q->streaming ||
q 1719 drivers/media/common/videobuf2/videobuf2-core.c q->error);
q 1725 drivers/media/common/videobuf2/videobuf2-core.c call_void_qop(q, wait_finish, q);
q 1726 drivers/media/common/videobuf2/videobuf2-core.c q->waiting_in_dqbuf = 0;
q 1740 drivers/media/common/videobuf2/videobuf2-core.c static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb,
q 1749 drivers/media/common/videobuf2/videobuf2-core.c ret = __vb2_wait_for_done_vb(q, nonblocking);
q 1757 drivers/media/common/videobuf2/videobuf2-core.c spin_lock_irqsave(&q->done_lock, flags);
q 1758 drivers/media/common/videobuf2/videobuf2-core.c *vb = list_first_entry(&q->done_list, struct vb2_buffer, done_entry);
q 1765 drivers/media/common/videobuf2/videobuf2-core.c ret = call_bufop(q, verify_planes_array, *vb, pb);
q 1768 drivers/media/common/videobuf2/videobuf2-core.c spin_unlock_irqrestore(&q->done_lock, flags);
q 1773 drivers/media/common/videobuf2/videobuf2-core.c int vb2_wait_for_all_buffers(struct vb2_queue *q)
q 1775 drivers/media/common/videobuf2/videobuf2-core.c if (!q->streaming) {
q 1780 drivers/media/common/videobuf2/videobuf2-core.c if (q->start_streaming_called)
q 1781 drivers/media/common/videobuf2/videobuf2-core.c wait_event(q->done_wq, !atomic_read(&q->owned_by_drv_count));
q 1791 drivers/media/common/videobuf2/videobuf2-core.c struct vb2_queue *q = vb->vb2_queue;
q 1799 drivers/media/common/videobuf2/videobuf2-core.c call_void_bufop(q, init_buffer, vb);
q 1802 drivers/media/common/videobuf2/videobuf2-core.c int vb2_core_dqbuf(struct vb2_queue *q, unsigned int *pindex, void *pb,
q 1808 drivers/media/common/videobuf2/videobuf2-core.c ret = __vb2_get_done_vb(q, &vb, pb, nonblocking);
q 1832 drivers/media/common/videobuf2/videobuf2-core.c call_void_bufop(q, fill_user_buffer, vb, pb);
q 1836 drivers/media/common/videobuf2/videobuf2-core.c q->queued_count--;
q 1838 drivers/media/common/videobuf2/videobuf2-core.c trace_vb2_dqbuf(q, vb);
q 1865 drivers/media/common/videobuf2/videobuf2-core.c static void __vb2_queue_cancel(struct vb2_queue *q)
q 1873 drivers/media/common/videobuf2/videobuf2-core.c if (q->start_streaming_called)
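vb2_buffer_done() and the __vb2_wait_for_done_vb()/__vb2_get_done_vb() hits above form a producer/consumer pair: completion appends to done_list under done_lock and wakes done_wq, while dequeue sleeps until the list is non-empty, streaming stops, or the queue errors out. The same shape in userspace, with a pthread mutex and condition variable standing in for the spinlock and wait queue; this is a model, not the kernel primitives:

#include <pthread.h>
#include <stdio.h>

struct vbuf {
    int index;
    struct vbuf *next;
};

struct queue {
    pthread_mutex_t done_lock;
    pthread_cond_t done_wq;
    struct vbuf *done_head, *done_tail;
    int streaming, error;
};

/* irq side in vb2: append and wake a waiter */
static void buffer_done(struct queue *q, struct vbuf *vb)
{
    pthread_mutex_lock(&q->done_lock);
    vb->next = 0;
    if (q->done_tail)
        q->done_tail->next = vb;
    else
        q->done_head = vb;
    q->done_tail = vb;
    pthread_cond_signal(&q->done_wq);
    pthread_mutex_unlock(&q->done_lock);
}

/* dqbuf side: block until a buffer is done, streaming ends, or error */
static struct vbuf *wait_done(struct queue *q)
{
    struct vbuf *vb;

    pthread_mutex_lock(&q->done_lock);
    while (!q->done_head && q->streaming && !q->error)
        pthread_cond_wait(&q->done_wq, &q->done_lock);
    vb = q->done_head;
    if (vb) {
        q->done_head = vb->next;
        if (!q->done_head)
            q->done_tail = 0;
    }
    pthread_mutex_unlock(&q->done_lock);
    return vb;  /* NULL means "stopped or errored with nothing done" */
}

int main(void)
{
    struct queue q = {
        .done_lock = PTHREAD_MUTEX_INITIALIZER,
        .done_wq = PTHREAD_COND_INITIALIZER,
        .streaming = 1,
    };
    struct vbuf vb = { .index = 0 };

    buffer_done(&q, &vb);   /* producer runs first here, so no blocking */
    printf("dequeued %d\n", wait_done(&q)->index);
    return 0;
}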
q 1874 drivers/media/common/videobuf2/videobuf2-core.c call_void_qop(q, stop_streaming, q);
q 1882 drivers/media/common/videobuf2/videobuf2-core.c if (WARN_ON(atomic_read(&q->owned_by_drv_count))) {
q 1883 drivers/media/common/videobuf2/videobuf2-core.c for (i = 0; i < q->num_buffers; ++i)
q 1884 drivers/media/common/videobuf2/videobuf2-core.c if (q->bufs[i]->state == VB2_BUF_STATE_ACTIVE) {
q 1886 drivers/media/common/videobuf2/videobuf2-core.c q->bufs[i]);
q 1887 drivers/media/common/videobuf2/videobuf2-core.c vb2_buffer_done(q->bufs[i], VB2_BUF_STATE_ERROR);
q 1890 drivers/media/common/videobuf2/videobuf2-core.c WARN_ON(atomic_read(&q->owned_by_drv_count));
q 1893 drivers/media/common/videobuf2/videobuf2-core.c q->streaming = 0;
q 1894 drivers/media/common/videobuf2/videobuf2-core.c q->start_streaming_called = 0;
q 1895 drivers/media/common/videobuf2/videobuf2-core.c q->queued_count = 0;
q 1896 drivers/media/common/videobuf2/videobuf2-core.c q->error = 0;
q 1897 drivers/media/common/videobuf2/videobuf2-core.c q->uses_requests = 0;
q 1898 drivers/media/common/videobuf2/videobuf2-core.c q->uses_qbuf = 0;
q 1903 drivers/media/common/videobuf2/videobuf2-core.c INIT_LIST_HEAD(&q->queued_list);
q 1908 drivers/media/common/videobuf2/videobuf2-core.c INIT_LIST_HEAD(&q->done_list);
q 1909 drivers/media/common/videobuf2/videobuf2-core.c atomic_set(&q->owned_by_drv_count, 0);
q 1910 drivers/media/common/videobuf2/videobuf2-core.c wake_up_all(&q->done_wq);
q 1921 drivers/media/common/videobuf2/videobuf2-core.c for (i = 0; i < q->num_buffers; ++i) {
q 1922 drivers/media/common/videobuf2/videobuf2-core.c struct vb2_buffer *vb = q->bufs[i];
q 1969 drivers/media/common/videobuf2/videobuf2-core.c int vb2_core_streamon(struct vb2_queue *q, unsigned int type)
q 1973 drivers/media/common/videobuf2/videobuf2-core.c if (type != q->type) {
q 1978 drivers/media/common/videobuf2/videobuf2-core.c if (q->streaming) {
q 1983 drivers/media/common/videobuf2/videobuf2-core.c if (!q->num_buffers) {
q 1988 drivers/media/common/videobuf2/videobuf2-core.c if (q->num_buffers < q->min_buffers_needed) {
q 1990 drivers/media/common/videobuf2/videobuf2-core.c q->min_buffers_needed);
q 1998 drivers/media/common/videobuf2/videobuf2-core.c if (q->queued_count >= q->min_buffers_needed) {
q 1999 drivers/media/common/videobuf2/videobuf2-core.c ret = v4l_vb2q_enable_media_source(q);
q 2002 drivers/media/common/videobuf2/videobuf2-core.c ret = vb2_start_streaming(q);
q 2007 drivers/media/common/videobuf2/videobuf2-core.c q->streaming = 1;
q 2014 drivers/media/common/videobuf2/videobuf2-core.c void vb2_queue_error(struct vb2_queue *q)
q 2016 drivers/media/common/videobuf2/videobuf2-core.c q->error = 1;
q 2018 drivers/media/common/videobuf2/videobuf2-core.c wake_up_all(&q->done_wq);
q 2022 drivers/media/common/videobuf2/videobuf2-core.c int vb2_core_streamoff(struct vb2_queue *q, unsigned int type)
q 2024 drivers/media/common/videobuf2/videobuf2-core.c if (type != q->type) {
q 2038 drivers/media/common/videobuf2/videobuf2-core.c __vb2_queue_cancel(q);
q 2039 drivers/media/common/videobuf2/videobuf2-core.c q->waiting_for_buffers = !q->is_output;
q 2040 drivers/media/common/videobuf2/videobuf2-core.c q->last_buffer_dequeued = false;
q 2050 drivers/media/common/videobuf2/videobuf2-core.c static int __find_plane_by_offset(struct vb2_queue *q, unsigned long off,
q 2061 drivers/media/common/videobuf2/videobuf2-core.c for (buffer = 0; buffer < q->num_buffers; ++buffer) {
q 2062 drivers/media/common/videobuf2/videobuf2-core.c vb = q->bufs[buffer];
q 2076 drivers/media/common/videobuf2/videobuf2-core.c int vb2_core_expbuf(struct vb2_queue *q, int *fd, unsigned int type,
q 2084 drivers/media/common/videobuf2/videobuf2-core.c if (q->memory != VB2_MEMORY_MMAP) {
q 2089 drivers/media/common/videobuf2/videobuf2-core.c if (!q->mem_ops->get_dmabuf) {
q 2099 drivers/media/common/videobuf2/videobuf2-core.c if (type != q->type) {
q 2104 drivers/media/common/videobuf2/videobuf2-core.c if (index >= q->num_buffers) {
q 2109 drivers/media/common/videobuf2/videobuf2-core.c vb = q->bufs[index];
q 2116 drivers/media/common/videobuf2/videobuf2-core.c if (vb2_fileio_is_active(q)) {
q 2147 drivers/media/common/videobuf2/videobuf2-core.c int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma)
q 2155 drivers/media/common/videobuf2/videobuf2-core.c if (q->memory != VB2_MEMORY_MMAP) {
q 2167 drivers/media/common/videobuf2/videobuf2-core.c if (q->is_output) {
q 2179 drivers/media/common/videobuf2/videobuf2-core.c mutex_lock(&q->mmap_lock);
q 2181 drivers/media/common/videobuf2/videobuf2-core.c if (vb2_fileio_is_active(q)) {
q 2190 drivers/media/common/videobuf2/videobuf2-core.c ret = __find_plane_by_offset(q, off, &buffer, &plane);
q 2194 drivers/media/common/videobuf2/videobuf2-core.c vb = q->bufs[buffer];
q 2219 drivers/media/common/videobuf2/videobuf2-core.c mutex_unlock(&q->mmap_lock);
q 2229 drivers/media/common/videobuf2/videobuf2-core.c unsigned long vb2_get_unmapped_area(struct vb2_queue *q,
q 2241 drivers/media/common/videobuf2/videobuf2-core.c if (q->memory != VB2_MEMORY_MMAP) {
q 2249 drivers/media/common/videobuf2/videobuf2-core.c ret = __find_plane_by_offset(q, off, &buffer, &plane);
q 2253 drivers/media/common/videobuf2/videobuf2-core.c vb = q->bufs[buffer];
q 2261 drivers/media/common/videobuf2/videobuf2-core.c int vb2_core_queue_init(struct vb2_queue *q)
q 2266 drivers/media/common/videobuf2/videobuf2-core.c if (WARN_ON(!q) ||
q 2267 drivers/media/common/videobuf2/videobuf2-core.c WARN_ON(!q->ops) ||
q 2268 drivers/media/common/videobuf2/videobuf2-core.c WARN_ON(!q->mem_ops) ||
q 2269 drivers/media/common/videobuf2/videobuf2-core.c WARN_ON(!q->type) ||
q 2270 drivers/media/common/videobuf2/videobuf2-core.c WARN_ON(!q->io_modes) ||
q 2271 drivers/media/common/videobuf2/videobuf2-core.c WARN_ON(!q->ops->queue_setup) ||
q 2272 drivers/media/common/videobuf2/videobuf2-core.c WARN_ON(!q->ops->buf_queue))
q 2275 drivers/media/common/videobuf2/videobuf2-core.c if (WARN_ON(q->requires_requests && !q->supports_requests))
q 2278 drivers/media/common/videobuf2/videobuf2-core.c INIT_LIST_HEAD(&q->queued_list);
q 2279 drivers/media/common/videobuf2/videobuf2-core.c INIT_LIST_HEAD(&q->done_list);
q 2280 drivers/media/common/videobuf2/videobuf2-core.c spin_lock_init(&q->done_lock);
q 2281 drivers/media/common/videobuf2/videobuf2-core.c mutex_init(&q->mmap_lock);
q 2282 drivers/media/common/videobuf2/videobuf2-core.c init_waitqueue_head(&q->done_wq);
q 2284 drivers/media/common/videobuf2/videobuf2-core.c q->memory = VB2_MEMORY_UNKNOWN;
q 2286 drivers/media/common/videobuf2/videobuf2-core.c if (q->buf_struct_size == 0)
q 2287 drivers/media/common/videobuf2/videobuf2-core.c q->buf_struct_size = sizeof(struct vb2_buffer);
q 2289 drivers/media/common/videobuf2/videobuf2-core.c if (q->bidirectional)
q 2290 drivers/media/common/videobuf2/videobuf2-core.c q->dma_dir = DMA_BIDIRECTIONAL;
q 2292 drivers/media/common/videobuf2/videobuf2-core.c q->dma_dir = q->is_output ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
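vb2_core_queue_init() above validates before it initializes: a WARN_ON chain refuses a queue that lacks mandatory callbacks, then defaults are filled in (buf_struct_size, and the DMA direction derived from is_output/bidirectional). A standalone sketch of that validate-then-default pattern; the field names and error value are illustrative:

#include <stdio.h>
#include <stddef.h>

enum dma_dir { DIR_TO_DEV, DIR_FROM_DEV, DIR_BIDIR };

struct ops {
    int (*queue_setup)(unsigned *count);
    void (*buf_queue)(int index);
};

struct queue {
    const struct ops *ops;
    size_t buf_struct_size;
    int is_output, bidirectional;
    enum dma_dir dma_dir;
};

static int queue_init(struct queue *q)
{
    /* mandatory ops missing -> refuse the queue outright */
    if (!q || !q->ops || !q->ops->queue_setup || !q->ops->buf_queue)
        return -1;

    if (q->buf_struct_size == 0)
        q->buf_struct_size = sizeof(int);   /* stand-in default */

    if (q->bidirectional)
        q->dma_dir = DIR_BIDIR;
    else
        q->dma_dir = q->is_output ? DIR_TO_DEV : DIR_FROM_DEV;
    return 0;
}

static int setup(unsigned *count) { (void)count; return 0; }
static void bq(int index) { (void)index; }

int main(void)
{
    const struct ops ops = { setup, bq };
    struct queue q = { .ops = &ops, .is_output = 1 };

    printf("init=%d dir=%d\n", queue_init(&q), q.dma_dir);
    return 0;
}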
q 2298 drivers/media/common/videobuf2/videobuf2-core.c static int __vb2_init_fileio(struct vb2_queue *q, int read);
q 2299 drivers/media/common/videobuf2/videobuf2-core.c static int __vb2_cleanup_fileio(struct vb2_queue *q);
q 2300 drivers/media/common/videobuf2/videobuf2-core.c void vb2_core_queue_release(struct vb2_queue *q)
q 2302 drivers/media/common/videobuf2/videobuf2-core.c __vb2_cleanup_fileio(q);
q 2303 drivers/media/common/videobuf2/videobuf2-core.c __vb2_queue_cancel(q);
q 2304 drivers/media/common/videobuf2/videobuf2-core.c mutex_lock(&q->mmap_lock);
q 2305 drivers/media/common/videobuf2/videobuf2-core.c __vb2_queue_free(q, q->num_buffers);
q 2306 drivers/media/common/videobuf2/videobuf2-core.c mutex_unlock(&q->mmap_lock);
q 2310 drivers/media/common/videobuf2/videobuf2-core.c __poll_t vb2_core_poll(struct vb2_queue *q, struct file *file,
q 2317 drivers/media/common/videobuf2/videobuf2-core.c if (!q->is_output && !(req_events & (EPOLLIN | EPOLLRDNORM)))
q 2319 drivers/media/common/videobuf2/videobuf2-core.c if (q->is_output && !(req_events & (EPOLLOUT | EPOLLWRNORM)))
q 2322 drivers/media/common/videobuf2/videobuf2-core.c poll_wait(file, &q->done_wq, wait);
q 2327 drivers/media/common/videobuf2/videobuf2-core.c if (q->num_buffers == 0 && !vb2_fileio_is_active(q)) {
q 2328 drivers/media/common/videobuf2/videobuf2-core.c if (!q->is_output && (q->io_modes & VB2_READ) &&
q 2330 drivers/media/common/videobuf2/videobuf2-core.c if (__vb2_init_fileio(q, 1))
q 2333 drivers/media/common/videobuf2/videobuf2-core.c if (q->is_output && (q->io_modes & VB2_WRITE) &&
q 2335 drivers/media/common/videobuf2/videobuf2-core.c if (__vb2_init_fileio(q, 0))
q 2348 drivers/media/common/videobuf2/videobuf2-core.c if (!vb2_is_streaming(q) || q->error)
q 2357 drivers/media/common/videobuf2/videobuf2-core.c if (q->quirk_poll_must_check_waiting_for_buffers &&
q 2358 drivers/media/common/videobuf2/videobuf2-core.c q->waiting_for_buffers && (req_events & (EPOLLIN | EPOLLRDNORM)))
q 2365 drivers/media/common/videobuf2/videobuf2-core.c if (q->is_output && q->fileio && q->queued_count < q->num_buffers)
q 2368 drivers/media/common/videobuf2/videobuf2-core.c if (list_empty(&q->done_list)) {
q 2373 drivers/media/common/videobuf2/videobuf2-core.c if (q->last_buffer_dequeued)
q 2380 drivers/media/common/videobuf2/videobuf2-core.c spin_lock_irqsave(&q->done_lock, flags);
q 2381 drivers/media/common/videobuf2/videobuf2-core.c if (!list_empty(&q->done_list))
q 2382 drivers/media/common/videobuf2/videobuf2-core.c vb = list_first_entry(&q->done_list, struct vb2_buffer,
q 2384 drivers/media/common/videobuf2/videobuf2-core.c spin_unlock_irqrestore(&q->done_lock, flags);
q 2388 drivers/media/common/videobuf2/videobuf2-core.c return (q->is_output) ?
q 2452 drivers/media/common/videobuf2/videobuf2-core.c static int __vb2_init_fileio(struct vb2_queue *q, int read)
q 2461 drivers/media/common/videobuf2/videobuf2-core.c if (WARN_ON((read && !(q->io_modes & VB2_READ)) ||
q 2462 drivers/media/common/videobuf2/videobuf2-core.c (!read && !(q->io_modes & VB2_WRITE))))
q 2468 drivers/media/common/videobuf2/videobuf2-core.c if (!q->mem_ops->vaddr)
q 2474 drivers/media/common/videobuf2/videobuf2-core.c if (q->streaming || q->num_buffers > 0)
q 2483 drivers/media/common/videobuf2/videobuf2-core.c (read) ? "read" : "write", count, q->fileio_read_once,
q 2484 drivers/media/common/videobuf2/videobuf2-core.c q->fileio_write_immediately);
q 2490 drivers/media/common/videobuf2/videobuf2-core.c fileio->read_once = q->fileio_read_once;
q 2491 drivers/media/common/videobuf2/videobuf2-core.c fileio->write_immediately = q->fileio_write_immediately;
q 2499 drivers/media/common/videobuf2/videobuf2-core.c fileio->type = q->type;
q 2500 drivers/media/common/videobuf2/videobuf2-core.c q->fileio = fileio;
q 2501 drivers/media/common/videobuf2/videobuf2-core.c ret = vb2_core_reqbufs(q, fileio->memory, &fileio->count);
q 2509 drivers/media/common/videobuf2/videobuf2-core.c if (q->bufs[0]->num_planes != 1) {
q 2517 drivers/media/common/videobuf2/videobuf2-core.c for (i = 0; i < q->num_buffers; i++) {
q 2518 drivers/media/common/videobuf2/videobuf2-core.c fileio->bufs[i].vaddr = vb2_plane_vaddr(q->bufs[i], 0);
q 2523 drivers/media/common/videobuf2/videobuf2-core.c fileio->bufs[i].size = vb2_plane_size(q->bufs[i], 0);
q 2533 drivers/media/common/videobuf2/videobuf2-core.c for (i = 0; i < q->num_buffers; i++) {
q 2534 drivers/media/common/videobuf2/videobuf2-core.c ret = vb2_core_qbuf(q, i, NULL, NULL);
q 2543 drivers/media/common/videobuf2/videobuf2-core.c fileio->initial_index = q->num_buffers;
q 2544 drivers/media/common/videobuf2/videobuf2-core.c fileio->cur_index = q->num_buffers;
q 2550 drivers/media/common/videobuf2/videobuf2-core.c ret = vb2_core_streamon(q, q->type);
q 2558 drivers/media/common/videobuf2/videobuf2-core.c vb2_core_reqbufs(q, fileio->memory, &fileio->count);
q 2561 drivers/media/common/videobuf2/videobuf2-core.c q->fileio = NULL;
q 2570 drivers/media/common/videobuf2/videobuf2-core.c static int __vb2_cleanup_fileio(struct vb2_queue *q)
q 2572 drivers/media/common/videobuf2/videobuf2-core.c struct vb2_fileio_data *fileio = q->fileio;
q 2575 drivers/media/common/videobuf2/videobuf2-core.c vb2_core_streamoff(q, q->type);
q 2576 drivers/media/common/videobuf2/videobuf2-core.c q->fileio = NULL;
q 2578 drivers/media/common/videobuf2/videobuf2-core.c vb2_core_reqbufs(q, fileio->memory, &fileio->count);
q 2594 drivers/media/common/videobuf2/videobuf2-core.c static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_t count,
q 2599 drivers/media/common/videobuf2/videobuf2-core.c bool is_multiplanar = q->is_multiplanar;
q 2605 drivers/media/common/videobuf2/videobuf2-core.c bool copy_timestamp = !read && q->copy_timestamp;
q 2616 drivers/media/common/videobuf2/videobuf2-core.c if (q->waiting_in_dqbuf) {
q 2625 drivers/media/common/videobuf2/videobuf2-core.c if (!vb2_fileio_is_active(q)) {
q 2626 drivers/media/common/videobuf2/videobuf2-core.c ret = __vb2_init_fileio(q, read);
q 2631 drivers/media/common/videobuf2/videobuf2-core.c fileio = q->fileio;
q 2637 drivers/media/common/videobuf2/videobuf2-core.c if (index >= q->num_buffers) {
q 2643 drivers/media/common/videobuf2/videobuf2-core.c ret = vb2_core_dqbuf(q, &index, NULL, nonblock);
q 2651 drivers/media/common/videobuf2/videobuf2-core.c b = q->bufs[index];
q 2658 drivers/media/common/videobuf2/videobuf2-core.c buf->size = read ? vb2_get_plane_payload(q->bufs[index], 0)
q 2659 drivers/media/common/videobuf2/videobuf2-core.c : vb2_plane_size(q->bufs[index], 0);
q 2702 drivers/media/common/videobuf2/videobuf2-core.c struct vb2_buffer *b = q->bufs[index];
q 2709 drivers/media/common/videobuf2/videobuf2-core.c return __vb2_cleanup_fileio(q);
q 2719 drivers/media/common/videobuf2/videobuf2-core.c ret = vb2_core_qbuf(q, index, NULL, NULL);
q 2729 drivers/media/common/videobuf2/videobuf2-core.c buf->size = vb2_plane_size(q->bufs[index], 0);
q 2735 drivers/media/common/videobuf2/videobuf2-core.c if (fileio->initial_index < q->num_buffers)
q 2755 drivers/media/common/videobuf2/videobuf2-core.c size_t vb2_read(struct vb2_queue *q, char __user *data, size_t count,
q 2758 drivers/media/common/videobuf2/videobuf2-core.c return __vb2_perform_fileio(q, data, count, ppos, nonblocking, 1);
q 2762 drivers/media/common/videobuf2/videobuf2-core.c size_t vb2_write(struct vb2_queue *q, const char __user *data, size_t count,
q 2765 drivers/media/common/videobuf2/videobuf2-core.c return __vb2_perform_fileio(q, (char __user *) data, count,
q 2779 drivers/media/common/videobuf2/videobuf2-core.c struct vb2_queue *q = data;
q 2780 drivers/media/common/videobuf2/videobuf2-core.c struct vb2_threadio_data *threadio = q->threadio;
q 2786 drivers/media/common/videobuf2/videobuf2-core.c if (q->is_output) {
q 2787 drivers/media/common/videobuf2/videobuf2-core.c prequeue = q->num_buffers;
q 2788 drivers/media/common/videobuf2/videobuf2-core.c copy_timestamp = q->copy_timestamp;
q 2800 drivers/media/common/videobuf2/videobuf2-core.c vb = q->bufs[index++];
q 2803 drivers/media/common/videobuf2/videobuf2-core.c call_void_qop(q, wait_finish, q);
q 2805 drivers/media/common/videobuf2/videobuf2-core.c ret = vb2_core_dqbuf(q, &index, NULL, 0);
q 2806 drivers/media/common/videobuf2/videobuf2-core.c call_void_qop(q, wait_prepare, q);
q 2809 drivers/media/common/videobuf2/videobuf2-core.c vb = q->bufs[index];
q 2818 drivers/media/common/videobuf2/videobuf2-core.c call_void_qop(q, wait_finish, q);
q 2822 drivers/media/common/videobuf2/videobuf2-core.c ret = vb2_core_qbuf(q, vb->index, NULL, NULL);
q 2823 drivers/media/common/videobuf2/videobuf2-core.c call_void_qop(q, wait_prepare, q);
q 2841 drivers/media/common/videobuf2/videobuf2-core.c int vb2_thread_start(struct vb2_queue *q, vb2_thread_fnc fnc, void *priv,
q 2847 drivers/media/common/videobuf2/videobuf2-core.c if (q->threadio)
q 2849 drivers/media/common/videobuf2/videobuf2-core.c if (vb2_is_busy(q))
q 2851 drivers/media/common/videobuf2/videobuf2-core.c if (WARN_ON(q->fileio))
q 2860 drivers/media/common/videobuf2/videobuf2-core.c ret = __vb2_init_fileio(q, !q->is_output);
q 2864 drivers/media/common/videobuf2/videobuf2-core.c q->threadio = threadio;
q 2865 drivers/media/common/videobuf2/videobuf2-core.c threadio->thread = kthread_run(vb2_thread, q, "vb2-%s", thread_name);
q 2874 drivers/media/common/videobuf2/videobuf2-core.c __vb2_cleanup_fileio(q);
q 2881 drivers/media/common/videobuf2/videobuf2-core.c int vb2_thread_stop(struct vb2_queue *q)
q 2883 drivers/media/common/videobuf2/videobuf2-core.c struct vb2_threadio_data *threadio = q->threadio;
q 2890 drivers/media/common/videobuf2/videobuf2-core.c vb2_queue_error(q);
q 2892 drivers/media/common/videobuf2/videobuf2-core.c __vb2_cleanup_fileio(q);
q 2895 drivers/media/common/videobuf2/videobuf2-core.c q->threadio = NULL;
q 200 drivers/media/common/videobuf2/videobuf2-dvb.c struct list_head *list, *q;
q 220 drivers/media/common/videobuf2/videobuf2-dvb.c list_for_each_safe(list, q, &f->felist) {
q 254 drivers/media/common/videobuf2/videobuf2-dvb.c struct list_head *list, *q;
q 259 drivers/media/common/videobuf2/videobuf2-dvb.c list_for_each_safe(list, q, &f->felist) {
q 276 drivers/media/common/videobuf2/videobuf2-dvb.c struct list_head *list, *q;
q 282 drivers/media/common/videobuf2/videobuf2-dvb.c list_for_each_safe(list, q, &f->felist) {
q 317 drivers/media/common/videobuf2/videobuf2-dvb.c struct list_head *list, *q;
q 321 drivers/media/common/videobuf2/videobuf2-dvb.c list_for_each_safe(list, q, &f->felist) {
q 138 drivers/media/common/videobuf2/videobuf2-v4l2.c struct vb2_queue *q = vb->vb2_queue;
q 140 drivers/media/common/videobuf2/videobuf2-v4l2.c if (q->is_output) {
q 145 drivers/media/common/videobuf2/videobuf2-v4l2.c if (q->copy_timestamp)
q 171 drivers/media/common/videobuf2/videobuf2-v4l2.c struct vb2_queue *q = vb->vb2_queue;
q 182 drivers/media/common/videobuf2/videobuf2-v4l2.c if (b->field == V4L2_FIELD_ALTERNATE && q->is_output) {
q 334 drivers/media/common/videobuf2/videobuf2-v4l2.c static int vb2_queue_or_prepare_buf(struct vb2_queue *q, struct media_device *mdev,
q 344 drivers/media/common/videobuf2/videobuf2-v4l2.c if (b->type != q->type) {
q 349 drivers/media/common/videobuf2/videobuf2-v4l2.c if (b->index >= q->num_buffers) {
q 354 drivers/media/common/videobuf2/videobuf2-v4l2.c if (q->bufs[b->index] == NULL) {
q 360 drivers/media/common/videobuf2/videobuf2-v4l2.c if (b->memory != q->memory) {
q 365 drivers/media/common/videobuf2/videobuf2-v4l2.c vb = q->bufs[b->index];
q 390 drivers/media/common/videobuf2/videobuf2-v4l2.c if (q->requires_requests) {
q 394 drivers/media/common/videobuf2/videobuf2-v4l2.c if (q->uses_requests) {
q 399 drivers/media/common/videobuf2/videobuf2-v4l2.c } else if (!q->supports_requests) {
q 402 drivers/media/common/videobuf2/videobuf2-v4l2.c } else if (q->uses_qbuf) {
q 412 drivers/media/common/videobuf2/videobuf2-v4l2.c if (WARN_ON(!q->lock || !p_req))
q 420 drivers/media/common/videobuf2/videobuf2-v4l2.c if (WARN_ON(!q->ops->buf_request_complete))
q 427 drivers/media/common/videobuf2/videobuf2-v4l2.c if (WARN_ON((q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT ||
q 428 drivers/media/common/videobuf2/videobuf2-v4l2.c q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) &&
q 429 drivers/media/common/videobuf2/videobuf2-v4l2.c !q->ops->buf_out_validate))
q 468 drivers/media/common/videobuf2/videobuf2-v4l2.c struct vb2_queue *q = vb->vb2_queue;
q 485 drivers/media/common/videobuf2/videobuf2-v4l2.c if (q->is_multiplanar) {
q 497 drivers/media/common/videobuf2/videobuf2-v4l2.c if (q->memory == VB2_MEMORY_MMAP)
q 499 drivers/media/common/videobuf2/videobuf2-v4l2.c else if (q->memory == VB2_MEMORY_USERPTR)
q 501 drivers/media/common/videobuf2/videobuf2-v4l2.c else if (q->memory == VB2_MEMORY_DMABUF)
q 513 drivers/media/common/videobuf2/videobuf2-v4l2.c if (q->memory == VB2_MEMORY_MMAP)
q 515 drivers/media/common/videobuf2/videobuf2-v4l2.c else if (q->memory == VB2_MEMORY_USERPTR)
q 517 drivers/media/common/videobuf2/videobuf2-v4l2.c else if (q->memory == VB2_MEMORY_DMABUF)
q 525 drivers/media/common/videobuf2/videobuf2-v4l2.c b->flags |= q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK;
q 526 drivers/media/common/videobuf2/videobuf2-v4l2.c if (!q->copy_timestamp) {
q 532 drivers/media/common/videobuf2/videobuf2-v4l2.c b->flags |= q->timestamp_flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
q 560 drivers/media/common/videobuf2/videobuf2-v4l2.c if (vb2_buffer_in_use(q, vb))
q 600 drivers/media/common/videobuf2/videobuf2-v4l2.c int vb2_find_timestamp(const struct vb2_queue *q, u64 timestamp,
600 drivers/media/common/videobuf2/videobuf2-v4l2.c int vb2_find_timestamp(const struct vb2_queue *q, u64 timestamp, q 605 drivers/media/common/videobuf2/videobuf2-v4l2.c for (i = start_idx; i < q->num_buffers; i++) q 606 drivers/media/common/videobuf2/videobuf2-v4l2.c if (q->bufs[i]->copied_timestamp && q 607 drivers/media/common/videobuf2/videobuf2-v4l2.c q->bufs[i]->timestamp == timestamp) q 626 drivers/media/common/videobuf2/videobuf2-v4l2.c int vb2_querybuf(struct vb2_queue *q, struct v4l2_buffer *b) q 631 drivers/media/common/videobuf2/videobuf2-v4l2.c if (b->type != q->type) { q 636 drivers/media/common/videobuf2/videobuf2-v4l2.c if (b->index >= q->num_buffers) { q 640 drivers/media/common/videobuf2/videobuf2-v4l2.c vb = q->bufs[b->index]; q 643 drivers/media/common/videobuf2/videobuf2-v4l2.c vb2_core_querybuf(q, b->index, b); q 648 drivers/media/common/videobuf2/videobuf2-v4l2.c static void fill_buf_caps(struct vb2_queue *q, u32 *caps) q 651 drivers/media/common/videobuf2/videobuf2-v4l2.c if (q->io_modes & VB2_MMAP) q 653 drivers/media/common/videobuf2/videobuf2-v4l2.c if (q->io_modes & VB2_USERPTR) q 655 drivers/media/common/videobuf2/videobuf2-v4l2.c if (q->io_modes & VB2_DMABUF) q 658 drivers/media/common/videobuf2/videobuf2-v4l2.c if (q->supports_requests) q 663 drivers/media/common/videobuf2/videobuf2-v4l2.c int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req) q 665 drivers/media/common/videobuf2/videobuf2-v4l2.c int ret = vb2_verify_memory_type(q, req->memory, req->type); q 667 drivers/media/common/videobuf2/videobuf2-v4l2.c fill_buf_caps(q, &req->capabilities); q 668 drivers/media/common/videobuf2/videobuf2-v4l2.c return ret ? ret : vb2_core_reqbufs(q, req->memory, &req->count); q 672 drivers/media/common/videobuf2/videobuf2-v4l2.c int vb2_prepare_buf(struct vb2_queue *q, struct media_device *mdev, q 677 drivers/media/common/videobuf2/videobuf2-v4l2.c if (vb2_fileio_is_active(q)) { q 685 drivers/media/common/videobuf2/videobuf2-v4l2.c ret = vb2_queue_or_prepare_buf(q, mdev, b, true, NULL); q 687 drivers/media/common/videobuf2/videobuf2-v4l2.c return ret ? ret : vb2_core_prepare_buf(q, b->index, b); q 691 drivers/media/common/videobuf2/videobuf2-v4l2.c int vb2_create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create) q 696 drivers/media/common/videobuf2/videobuf2-v4l2.c int ret = vb2_verify_memory_type(q, create->memory, f->type); q 699 drivers/media/common/videobuf2/videobuf2-v4l2.c fill_buf_caps(q, &create->capabilities); q 700 drivers/media/common/videobuf2/videobuf2-v4l2.c create->index = q->num_buffers; q 742 drivers/media/common/videobuf2/videobuf2-v4l2.c return ret ? 
ret : vb2_core_create_bufs(q, create->memory, q 747 drivers/media/common/videobuf2/videobuf2-v4l2.c int vb2_qbuf(struct vb2_queue *q, struct media_device *mdev, q 753 drivers/media/common/videobuf2/videobuf2-v4l2.c if (vb2_fileio_is_active(q)) { q 758 drivers/media/common/videobuf2/videobuf2-v4l2.c ret = vb2_queue_or_prepare_buf(q, mdev, b, false, &req); q 761 drivers/media/common/videobuf2/videobuf2-v4l2.c ret = vb2_core_qbuf(q, b->index, b, req); q 768 drivers/media/common/videobuf2/videobuf2-v4l2.c int vb2_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking) q 772 drivers/media/common/videobuf2/videobuf2-v4l2.c if (vb2_fileio_is_active(q)) { q 777 drivers/media/common/videobuf2/videobuf2-v4l2.c if (b->type != q->type) { q 782 drivers/media/common/videobuf2/videobuf2-v4l2.c ret = vb2_core_dqbuf(q, NULL, b, nonblocking); q 784 drivers/media/common/videobuf2/videobuf2-v4l2.c if (!q->is_output && q 787 drivers/media/common/videobuf2/videobuf2-v4l2.c q->last_buffer_dequeued = true; q 799 drivers/media/common/videobuf2/videobuf2-v4l2.c int vb2_streamon(struct vb2_queue *q, enum v4l2_buf_type type) q 801 drivers/media/common/videobuf2/videobuf2-v4l2.c if (vb2_fileio_is_active(q)) { q 805 drivers/media/common/videobuf2/videobuf2-v4l2.c return vb2_core_streamon(q, type); q 809 drivers/media/common/videobuf2/videobuf2-v4l2.c int vb2_streamoff(struct vb2_queue *q, enum v4l2_buf_type type) q 811 drivers/media/common/videobuf2/videobuf2-v4l2.c if (vb2_fileio_is_active(q)) { q 815 drivers/media/common/videobuf2/videobuf2-v4l2.c return vb2_core_streamoff(q, type); q 819 drivers/media/common/videobuf2/videobuf2-v4l2.c int vb2_expbuf(struct vb2_queue *q, struct v4l2_exportbuffer *eb) q 821 drivers/media/common/videobuf2/videobuf2-v4l2.c return vb2_core_expbuf(q, &eb->fd, eb->type, eb->index, q 826 drivers/media/common/videobuf2/videobuf2-v4l2.c int vb2_queue_init(struct vb2_queue *q) q 831 drivers/media/common/videobuf2/videobuf2-v4l2.c if (WARN_ON(!q) || q 832 drivers/media/common/videobuf2/videobuf2-v4l2.c WARN_ON(q->timestamp_flags & q 838 drivers/media/common/videobuf2/videobuf2-v4l2.c WARN_ON((q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) == q 847 drivers/media/common/videobuf2/videobuf2-v4l2.c if (q->buf_struct_size == 0) q 848 drivers/media/common/videobuf2/videobuf2-v4l2.c q->buf_struct_size = sizeof(struct vb2_v4l2_buffer); q 850 drivers/media/common/videobuf2/videobuf2-v4l2.c q->buf_ops = &v4l2_buf_ops; q 851 drivers/media/common/videobuf2/videobuf2-v4l2.c q->is_multiplanar = V4L2_TYPE_IS_MULTIPLANAR(q->type); q 852 drivers/media/common/videobuf2/videobuf2-v4l2.c q->is_output = V4L2_TYPE_IS_OUTPUT(q->type); q 853 drivers/media/common/videobuf2/videobuf2-v4l2.c q->copy_timestamp = (q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) q 860 drivers/media/common/videobuf2/videobuf2-v4l2.c q->quirk_poll_must_check_waiting_for_buffers = true; q 862 drivers/media/common/videobuf2/videobuf2-v4l2.c return vb2_core_queue_init(q); q 866 drivers/media/common/videobuf2/videobuf2-v4l2.c void vb2_queue_release(struct vb2_queue *q) q 868 drivers/media/common/videobuf2/videobuf2-v4l2.c vb2_core_queue_release(q); q 872 drivers/media/common/videobuf2/videobuf2-v4l2.c __poll_t vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait) q 877 drivers/media/common/videobuf2/videobuf2-v4l2.c res = vb2_core_poll(q, file, wait); q 1110 drivers/media/common/videobuf2/videobuf2-v4l2.c struct vb2_queue *q = vdev->queue; q 1111 drivers/media/common/videobuf2/videobuf2-v4l2.c struct mutex *lock = 
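The vb2_queue_init() hits above, together with the per-driver q->field assignments that recur throughout this listing (cobalt, cx23885, cx25821, cx88), all trace one setup pattern: the driver fills a struct vb2_queue and hands it to the core. A minimal sketch of that pattern, using only fields that appear in the hits here; the demo_* names are hypothetical placeholders:

	#include <media/videobuf2-v4l2.h>
	#include <media/videobuf2-dma-sg.h>

	struct demo_buffer { struct vb2_v4l2_buffer vb; struct list_head queue; };
	struct demo_dev { struct mutex lock; struct device *dma_dev; };

	extern const struct vb2_ops demo_qops;	/* queue_setup/start_streaming/... */

	static int demo_init_queue(struct vb2_queue *q, struct demo_dev *dev)
	{
		q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
		q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_READ;
		q->min_buffers_needed = 2;	/* don't start streaming with fewer */
		q->drv_priv = dev;		/* handed back via q->drv_priv in the ops */
		q->buf_struct_size = sizeof(struct demo_buffer);
		q->ops = &demo_qops;
		q->mem_ops = &vb2_dma_sg_memops;
		q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
		q->lock = &dev->lock;		/* serializes the queue ioctls */
		q->dev = dev->dma_dev;
		return vb2_queue_init(q);	/* validates fields, then vb2_core_queue_init() */
	}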
q 1124 drivers/media/common/videobuf2/videobuf2-v4l2.c fileio = q->fileio;
q 1129 drivers/media/common/videobuf2/videobuf2-v4l2.c if (!fileio && q->fileio)
q 1130 drivers/media/common/videobuf2/videobuf2-v4l2.c q->owner = file->private_data;
q 551 drivers/media/dvb-core/dvb_demux.c const u8 *q;
q 578 drivers/media/dvb-core/dvb_demux.c q = &buf[p];
q 580 drivers/media/dvb-core/dvb_demux.c if (pktsize == 204 && (*q == 0xB8)) {
q 581 drivers/media/dvb-core/dvb_demux.c memcpy(demux->tsbuf, q, 188);
q 583 drivers/media/dvb-core/dvb_demux.c q = demux->tsbuf;
q 585 drivers/media/dvb-core/dvb_demux.c dvb_dmx_swfilter_packet(demux, q);
q 169 drivers/media/dvb-core/dvb_vb2.c struct vb2_queue *q = &ctx->vb_q;
q 173 drivers/media/dvb-core/dvb_vb2.c q->type = DVB_BUF_TYPE_CAPTURE;
q 175 drivers/media/dvb-core/dvb_vb2.c q->is_output = 0;
q 177 drivers/media/dvb-core/dvb_vb2.c q->io_modes = VB2_MMAP;
q 178 drivers/media/dvb-core/dvb_vb2.c q->drv_priv = ctx;
q 179 drivers/media/dvb-core/dvb_vb2.c q->buf_struct_size = sizeof(struct dvb_buffer);
q 180 drivers/media/dvb-core/dvb_vb2.c q->min_buffers_needed = 1;
q 181 drivers/media/dvb-core/dvb_vb2.c q->ops = &dvb_vb2_qops;
q 182 drivers/media/dvb-core/dvb_vb2.c q->mem_ops = &vb2_vmalloc_memops;
q 183 drivers/media/dvb-core/dvb_vb2.c q->buf_ops = &dvb_vb2_buf_ops;
q 184 drivers/media/dvb-core/dvb_vb2.c q->num_buffers = 0;
q 185 drivers/media/dvb-core/dvb_vb2.c ret = vb2_core_queue_init(q);
q 207 drivers/media/dvb-core/dvb_vb2.c struct vb2_queue *q = (struct vb2_queue *)&ctx->vb_q;
q 210 drivers/media/dvb-core/dvb_vb2.c vb2_core_queue_release(q);
q 220 drivers/media/dvb-core/dvb_vb2.c struct vb2_queue *q = &ctx->vb_q;
q 223 drivers/media/dvb-core/dvb_vb2.c ret = vb2_core_streamon(q, q->type);
q 237 drivers/media/dvb-core/dvb_vb2.c struct vb2_queue *q = (struct vb2_queue *)&ctx->vb_q;
q 241 drivers/media/dvb-core/dvb_vb2.c ret = vb2_core_streamoff(q, q->type);
q 368 drivers/media/dvb-core/dvb_vb2.c struct vb2_queue *q = &ctx->vb_q;
q 371 drivers/media/dvb-core/dvb_vb2.c ret = vb2_core_expbuf(&ctx->vb_q, &exp->fd, q->type, exp->index,
q 1143 drivers/media/dvb-frontends/rtl2832_sdr.c struct vb2_queue *q = &dev->vb_queue;
q 1149 drivers/media/dvb-frontends/rtl2832_sdr.c if (vb2_is_busy(q))
q 289 drivers/media/dvb-frontends/sp887x.c unsigned int q, r;
q 292 drivers/media/dvb-frontends/sp887x.c q = (r / d);
q 295 drivers/media/dvb-frontends/sp887x.c *quotient_i = q;
q 299 drivers/media/dvb-frontends/sp887x.c q = (q << 8) | (r / d);
q 301 drivers/media/dvb-frontends/sp887x.c *quotient_f = (q << 8) | (r / d);
q 1310 drivers/media/i2c/adv7511-v4l2.c u8 q = HDMI_QUANTIZATION_RANGE_DEFAULT;
q 1413 drivers/media/i2c/adv7511-v4l2.c q = y ? HDMI_QUANTIZATION_RANGE_DEFAULT :
q 1415 drivers/media/i2c/adv7511-v4l2.c yq = q ? q - 1 : HDMI_YCC_QUANTIZATION_RANGE_FULL;
q 1418 drivers/media/i2c/adv7511-v4l2.c q = y ? HDMI_QUANTIZATION_RANGE_DEFAULT :
q 1420 drivers/media/i2c/adv7511-v4l2.c yq = q ? q - 1 : HDMI_YCC_QUANTIZATION_RANGE_LIMITED;
q 1427 drivers/media/i2c/adv7511-v4l2.c adv7511_wr_and_or(sd, 0x57, 0x83, (ec << 4) | (q << 2) | (itc << 7));
q 697 drivers/media/i2c/cx25840/cx25840-core.c struct workqueue_struct *q;
q 723 drivers/media/i2c/cx25840/cx25840-core.c q = create_singlethread_workqueue("cx25840_fw");
q 724 drivers/media/i2c/cx25840/cx25840-core.c if (q) {
q 726 drivers/media/i2c/cx25840/cx25840-core.c queue_work(q, &state->fw_work);
q 729 drivers/media/i2c/cx25840/cx25840-core.c destroy_workqueue(q);
q 775 drivers/media/i2c/cx25840/cx25840-core.c struct workqueue_struct *q;
q 960 drivers/media/i2c/cx25840/cx25840-core.c q = create_singlethread_workqueue("cx25840_fw");
q 961 drivers/media/i2c/cx25840/cx25840-core.c if (q) {
q 963 drivers/media/i2c/cx25840/cx25840-core.c queue_work(q, &state->fw_work);
q 966 drivers/media/i2c/cx25840/cx25840-core.c destroy_workqueue(q);
q 1034 drivers/media/i2c/cx25840/cx25840-core.c struct workqueue_struct *q;
q 1085 drivers/media/i2c/cx25840/cx25840-core.c q = create_singlethread_workqueue("cx25840_fw");
q 1086 drivers/media/i2c/cx25840/cx25840-core.c if (q) {
q 1088 drivers/media/i2c/cx25840/cx25840-core.c queue_work(q, &state->fw_work);
q 1091 drivers/media/i2c/cx25840/cx25840-core.c destroy_workqueue(q);
q 1528 drivers/media/pci/bt8xx/bttv-driver.c static int bttv_prepare_buffer(struct videobuf_queue *q,struct bttv *btv,
q 1534 drivers/media/pci/bt8xx/bttv-driver.c struct bttv_fh *fh = q->priv_data;
q 1612 drivers/media/pci/bt8xx/bttv-driver.c if (0 != (rc = videobuf_iolock(q,&buf->vb,&btv->fbuf)))
q 1624 drivers/media/pci/bt8xx/bttv-driver.c bttv_dma_free(q,btv,buf);
q 1629 drivers/media/pci/bt8xx/bttv-driver.c buffer_setup(struct videobuf_queue *q, unsigned int *count, unsigned int *size)
q 1631 drivers/media/pci/bt8xx/bttv-driver.c struct bttv_fh *fh = q->priv_data;
q 1642 drivers/media/pci/bt8xx/bttv-driver.c buffer_prepare(struct videobuf_queue *q, struct videobuf_buffer *vb,
q 1646 drivers/media/pci/bt8xx/bttv-driver.c struct bttv_fh *fh = q->priv_data;
q 1648 drivers/media/pci/bt8xx/bttv-driver.c return bttv_prepare_buffer(q,fh->btv, buf, fh->fmt,
q 1653 drivers/media/pci/bt8xx/bttv-driver.c buffer_queue(struct videobuf_queue *q, struct videobuf_buffer *vb)
q 1656 drivers/media/pci/bt8xx/bttv-driver.c struct bttv_fh *fh = q->priv_data;
q 1667 drivers/media/pci/bt8xx/bttv-driver.c static void buffer_release(struct videobuf_queue *q, struct videobuf_buffer *vb)
q 1670 drivers/media/pci/bt8xx/bttv-driver.c struct bttv_fh *fh = q->priv_data;
q 1672 drivers/media/pci/bt8xx/bttv-driver.c bttv_dma_free(q,fh->btv,buf);
q 2204 drivers/media/pci/bt8xx/bttv-driver.c struct videobuf_queue* q = NULL;
q 2208 drivers/media/pci/bt8xx/bttv-driver.c q = &fh->cap;
q 2211 drivers/media/pci/bt8xx/bttv-driver.c q = &fh->vbi;
q 2216 drivers/media/pci/bt8xx/bttv-driver.c return q;
q 2238 drivers/media/pci/bt8xx/bttv-driver.c struct videobuf_queue *q = bttv_queue(fh);
q 2243 drivers/media/pci/bt8xx/bttv-driver.c if (videobuf_queue_is_busy(q))
q 571 drivers/media/pci/bt8xx/bttv-risc.c bttv_dma_free(struct videobuf_queue *q,struct bttv *btv, struct bttv_buffer *buf)
q 576 drivers/media/pci/bt8xx/bttv-risc.c videobuf_waiton(q, &buf->vb, 0, 0);
q 577 drivers/media/pci/bt8xx/bttv-risc.c videobuf_dma_unmap(q->dev, dma);
q 70 drivers/media/pci/bt8xx/bttv-vbi.c static int vbi_buffer_setup(struct videobuf_queue *q,
q 73 drivers/media/pci/bt8xx/bttv-vbi.c struct bttv_fh *fh = q->priv_data;
q 91 drivers/media/pci/bt8xx/bttv-vbi.c static int vbi_buffer_prepare(struct videobuf_queue *q,
q 95 drivers/media/pci/bt8xx/bttv-vbi.c struct bttv_fh *fh = q->priv_data;
q 141 drivers/media/pci/bt8xx/bttv-vbi.c if (0 != (rc = videobuf_iolock(q, &buf->vb, NULL)))
q 194 drivers/media/pci/bt8xx/bttv-vbi.c bttv_dma_free(q,btv,buf);
q 199 drivers/media/pci/bt8xx/bttv-vbi.c vbi_buffer_queue(struct videobuf_queue *q, struct videobuf_buffer *vb)
q 201 drivers/media/pci/bt8xx/bttv-vbi.c struct bttv_fh *fh = q->priv_data;
q 214 drivers/media/pci/bt8xx/bttv-vbi.c static void vbi_buffer_release(struct videobuf_queue *q, struct videobuf_buffer *vb)
q 216 drivers/media/pci/bt8xx/bttv-vbi.c struct bttv_fh *fh = q->priv_data;
q 221 drivers/media/pci/bt8xx/bttv-vbi.c bttv_dma_free(q,fh->btv,buf);
q 256 drivers/media/pci/bt8xx/bttvp.h void bttv_dma_free(struct videobuf_queue *q, struct bttv *btv,
q 221 drivers/media/pci/cobalt/cobalt-alsa-pcm.c rc = vb2_thread_start(&s->q, alsa_fnc, s, s->vdev.name);
q 237 drivers/media/pci/cobalt/cobalt-alsa-pcm.c vb2_thread_stop(&s->q);
q 430 drivers/media/pci/cobalt/cobalt-alsa-pcm.c rc = vb2_thread_start(&s->q, alsa_pb_fnc, s, s->vdev.name);
q 447 drivers/media/pci/cobalt/cobalt-alsa-pcm.c vb2_thread_stop(&s->q);
q 533 drivers/media/pci/cobalt/cobalt-alsa-pcm.c s->q.gfp_flags |= __GFP_ZERO;
q 211 drivers/media/pci/cobalt/cobalt-driver.h struct vb2_queue q;
q 167 drivers/media/pci/cobalt/cobalt-irq.c if ((edge & mask & dma_fifo_mask) && vb2_is_streaming(&s->q)) {
q 34 drivers/media/pci/cobalt/cobalt-v4l2.c static int cobalt_queue_setup(struct vb2_queue *q,
q 38 drivers/media/pci/cobalt/cobalt-v4l2.c struct cobalt_stream *s = q->drv_priv;
q 125 drivers/media/pci/cobalt/cobalt-v4l2.c struct vb2_queue *q = vb->vb2_queue;
q 126 drivers/media/pci/cobalt/cobalt-v4l2.c struct cobalt_stream *s = q->drv_priv;
q 279 drivers/media/pci/cobalt/cobalt-v4l2.c static int cobalt_start_streaming(struct vb2_queue *q, unsigned int count)
q 281 drivers/media/pci/cobalt/cobalt-v4l2.c struct cobalt_stream *s = q->drv_priv;
q 379 drivers/media/pci/cobalt/cobalt-v4l2.c if (!wait_event_timeout(s->q.done_wq, is_dma_done(s),
q 388 drivers/media/pci/cobalt/cobalt-v4l2.c static void cobalt_stop_streaming(struct vb2_queue *q)
q 390 drivers/media/pci/cobalt/cobalt-v4l2.c struct cobalt_stream *s = q->drv_priv;
q 636 drivers/media/pci/cobalt/cobalt-v4l2.c if (vb2_is_busy(&s->q))
q 798 drivers/media/pci/cobalt/cobalt-v4l2.c if (vb2_is_busy(&s->q))
q 915 drivers/media/pci/cobalt/cobalt-v4l2.c if (vb2_is_busy(&s->q) && (pix->pixelformat != s->pixfmt ||
q 981 drivers/media/pci/cobalt/cobalt-v4l2.c if (vb2_is_busy(&s->q))
q 1205 drivers/media/pci/cobalt/cobalt-v4l2.c struct vb2_queue *q = &s->q;
q 1253 drivers/media/pci/cobalt/cobalt-v4l2.c q->type = s->is_output ? V4L2_BUF_TYPE_VIDEO_OUTPUT :
q 1255 drivers/media/pci/cobalt/cobalt-v4l2.c q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
q 1256 drivers/media/pci/cobalt/cobalt-v4l2.c q->io_modes |= s->is_output ? VB2_WRITE : VB2_READ;
q 1257 drivers/media/pci/cobalt/cobalt-v4l2.c q->drv_priv = s;
q 1258 drivers/media/pci/cobalt/cobalt-v4l2.c q->buf_struct_size = sizeof(struct cobalt_buffer);
q 1259 drivers/media/pci/cobalt/cobalt-v4l2.c q->ops = &cobalt_qops;
q 1260 drivers/media/pci/cobalt/cobalt-v4l2.c q->mem_ops = &vb2_dma_sg_memops;
q 1261 drivers/media/pci/cobalt/cobalt-v4l2.c q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
q 1262 drivers/media/pci/cobalt/cobalt-v4l2.c q->min_buffers_needed = 2;
q 1263 drivers/media/pci/cobalt/cobalt-v4l2.c q->lock = &s->lock;
q 1264 drivers/media/pci/cobalt/cobalt-v4l2.c q->dev = &cobalt->pci_dev->dev;
q 1265 drivers/media/pci/cobalt/cobalt-v4l2.c vdev->queue = q;
q 1273 drivers/media/pci/cobalt/cobalt-v4l2.c ret = vb2_queue_init(q);
q 291 drivers/media/pci/cx18/cx18-fileops.c const u8 *q;
q 297 drivers/media/pci/cx18/cx18-fileops.c q = memchr(p, 0, start + len - p);
q 298 drivers/media/pci/cx18/cx18-fileops.c if (q == NULL)
q 300 drivers/media/pci/cx18/cx18-fileops.c p = q + 1;
q 306 drivers/media/pci/cx18/cx18-fileops.c if ((char *)q + 15 >= buf->buf + buf->bytesused ||
q 307 drivers/media/pci/cx18/cx18-fileops.c q[1] != 0 || q[2] != 1 || q[3] != ch)
q 313 drivers/media/pci/cx18/cx18-fileops.c if ((q[6] & 0xc0) != 0x80)
q 316 drivers/media/pci/cx18/cx18-fileops.c if (((q[7] & 0xc0) == 0x80 && /* PTS only */
q 317 drivers/media/pci/cx18/cx18-fileops.c (q[9] & 0xf0) == 0x20) || /* PTS only */
q 318 drivers/media/pci/cx18/cx18-fileops.c ((q[7] & 0xc0) == 0xc0 && /* PTS & DTS */
q 319 drivers/media/pci/cx18/cx18-fileops.c (q[9] & 0xf0) == 0x30)) { /* DTS follows */
q 323 drivers/media/pci/cx18/cx18-fileops.c p = q + 9; /* Skip this video PES hdr */
q 331 drivers/media/pci/cx18/cx18-fileops.c stuffing = q[13] & 7;
q 334 drivers/media/pci/cx18/cx18-fileops.c if (q[14 + i] != 0xff)
q 337 drivers/media/pci/cx18/cx18-fileops.c (q[4] & 0xc4) == 0x44 && /* marker check */
q 338 drivers/media/pci/cx18/cx18-fileops.c (q[12] & 3) == 3 && /* marker check */
q 339 drivers/media/pci/cx18/cx18-fileops.c q[14 + stuffing] == 0 && /* PES Pack or Sys Hdr */
q 340 drivers/media/pci/cx18/cx18-fileops.c q[15 + stuffing] == 0 &&
q 341 drivers/media/pci/cx18/cx18-fileops.c q[16 + stuffing] == 1) {
q 344 drivers/media/pci/cx18/cx18-fileops.c len = (char *)q - start;
q 800 drivers/media/pci/cx18/cx18-ioctl.c struct videobuf_queue *q = NULL;
q 806 drivers/media/pci/cx18/cx18-ioctl.c q = &s->vbuf_q;
q 813 drivers/media/pci/cx18/cx18-ioctl.c return q;
q 36 drivers/media/pci/cx18/cx18-queue.c void cx18_queue_init(struct cx18_queue *q)
q 38 drivers/media/pci/cx18/cx18-queue.c INIT_LIST_HEAD(&q->list);
q 39 drivers/media/pci/cx18/cx18-queue.c atomic_set(&q->depth, 0);
q 40 drivers/media/pci/cx18/cx18-queue.c q->bytesused = 0;
q 44 drivers/media/pci/cx18/cx18-queue.c struct cx18_queue *q, int to_front)
q 47 drivers/media/pci/cx18/cx18-queue.c if (q != &s->q_full) {
q 56 drivers/media/pci/cx18/cx18-queue.c if (q == &s->q_busy &&
q 57 drivers/media/pci/cx18/cx18-queue.c atomic_read(&q->depth) >= CX18_MAX_FW_MDLS_PER_STREAM)
q 58 drivers/media/pci/cx18/cx18-queue.c q = &s->q_free;
q 60 drivers/media/pci/cx18/cx18-queue.c spin_lock(&q->lock);
q 63 drivers/media/pci/cx18/cx18-queue.c list_add(&mdl->list, &q->list); /* LIFO */
q 65 drivers/media/pci/cx18/cx18-queue.c list_add_tail(&mdl->list, &q->list); /* FIFO */
q 66 drivers/media/pci/cx18/cx18-queue.c q->bytesused += mdl->bytesused - mdl->readpos;
q 67 drivers/media/pci/cx18/cx18-queue.c atomic_inc(&q->depth);
q 69 drivers/media/pci/cx18/cx18-queue.c spin_unlock(&q->lock);
q 70 drivers/media/pci/cx18/cx18-queue.c return q;
q 73 drivers/media/pci/cx18/cx18-queue.c struct cx18_mdl *cx18_dequeue(struct cx18_stream *s, struct cx18_queue *q)
q 77 drivers/media/pci/cx18/cx18-queue.c spin_lock(&q->lock);
q 78 drivers/media/pci/cx18/cx18-queue.c if (!list_empty(&q->list)) {
q 79 drivers/media/pci/cx18/cx18-queue.c mdl = list_first_entry(&q->list, struct cx18_mdl, list);
q 81 drivers/media/pci/cx18/cx18-queue.c q->bytesused -= mdl->bytesused - mdl->readpos;
q 83 drivers/media/pci/cx18/cx18-queue.c atomic_dec(&q->depth);
q 85 drivers/media/pci/cx18/cx18-queue.c spin_unlock(&q->lock);
q 56 drivers/media/pci/cx18/cx18-queue.h struct cx18_queue *q, int to_front);
q 60 drivers/media/pci/cx18/cx18-queue.h struct cx18_queue *q)
q 62 drivers/media/pci/cx18/cx18-queue.h return _cx18_enqueue(s, mdl, q, 0); /* FIFO */
q 67 drivers/media/pci/cx18/cx18-queue.h struct cx18_queue *q)
q 69 drivers/media/pci/cx18/cx18-queue.h return _cx18_enqueue(s, mdl, q, 1); /* LIFO */
q 72 drivers/media/pci/cx18/cx18-queue.h void cx18_queue_init(struct cx18_queue *q);
q 73 drivers/media/pci/cx18/cx18-queue.h struct cx18_mdl *cx18_dequeue(struct cx18_stream *s, struct cx18_queue *q);
q 95 drivers/media/pci/cx18/cx18-streams.c static void cx18_dma_free(struct videobuf_queue *q,
q 98 drivers/media/pci/cx18/cx18-streams.c videobuf_waiton(q, &buf->vb, 0, 0);
q 103 drivers/media/pci/cx18/cx18-streams.c static int cx18_prepare_buffer(struct videobuf_queue *q,
q 140 drivers/media/pci/cx18/cx18-streams.c cx18_dma_free(q, s, buf);
q 162 drivers/media/pci/cx18/cx18-streams.c rc = videobuf_iolock(q, &buf->vb, NULL);
q 170 drivers/media/pci/cx18/cx18-streams.c cx18_dma_free(q, s, buf);
q 181 drivers/media/pci/cx18/cx18-streams.c static int buffer_setup(struct videobuf_queue *q,
q 184 drivers/media/pci/cx18/cx18-streams.c struct cx18_stream *s = q->priv_data;
q 194 drivers/media/pci/cx18/cx18-streams.c q->field = V4L2_FIELD_INTERLACED;
q 195 drivers/media/pci/cx18/cx18-streams.c q->last = V4L2_FIELD_INTERLACED;
q 200 drivers/media/pci/cx18/cx18-streams.c static int buffer_prepare(struct videobuf_queue *q,
q 206 drivers/media/pci/cx18/cx18-streams.c struct cx18_stream *s = q->priv_data;
q 209 drivers/media/pci/cx18/cx18-streams.c return cx18_prepare_buffer(q, s, buf, s->pixelformat,
q 213 drivers/media/pci/cx18/cx18-streams.c static void buffer_release(struct videobuf_queue *q,
q 218 drivers/media/pci/cx18/cx18-streams.c struct cx18_stream *s = q->priv_data;
q 220 drivers/media/pci/cx18/cx18-streams.c cx18_dma_free(q, s, buf);
q 223 drivers/media/pci/cx18/cx18-streams.c static void buffer_queue(struct videobuf_queue *q, struct videobuf_buffer *vb)
q 227 drivers/media/pci/cx18/cx18-streams.c struct cx18_stream *s = q->priv_data;
q 678 drivers/media/pci/cx18/cx18-streams.c struct cx18_queue *q;
q 686 drivers/media/pci/cx18/cx18-streams.c q = cx18_enqueue(s, mdl, &s->q_busy);
q 687 drivers/media/pci/cx18/cx18-streams.c if (q != &s->q_busy)
q 688 drivers/media/pci/cx18/cx18-streams.c return q; /* The firmware has the max MDLs it can handle */
q 694 drivers/media/pci/cx18/cx18-streams.c return q;
q 700 drivers/media/pci/cx18/cx18-streams.c struct cx18_queue *q;
q 712 drivers/media/pci/cx18/cx18-streams.c q = _cx18_stream_put_mdl_fw(s, mdl);
q 714 drivers/media/pci/cx18/cx18-streams.c && q == &s->q_busy);
q 99 drivers/media/pci/cx18/cx18-vbi.c u8 *q = buf;
q 116 drivers/media/pci/cx18/cx18-vbi.c memcpy(q, p + 4, line_size - 4 - hdr_size);
q 117 drivers/media/pci/cx18/cx18-vbi.c q += line_size - 4 - hdr_size;
q 119 drivers/media/pci/cx18/cx18-vbi.c memset(q, (int) *p, hdr_size);
q 121 drivers/media/pci/cx18/cx18-vbi.c memcpy(q, p + 4, line_size - 4);
q 122 drivers/media/pci/cx18/cx18-vbi.c q += line_size - 4;
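The cx18-queue.c hits above outline a small spinlock-protected MDL queue: enqueue adds at the head (LIFO) or tail (FIFO) under q->lock while keeping q->depth and q->bytesused in step, and dequeue pops the first entry. A condensed sketch of that discipline, with demo_* types standing in for cx18's; the list-removal step is implied by the hits rather than shown in them:

	#include <linux/list.h>
	#include <linux/spinlock.h>
	#include <linux/atomic.h>

	struct demo_mdl { struct list_head list; u32 bytesused, readpos; };
	struct demo_queue { struct list_head list; spinlock_t lock;
			    atomic_t depth; u32 bytesused; };

	static void demo_enqueue(struct demo_queue *q, struct demo_mdl *mdl, int to_front)
	{
		spin_lock(&q->lock);
		if (to_front)
			list_add(&mdl->list, &q->list);		/* LIFO */
		else
			list_add_tail(&mdl->list, &q->list);	/* FIFO */
		q->bytesused += mdl->bytesused - mdl->readpos;
		atomic_inc(&q->depth);
		spin_unlock(&q->lock);
	}

	static struct demo_mdl *demo_dequeue(struct demo_queue *q)
	{
		struct demo_mdl *mdl = NULL;

		spin_lock(&q->lock);
		if (!list_empty(&q->list)) {
			mdl = list_first_entry(&q->list, struct demo_mdl, list);
			list_del_init(&mdl->list);	/* implied removal step */
			q->bytesused -= mdl->bytesused - mdl->readpos;
			atomic_dec(&q->depth);
		}
		spin_unlock(&q->lock);
		return mdl;
	}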
q 1123 drivers/media/pci/cx23885/cx23885-417.c static int queue_setup(struct vb2_queue *q,
q 1127 drivers/media/pci/cx23885/cx23885-417.c struct cx23885_dev *dev = q->drv_priv;
q 1167 drivers/media/pci/cx23885/cx23885-417.c static int cx23885_start_streaming(struct vb2_queue *q, unsigned int count)
q 1169 drivers/media/pci/cx23885/cx23885-417.c struct cx23885_dev *dev = q->drv_priv;
q 1194 drivers/media/pci/cx23885/cx23885-417.c static void cx23885_stop_streaming(struct vb2_queue *q)
q 1196 drivers/media/pci/cx23885/cx23885-417.c struct cx23885_dev *dev = q->drv_priv;
q 1496 drivers/media/pci/cx23885/cx23885-417.c struct vb2_queue *q;
q 1524 drivers/media/pci/cx23885/cx23885-417.c q = &dev->vb2_mpegq;
q 1525 drivers/media/pci/cx23885/cx23885-417.c q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
q 1526 drivers/media/pci/cx23885/cx23885-417.c q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_READ;
q 1527 drivers/media/pci/cx23885/cx23885-417.c q->gfp_flags = GFP_DMA32;
q 1528 drivers/media/pci/cx23885/cx23885-417.c q->min_buffers_needed = 2;
q 1529 drivers/media/pci/cx23885/cx23885-417.c q->drv_priv = dev;
q 1530 drivers/media/pci/cx23885/cx23885-417.c q->buf_struct_size = sizeof(struct cx23885_buffer);
q 1531 drivers/media/pci/cx23885/cx23885-417.c q->ops = &cx23885_qops;
q 1532 drivers/media/pci/cx23885/cx23885-417.c q->mem_ops = &vb2_dma_sg_memops;
q 1533 drivers/media/pci/cx23885/cx23885-417.c q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
q 1534 drivers/media/pci/cx23885/cx23885-417.c q->lock = &dev->lock;
q 1535 drivers/media/pci/cx23885/cx23885-417.c q->dev = &dev->pci->dev;
q 1537 drivers/media/pci/cx23885/cx23885-417.c err = vb2_queue_init(q);
q 1542 drivers/media/pci/cx23885/cx23885-417.c dev->v4l_device->queue = q;
q 425 drivers/media/pci/cx23885/cx23885-core.c struct cx23885_dmaqueue *q, u32 count)
q 432 drivers/media/pci/cx23885/cx23885-core.c if (list_empty(&q->active))
q 434 drivers/media/pci/cx23885/cx23885-core.c buf = list_entry(q->active.next,
q 438 drivers/media/pci/cx23885/cx23885-core.c buf->vb.sequence = q->count++;
q 439 drivers/media/pci/cx23885/cx23885-core.c if (count != (q->count % 65536)) {
q 441 drivers/media/pci/cx23885/cx23885-core.c buf->vb.vb2_buf.index, count, q->count);
q 444 drivers/media/pci/cx23885/cx23885-core.c buf->vb.vb2_buf.index, count, q->count);
q 450 drivers/media/pci/cx23885/cx23885-core.c count_delta = ((int)count - (int)(q->count % 65536));
q 1394 drivers/media/pci/cx23885/cx23885-core.c struct cx23885_dmaqueue *q,
q 1449 drivers/media/pci/cx23885/cx23885-core.c q->count = 0;
q 1645 drivers/media/pci/cx23885/cx23885-core.c struct cx23885_dmaqueue *q = &port->mpegq;
q 1650 drivers/media/pci/cx23885/cx23885-core.c while (!list_empty(&q->active)) {
q 1651 drivers/media/pci/cx23885/cx23885-core.c buf = list_entry(q->active.next, struct cx23885_buffer,
q 88 drivers/media/pci/cx23885/cx23885-dvb.c static int queue_setup(struct vb2_queue *q,
q 92 drivers/media/pci/cx23885/cx23885-dvb.c struct cx23885_tsport *port = q->drv_priv;
q 150 drivers/media/pci/cx23885/cx23885-dvb.c static int cx23885_start_streaming(struct vb2_queue *q, unsigned int count)
q 152 drivers/media/pci/cx23885/cx23885-dvb.c struct cx23885_tsport *port = q->drv_priv;
q 161 drivers/media/pci/cx23885/cx23885-dvb.c static void cx23885_stop_streaming(struct vb2_queue *q)
q 163 drivers/media/pci/cx23885/cx23885-dvb.c struct cx23885_tsport *port = q->drv_priv;
q 2631 drivers/media/pci/cx23885/cx23885-dvb.c struct vb2_queue *q;
q 2653 drivers/media/pci/cx23885/cx23885-dvb.c q = &fe0->dvb.dvbq;
q 2654 drivers/media/pci/cx23885/cx23885-dvb.c q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
q 2655 drivers/media/pci/cx23885/cx23885-dvb.c q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_READ;
q 2656 drivers/media/pci/cx23885/cx23885-dvb.c q->gfp_flags = GFP_DMA32;
q 2657 drivers/media/pci/cx23885/cx23885-dvb.c q->min_buffers_needed = 2;
q 2658 drivers/media/pci/cx23885/cx23885-dvb.c q->drv_priv = port;
q 2659 drivers/media/pci/cx23885/cx23885-dvb.c q->buf_struct_size = sizeof(struct cx23885_buffer);
q 2660 drivers/media/pci/cx23885/cx23885-dvb.c q->ops = &dvb_qops;
q 2661 drivers/media/pci/cx23885/cx23885-dvb.c q->mem_ops = &vb2_dma_sg_memops;
q 2662 drivers/media/pci/cx23885/cx23885-dvb.c q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
q 2663 drivers/media/pci/cx23885/cx23885-dvb.c q->lock = &dev->lock;
q 2664 drivers/media/pci/cx23885/cx23885-dvb.c q->dev = &dev->pci->dev;
q 2666 drivers/media/pci/cx23885/cx23885-dvb.c err = vb2_queue_init(q);
q 87 drivers/media/pci/cx23885/cx23885-vbi.c struct cx23885_dmaqueue *q,
q 99 drivers/media/pci/cx23885/cx23885-vbi.c q->count = 0;
q 114 drivers/media/pci/cx23885/cx23885-vbi.c static int queue_setup(struct vb2_queue *q,
q 118 drivers/media/pci/cx23885/cx23885-vbi.c struct cx23885_dev *dev = q->drv_priv;
q 189 drivers/media/pci/cx23885/cx23885-vbi.c struct cx23885_dmaqueue *q = &dev->vbiq;
q 197 drivers/media/pci/cx23885/cx23885-vbi.c if (list_empty(&q->active)) {
q 199 drivers/media/pci/cx23885/cx23885-vbi.c list_add_tail(&buf->queue, &q->active);
q 206 drivers/media/pci/cx23885/cx23885-vbi.c prev = list_entry(q->active.prev, struct cx23885_buffer,
q 209 drivers/media/pci/cx23885/cx23885-vbi.c list_add_tail(&buf->queue, &q->active);
q 217 drivers/media/pci/cx23885/cx23885-vbi.c static int cx23885_start_streaming(struct vb2_queue *q, unsigned int count)
q 219 drivers/media/pci/cx23885/cx23885-vbi.c struct cx23885_dev *dev = q->drv_priv;
q 228 drivers/media/pci/cx23885/cx23885-vbi.c static void cx23885_stop_streaming(struct vb2_queue *q)
q 230 drivers/media/pci/cx23885/cx23885-vbi.c struct cx23885_dev *dev = q->drv_priv;
q 89 drivers/media/pci/cx23885/cx23885-video.c struct cx23885_dmaqueue *q, u32 count)
q 93 drivers/media/pci/cx23885/cx23885-video.c if (list_empty(&q->active))
q 95 drivers/media/pci/cx23885/cx23885-video.c buf = list_entry(q->active.next,
q 98 drivers/media/pci/cx23885/cx23885-video.c buf->vb.sequence = q->count++;
q 101 drivers/media/pci/cx23885/cx23885-video.c buf->vb.vb2_buf.index, count, q->count);
q 302 drivers/media/pci/cx23885/cx23885-video.c struct cx23885_dmaqueue *q,
q 316 drivers/media/pci/cx23885/cx23885-video.c q->count = 0;
q 329 drivers/media/pci/cx23885/cx23885-video.c static int queue_setup(struct vb2_queue *q,
q 333 drivers/media/pci/cx23885/cx23885-video.c struct cx23885_dev *dev = q->drv_priv;
q 458 drivers/media/pci/cx23885/cx23885-video.c struct cx23885_dmaqueue *q = &dev->vidq;
q 468 drivers/media/pci/cx23885/cx23885-video.c if (list_empty(&q->active)) {
q 469 drivers/media/pci/cx23885/cx23885-video.c list_add_tail(&buf->queue, &q->active);
q 474 drivers/media/pci/cx23885/cx23885-video.c prev = list_entry(q->active.prev, struct cx23885_buffer,
q 476 drivers/media/pci/cx23885/cx23885-video.c list_add_tail(&buf->queue, &q->active);
q 484 drivers/media/pci/cx23885/cx23885-video.c static int cx23885_start_streaming(struct vb2_queue *q, unsigned int count)
q 486 drivers/media/pci/cx23885/cx23885-video.c struct cx23885_dev *dev = q->drv_priv;
q 495 drivers/media/pci/cx23885/cx23885-video.c static void cx23885_stop_streaming(struct vb2_queue *q)
q 497 drivers/media/pci/cx23885/cx23885-video.c struct cx23885_dev *dev = q->drv_priv;
q 1184 drivers/media/pci/cx23885/cx23885-video.c struct vb2_queue *q;
q 1265 drivers/media/pci/cx23885/cx23885-video.c q = &dev->vb2_vidq;
q 1266 drivers/media/pci/cx23885/cx23885-video.c q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
q 1267 drivers/media/pci/cx23885/cx23885-video.c q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_READ;
q 1268 drivers/media/pci/cx23885/cx23885-video.c q->gfp_flags = GFP_DMA32;
q 1269 drivers/media/pci/cx23885/cx23885-video.c q->min_buffers_needed = 2;
q 1270 drivers/media/pci/cx23885/cx23885-video.c q->drv_priv = dev;
q 1271 drivers/media/pci/cx23885/cx23885-video.c q->buf_struct_size = sizeof(struct cx23885_buffer);
q 1272 drivers/media/pci/cx23885/cx23885-video.c q->ops = &cx23885_video_qops;
q 1273 drivers/media/pci/cx23885/cx23885-video.c q->mem_ops = &vb2_dma_sg_memops;
q 1274 drivers/media/pci/cx23885/cx23885-video.c q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
q 1275 drivers/media/pci/cx23885/cx23885-video.c q->lock = &dev->lock;
q 1276 drivers/media/pci/cx23885/cx23885-video.c q->dev = &dev->pci->dev;
q 1278 drivers/media/pci/cx23885/cx23885-video.c err = vb2_queue_init(q);
q 1282 drivers/media/pci/cx23885/cx23885-video.c q = &dev->vb2_vbiq;
q 1283 drivers/media/pci/cx23885/cx23885-video.c q->type = V4L2_BUF_TYPE_VBI_CAPTURE;
q 1284 drivers/media/pci/cx23885/cx23885-video.c q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_READ;
q 1285 drivers/media/pci/cx23885/cx23885-video.c q->gfp_flags = GFP_DMA32;
q 1286 drivers/media/pci/cx23885/cx23885-video.c q->min_buffers_needed = 2;
q 1287 drivers/media/pci/cx23885/cx23885-video.c q->drv_priv = dev;
q 1288 drivers/media/pci/cx23885/cx23885-video.c q->buf_struct_size = sizeof(struct cx23885_buffer);
q 1289 drivers/media/pci/cx23885/cx23885-video.c q->ops = &cx23885_vbi_qops;
q 1290 drivers/media/pci/cx23885/cx23885-video.c q->mem_ops = &vb2_dma_sg_memops;
q 1291 drivers/media/pci/cx23885/cx23885-video.c q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
q 1292 drivers/media/pci/cx23885/cx23885-video.c q->lock = &dev->lock;
q 1293 drivers/media/pci/cx23885/cx23885-video.c q->dev = &dev->pci->dev;
q 1295 drivers/media/pci/cx23885/cx23885-video.c err = vb2_queue_init(q);
q 527 drivers/media/pci/cx23885/cx23885.h struct cx23885_dmaqueue *q,
q 578 drivers/media/pci/cx23885/cx23885.h struct cx23885_dmaqueue *q, u32 count);
q 62 drivers/media/pci/cx25821/cx25821-alsa.c struct cx25821_dmaqueue q;
q 59 drivers/media/pci/cx25821/cx25821-video.c struct cx25821_dmaqueue *q,
q 127 drivers/media/pci/cx25821/cx25821-video.c static int cx25821_queue_setup(struct vb2_queue *q,
q 131 drivers/media/pci/cx25821/cx25821-video.c struct cx25821_channel *chan = q->drv_priv;
q 243 drivers/media/pci/cx25821/cx25821-video.c struct cx25821_dmaqueue *q = &dev->channels[chan->id].dma_vidq;
q 250 drivers/media/pci/cx25821/cx25821-video.c if (list_empty(&q->active)) {
q 251 drivers/media/pci/cx25821/cx25821-video.c list_add_tail(&buf->queue, &q->active);
q 254 drivers/media/pci/cx25821/cx25821-video.c prev = list_entry(q->active.prev, struct cx25821_buffer,
q 256 drivers/media/pci/cx25821/cx25821-video.c list_add_tail(&buf->queue, &q->active);
q 261 drivers/media/pci/cx25821/cx25821-video.c static int cx25821_start_streaming(struct vb2_queue *q, unsigned int count)
q 263 drivers/media/pci/cx25821/cx25821-video.c struct cx25821_channel *chan = q->drv_priv;
q 274 drivers/media/pci/cx25821/cx25821-video.c static void cx25821_stop_streaming(struct vb2_queue *q)
q 276 drivers/media/pci/cx25821/cx25821-video.c struct cx25821_channel *chan = q->drv_priv;
q 681 drivers/media/pci/cx25821/cx25821-video.c struct vb2_queue *q;
q 726 drivers/media/pci/cx25821/cx25821-video.c q = &chan->vidq;
q 728 drivers/media/pci/cx25821/cx25821-video.c q->type = is_output ? V4L2_BUF_TYPE_VIDEO_OUTPUT :
q 730 drivers/media/pci/cx25821/cx25821-video.c q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
q 731 drivers/media/pci/cx25821/cx25821-video.c q->io_modes |= is_output ? VB2_WRITE : VB2_READ;
q 732 drivers/media/pci/cx25821/cx25821-video.c q->gfp_flags = GFP_DMA32;
q 733 drivers/media/pci/cx25821/cx25821-video.c q->min_buffers_needed = 2;
q 734 drivers/media/pci/cx25821/cx25821-video.c q->drv_priv = chan;
q 735 drivers/media/pci/cx25821/cx25821-video.c q->buf_struct_size = sizeof(struct cx25821_buffer);
q 736 drivers/media/pci/cx25821/cx25821-video.c q->ops = &cx25821_video_qops;
q 737 drivers/media/pci/cx25821/cx25821-video.c q->mem_ops = &vb2_dma_sg_memops;
q 738 drivers/media/pci/cx25821/cx25821-video.c q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
q 739 drivers/media/pci/cx25821/cx25821-video.c q->lock = &dev->lock;
q 740 drivers/media/pci/cx25821/cx25821-video.c q->dev = &dev->pci->dev;
q 743 drivers/media/pci/cx25821/cx25821-video.c err = vb2_queue_init(q);
q 756 drivers/media/pci/cx25821/cx25821-video.c vdev->queue = q;
q 40 drivers/media/pci/cx25821/cx25821-video.h struct cx25821_dmaqueue *q, u32 count);
q 43 drivers/media/pci/cx25821/cx25821-video.h struct cx25821_dmaqueue *q,
q 55 drivers/media/pci/cx88/cx88-alsa.c struct cx88_dmaqueue q;
q 658 drivers/media/pci/cx88/cx88-blackbird.c static int queue_setup(struct vb2_queue *q,
q 662 drivers/media/pci/cx88/cx88-blackbird.c struct cx8802_dev *dev = q->drv_priv;
q 701 drivers/media/pci/cx88/cx88-blackbird.c static int start_streaming(struct vb2_queue *q, unsigned int count)
q 703 drivers/media/pci/cx88/cx88-blackbird.c struct cx8802_dev *dev = q->drv_priv;
q 751 drivers/media/pci/cx88/cx88-blackbird.c static void stop_streaming(struct vb2_queue *q)
q 753 drivers/media/pci/cx88/cx88-blackbird.c struct cx8802_dev *dev = q->drv_priv;
q 1157 drivers/media/pci/cx88/cx88-blackbird.c struct vb2_queue *q;
q 1194 drivers/media/pci/cx88/cx88-blackbird.c q = &dev->vb2_mpegq;
q 1195 drivers/media/pci/cx88/cx88-blackbird.c q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
q 1196 drivers/media/pci/cx88/cx88-blackbird.c q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_READ;
q 1197 drivers/media/pci/cx88/cx88-blackbird.c q->gfp_flags = GFP_DMA32;
q 1198 drivers/media/pci/cx88/cx88-blackbird.c q->min_buffers_needed = 2;
q 1199 drivers/media/pci/cx88/cx88-blackbird.c q->drv_priv = dev;
q 1200 drivers/media/pci/cx88/cx88-blackbird.c q->buf_struct_size = sizeof(struct cx88_buffer);
q 1201 drivers/media/pci/cx88/cx88-blackbird.c q->ops = &blackbird_qops;
q 1202 drivers/media/pci/cx88/cx88-blackbird.c q->mem_ops = &vb2_dma_sg_memops;
q 1203 drivers/media/pci/cx88/cx88-blackbird.c q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
q 1204 drivers/media/pci/cx88/cx88-blackbird.c q->lock = &core->lock;
q 1205 drivers/media/pci/cx88/cx88-blackbird.c q->dev = &dev->pci->dev;
q 1207 drivers/media/pci/cx88/cx88-blackbird.c err = vb2_queue_init(q);
q 519 drivers/media/pci/cx88/cx88-core.c struct cx88_dmaqueue *q, u32 count)
q 523 drivers/media/pci/cx88/cx88-core.c buf = list_entry(q->active.next,
q 527 drivers/media/pci/cx88/cx88-core.c buf->vb.sequence = q->count++;
q 75 drivers/media/pci/cx88/cx88-dvb.c static int queue_setup(struct vb2_queue *q,
q 79 drivers/media/pci/cx88/cx88-dvb.c struct cx8802_dev *dev = q->drv_priv;
q 119 drivers/media/pci/cx88/cx88-dvb.c static int start_streaming(struct vb2_queue *q, unsigned int count)
q 121 drivers/media/pci/cx88/cx88-dvb.c struct cx8802_dev *dev = q->drv_priv;
q 130 drivers/media/pci/cx88/cx88-dvb.c static void stop_streaming(struct vb2_queue *q)
q 132 drivers/media/pci/cx88/cx88-dvb.c struct cx8802_dev *dev = q->drv_priv;
q 1764 drivers/media/pci/cx88/cx88-dvb.c struct vb2_queue *q;
q 1773 drivers/media/pci/cx88/cx88-dvb.c q = &fe->dvb.dvbq;
q 1774 drivers/media/pci/cx88/cx88-dvb.c q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
q 1775 drivers/media/pci/cx88/cx88-dvb.c q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_READ;
q 1776 drivers/media/pci/cx88/cx88-dvb.c q->gfp_flags = GFP_DMA32;
q 1777 drivers/media/pci/cx88/cx88-dvb.c q->min_buffers_needed = 2;
q 1778 drivers/media/pci/cx88/cx88-dvb.c q->drv_priv = dev;
q 1779 drivers/media/pci/cx88/cx88-dvb.c q->buf_struct_size = sizeof(struct cx88_buffer);
q 1780 drivers/media/pci/cx88/cx88-dvb.c q->ops = &dvb_qops;
q 1781 drivers/media/pci/cx88/cx88-dvb.c q->mem_ops = &vb2_dma_sg_memops;
q 1782 drivers/media/pci/cx88/cx88-dvb.c q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
q 1783 drivers/media/pci/cx88/cx88-dvb.c q->lock = &core->lock;
q 1784 drivers/media/pci/cx88/cx88-dvb.c q->dev = &dev->pci->dev;
q 1786 drivers/media/pci/cx88/cx88-dvb.c err = vb2_queue_init(q);
q 73 drivers/media/pci/cx88/cx88-mpeg.c struct cx88_dmaqueue *q,
q 163 drivers/media/pci/cx88/cx88-mpeg.c q->count = 0;
q 196 drivers/media/pci/cx88/cx88-mpeg.c struct cx88_dmaqueue *q)
q 201 drivers/media/pci/cx88/cx88-mpeg.c if (list_empty(&q->active))
q 204 drivers/media/pci/cx88/cx88-mpeg.c buf = list_entry(q->active.next, struct cx88_buffer, list);
q 207 drivers/media/pci/cx88/cx88-mpeg.c cx8802_start_dma(dev, q, buf);
q 213 drivers/media/pci/cx88/cx88-mpeg.c int cx8802_buf_prepare(struct vb2_queue *q, struct cx8802_dev *dev,
q 271 drivers/media/pci/cx88/cx88-mpeg.c struct cx88_dmaqueue *q = &dev->mpegq;
q 276 drivers/media/pci/cx88/cx88-mpeg.c while (!list_empty(&q->active)) {
q 277 drivers/media/pci/cx88/cx88-mpeg.c buf = list_entry(q->active.next, struct cx88_buffer, list);
q 52 drivers/media/pci/cx88/cx88-vbi.c struct cx88_dmaqueue *q,
q 67 drivers/media/pci/cx88/cx88-vbi.c q->count = 0;
q 99 drivers/media/pci/cx88/cx88-vbi.c struct cx88_dmaqueue *q)
q 103 drivers/media/pci/cx88/cx88-vbi.c if (list_empty(&q->active))
q 106 drivers/media/pci/cx88/cx88-vbi.c buf = list_entry(q->active.next, struct cx88_buffer, list);
q 109 drivers/media/pci/cx88/cx88-vbi.c cx8800_start_vbi_dma(dev, q, buf);
q 115 drivers/media/pci/cx88/cx88-vbi.c static int queue_setup(struct vb2_queue *q,
q 119 drivers/media/pci/cx88/cx88-vbi.c struct cx8800_dev *dev = q->drv_priv;
q 172 drivers/media/pci/cx88/cx88-vbi.c struct cx88_dmaqueue *q = &dev->vbiq;
q 179 drivers/media/pci/cx88/cx88-vbi.c if (list_empty(&q->active)) {
q 180 drivers/media/pci/cx88/cx88-vbi.c list_add_tail(&buf->list, &q->active);
q 186 drivers/media/pci/cx88/cx88-vbi.c prev = list_entry(q->active.prev, struct cx88_buffer, list);
q 187 drivers/media/pci/cx88/cx88-vbi.c list_add_tail(&buf->list, &q->active);
q 194 drivers/media/pci/cx88/cx88-vbi.c static int start_streaming(struct vb2_queue *q, unsigned int count)
q 196 drivers/media/pci/cx88/cx88-vbi.c struct cx8800_dev *dev = q->drv_priv;
q 205 drivers/media/pci/cx88/cx88-vbi.c static void stop_streaming(struct vb2_queue *q)
q 207 drivers/media/pci/cx88/cx88-vbi.c struct cx8800_dev *dev = q->drv_priv;
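The buffer_queue() hits above (cx23885-vbi, cx23885-video, cx25821-video, cx88-vbi) share one shape: if the active list is empty the buffer starts a new chain, otherwise it is appended after the previous buffer so the running DMA program jumps straight into it. A sketch of that shape; demo_link_risc() is a hypothetical stand-in for the per-driver RISC-program fixup:

	#include <linux/list.h>

	struct demo_buffer { struct list_head queue; };
	struct demo_dmaqueue { struct list_head active; };

	static void demo_link_risc(struct demo_buffer *prev, struct demo_buffer *buf);

	static void demo_buffer_queue(struct demo_dmaqueue *q, struct demo_buffer *buf)
	{
		struct demo_buffer *prev;

		if (list_empty(&q->active)) {
			/* first buffer: start_streaming() (or a restart helper)
			 * kicks the DMA from the head of the list */
			list_add_tail(&buf->queue, &q->active);
		} else {
			/* chain after the last queued buffer so the running
			 * DMA program continues into this one */
			prev = list_entry(q->active.prev, struct demo_buffer, queue);
			list_add_tail(&buf->queue, &q->active);
			demo_link_risc(prev, buf);
		}
	}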
q 350 drivers/media/pci/cx88/cx88-video.c struct cx88_dmaqueue *q,
q 363 drivers/media/pci/cx88/cx88-video.c q->count = 0;
q 406 drivers/media/pci/cx88/cx88-video.c struct cx88_dmaqueue *q)
q 410 drivers/media/pci/cx88/cx88-video.c if (!list_empty(&q->active)) {
q 411 drivers/media/pci/cx88/cx88-video.c buf = list_entry(q->active.next, struct cx88_buffer, list);
q 414 drivers/media/pci/cx88/cx88-video.c start_video_dma(dev, q, buf);
q 422 drivers/media/pci/cx88/cx88-video.c static int queue_setup(struct vb2_queue *q,
q 426 drivers/media/pci/cx88/cx88-video.c struct cx8800_dev *dev = q->drv_priv;
q 507 drivers/media/pci/cx88/cx88-video.c struct cx88_dmaqueue *q = &dev->vidq;
q 514 drivers/media/pci/cx88/cx88-video.c if (list_empty(&q->active)) {
q 515 drivers/media/pci/cx88/cx88-video.c list_add_tail(&buf->list, &q->active);
q 521 drivers/media/pci/cx88/cx88-video.c prev = list_entry(q->active.prev, struct cx88_buffer, list);
q 522 drivers/media/pci/cx88/cx88-video.c list_add_tail(&buf->list, &q->active);
q 529 drivers/media/pci/cx88/cx88-video.c static int start_streaming(struct vb2_queue *q, unsigned int count)
q 531 drivers/media/pci/cx88/cx88-video.c struct cx8800_dev *dev = q->drv_priv;
q 540 drivers/media/pci/cx88/cx88-video.c static void stop_streaming(struct vb2_queue *q)
q 542 drivers/media/pci/cx88/cx88-video.c struct cx8800_dev *dev = q->drv_priv;
q 1263 drivers/media/pci/cx88/cx88-video.c struct vb2_queue *q;
q 1410 drivers/media/pci/cx88/cx88-video.c q = &dev->vb2_vidq;
q 1411 drivers/media/pci/cx88/cx88-video.c q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
q 1412 drivers/media/pci/cx88/cx88-video.c q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_READ;
q 1413 drivers/media/pci/cx88/cx88-video.c q->gfp_flags = GFP_DMA32;
q 1414 drivers/media/pci/cx88/cx88-video.c q->min_buffers_needed = 2;
q 1415 drivers/media/pci/cx88/cx88-video.c q->drv_priv = dev;
q 1416 drivers/media/pci/cx88/cx88-video.c q->buf_struct_size = sizeof(struct cx88_buffer);
q 1417 drivers/media/pci/cx88/cx88-video.c q->ops = &cx8800_video_qops;
q 1418 drivers/media/pci/cx88/cx88-video.c q->mem_ops = &vb2_dma_sg_memops;
q 1419 drivers/media/pci/cx88/cx88-video.c q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
q 1420 drivers/media/pci/cx88/cx88-video.c q->lock = &core->lock;
q 1421 drivers/media/pci/cx88/cx88-video.c q->dev = &dev->pci->dev;
q 1423 drivers/media/pci/cx88/cx88-video.c err = vb2_queue_init(q);
q 1427 drivers/media/pci/cx88/cx88-video.c q = &dev->vb2_vbiq;
q 1428 drivers/media/pci/cx88/cx88-video.c q->type = V4L2_BUF_TYPE_VBI_CAPTURE;
q 1429 drivers/media/pci/cx88/cx88-video.c q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_READ;
q 1430 drivers/media/pci/cx88/cx88-video.c q->gfp_flags = GFP_DMA32;
q 1431 drivers/media/pci/cx88/cx88-video.c q->min_buffers_needed = 2;
q 1432 drivers/media/pci/cx88/cx88-video.c q->drv_priv = dev;
q 1433 drivers/media/pci/cx88/cx88-video.c q->buf_struct_size = sizeof(struct cx88_buffer);
q 1434 drivers/media/pci/cx88/cx88-video.c q->ops = &cx8800_vbi_qops;
q 1435 drivers/media/pci/cx88/cx88-video.c q->mem_ops = &vb2_dma_sg_memops;
q 1436 drivers/media/pci/cx88/cx88-video.c q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
q 1437 drivers/media/pci/cx88/cx88-video.c q->lock = &core->lock;
q 1438 drivers/media/pci/cx88/cx88-video.c q->dev = &dev->pci->dev;
q 1440 drivers/media/pci/cx88/cx88-video.c err = vb2_queue_init(q);
q 616 drivers/media/pci/cx88/cx88.h struct cx88_dmaqueue *q, u32 count);
q 662 drivers/media/pci/cx88/cx88.h int cx8800_restart_vbi_queue(struct cx8800_dev *dev, struct cx88_dmaqueue *q);
q 714 drivers/media/pci/cx88/cx88.h int cx8802_buf_prepare(struct vb2_queue *q, struct cx8802_dev *dev,
q 719 drivers/media/pci/cx88/cx88.h struct cx88_dmaqueue *q,
q 215 drivers/media/pci/ddbridge/ddbridge-mci.h s16 q;
q 148 drivers/media/pci/dt3155/dt3155.c static int dt3155_start_streaming(struct vb2_queue *q, unsigned count)
q 150 drivers/media/pci/dt3155/dt3155.c struct dt3155_priv *pd = vb2_get_drv_priv(q);
q 176 drivers/media/pci/dt3155/dt3155.c static void dt3155_stop_streaming(struct vb2_queue *q)
q 178 drivers/media/pci/dt3155/dt3155.c struct dt3155_priv *pd = vb2_get_drv_priv(q);
q 217 drivers/media/pci/intel/ipu3/ipu3-cio2.c static int cio2_fbpt_init(struct cio2_device *cio2, struct cio2_queue *q)
q 221 drivers/media/pci/intel/ipu3/ipu3-cio2.c q->fbpt = dma_alloc_coherent(dev, CIO2_FBPT_SIZE, &q->fbpt_bus_addr,
q 223 drivers/media/pci/intel/ipu3/ipu3-cio2.c if (!q->fbpt)
q 229 drivers/media/pci/intel/ipu3/ipu3-cio2.c static void cio2_fbpt_exit(struct cio2_queue *q, struct device *dev)
q 231 drivers/media/pci/intel/ipu3/ipu3-cio2.c dma_free_coherent(dev, CIO2_FBPT_SIZE, q->fbpt, q->fbpt_bus_addr);
q 294 drivers/media/pci/intel/ipu3/ipu3-cio2.c static int cio2_csi2_calc_timing(struct cio2_device *cio2, struct cio2_queue *q,
q 303 drivers/media/pci/intel/ipu3/ipu3-cio2.c if (!q->sensor)
q 306 drivers/media/pci/intel/ipu3/ipu3-cio2.c link_freq = v4l2_ctrl_find(q->sensor->ctrl_handler, V4L2_CID_LINK_FREQ);
q 313 drivers/media/pci/intel/ipu3/ipu3-cio2.c r = v4l2_querymenu(q->sensor->ctrl_handler, &qm);
q 350 drivers/media/pci/intel/ipu3/ipu3-cio2.c static int cio2_hw_init(struct cio2_device *cio2, struct cio2_queue *q)
q 360 drivers/media/pci/intel/ipu3/ipu3-cio2.c u8 lanes, csi2bus = q->csi2.port;
q 365 drivers/media/pci/intel/ipu3/ipu3-cio2.c fmt = cio2_find_format(NULL, &q->subdev_fmt.code);
q 369 drivers/media/pci/intel/ipu3/ipu3-cio2.c lanes = q->csi2.lanes;
q 371 drivers/media/pci/intel/ipu3/ipu3-cio2.c r = cio2_csi2_calc_timing(cio2, q, &timing);
q 375 drivers/media/pci/intel/ipu3/ipu3-cio2.c writel(timing.clk_termen, q->csi_rx_base +
q 377 drivers/media/pci/intel/ipu3/ipu3-cio2.c writel(timing.clk_settle, q->csi_rx_base +
q 381 drivers/media/pci/intel/ipu3/ipu3-cio2.c writel(timing.dat_termen, q->csi_rx_base +
q 383 drivers/media/pci/intel/ipu3/ipu3-cio2.c writel(timing.dat_settle, q->csi_rx_base +
q 405 drivers/media/pci/intel/ipu3/ipu3-cio2.c q->csi_rx_base + CIO2_REG_CSIRX_STATUS_DLANE_HS);
q 407 drivers/media/pci/intel/ipu3/ipu3-cio2.c q->csi_rx_base + CIO2_REG_CSIRX_STATUS_DLANE_LP);
q 414 drivers/media/pci/intel/ipu3/ipu3-cio2.c writel(1, q->csi_rx_base + CIO2_REG_MIPIBE_SP_LUT_ENTRY(i));
q 419 drivers/media/pci/intel/ipu3/ipu3-cio2.c q->csi_rx_base + CIO2_REG_MIPIBE_LP_LUT_ENTRY(i));
q 421 drivers/media/pci/intel/ipu3/ipu3-cio2.c q->csi_rx_base + CIO2_REG_MIPIBE_GLOBAL_LUT_DISREGARD);
q 424 drivers/media/pci/intel/ipu3/ipu3-cio2.c writel(CIO2_IRQCTRL_MASK, q->csi_rx_base + CIO2_REG_IRQCTRL_MASK);
q 425 drivers/media/pci/intel/ipu3/ipu3-cio2.c writel(CIO2_IRQCTRL_MASK, q->csi_rx_base + CIO2_REG_IRQCTRL_ENABLE);
q 426 drivers/media/pci/intel/ipu3/ipu3-cio2.c writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_EDGE);
q 427 drivers/media/pci/intel/ipu3/ipu3-cio2.c writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_LEVEL_NOT_PULSE);
q 440 drivers/media/pci/intel/ipu3/ipu3-cio2.c q->csi_rx_base + CIO2_REG_MIPIBE_LP_LUT_ENTRY(ENTRY));
q 441 drivers/media/pci/intel/ipu3/ipu3-cio2.c writel(0, q->csi_rx_base + CIO2_REG_MIPIBE_COMP_FORMAT(sensor_vc));
q 442 drivers/media/pci/intel/ipu3/ipu3-cio2.c writel(0, q->csi_rx_base + CIO2_REG_MIPIBE_FORCE_RAW8);
q 445 drivers/media/pci/intel/ipu3/ipu3-cio2.c writel(lanes, q->csi_rx_base + CIO2_REG_CSIRX_NOF_ENABLED_LANES);
q 478 drivers/media/pci/intel/ipu3/ipu3-cio2.c writel(q->fbpt_bus_addr >> PAGE_SHIFT,
q 498 drivers/media/pci/intel/ipu3/ipu3-cio2.c base + CIO2_REG_PXM_FRF_CFG(q->csi2.port));
q 501 drivers/media/pci/intel/ipu3/ipu3-cio2.c writel(CIO2_IRQCTRL_MASK, q->csi_rx_base + CIO2_REG_IRQCTRL_CLEAR);
q 507 drivers/media/pci/intel/ipu3/ipu3-cio2.c writel(1, q->csi_rx_base + CIO2_REG_MIPIBE_ENABLE);
q 508 drivers/media/pci/intel/ipu3/ipu3-cio2.c writel(1, q->csi_rx_base + CIO2_REG_CSIRX_ENABLE);
q 513 drivers/media/pci/intel/ipu3/ipu3-cio2.c static void cio2_hw_exit(struct cio2_device *cio2, struct cio2_queue *q)
q 519 drivers/media/pci/intel/ipu3/ipu3-cio2.c writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_MASK);
q 520 drivers/media/pci/intel/ipu3/ipu3-cio2.c writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_ENABLE);
q 521 drivers/media/pci/intel/ipu3/ipu3-cio2.c writel(0, q->csi_rx_base + CIO2_REG_CSIRX_ENABLE);
q 522 drivers/media/pci/intel/ipu3/ipu3-cio2.c writel(0, q->csi_rx_base + CIO2_REG_MIPIBE_ENABLE);
q 547 drivers/media/pci/intel/ipu3/ipu3-cio2.c struct cio2_queue *q = cio2->cur_queue;
q 559 drivers/media/pci/intel/ipu3/ipu3-cio2.c &q->fbpt[q->bufs_first * CIO2_MAX_LOPS];
q 565 drivers/media/pci/intel/ipu3/ipu3-cio2.c b = q->bufs[q->bufs_first];
q 569 drivers/media/pci/intel/ipu3/ipu3-cio2.c q->bufs[q->bufs_first] = NULL;
q 570 drivers/media/pci/intel/ipu3/ipu3-cio2.c atomic_dec(&q->bufs_queued);
q 576 drivers/media/pci/intel/ipu3/ipu3-cio2.c b->vbb.sequence = atomic_read(&q->frame_sequence);
q 583 drivers/media/pci/intel/ipu3/ipu3-cio2.c atomic_inc(&q->frame_sequence);
q 585 drivers/media/pci/intel/ipu3/ipu3-cio2.c q->bufs_first = (q->bufs_first + 1) % CIO2_MAX_BUFFERS;
q 595 drivers/media/pci/intel/ipu3/ipu3-cio2.c static void cio2_queue_event_sof(struct cio2_device *cio2, struct cio2_queue *q)
q 604 drivers/media/pci/intel/ipu3/ipu3-cio2.c .u.frame_sync.frame_sequence = atomic_read(&q->frame_sequence),
q 607 drivers/media/pci/intel/ipu3/ipu3-cio2.c v4l2_event_queue(q->subdev.devnode, &event);
q 792 drivers/media/pci/intel/ipu3/ipu3-cio2.c static void cio2_vb2_return_all_buffers(struct cio2_queue *q,
q 798 drivers/media/pci/intel/ipu3/ipu3-cio2.c if (q->bufs[i]) {
q 799 drivers/media/pci/intel/ipu3/ipu3-cio2.c atomic_dec(&q->bufs_queued);
q 800 drivers/media/pci/intel/ipu3/ipu3-cio2.c vb2_buffer_done(&q->bufs[i]->vbb.vb2_buf,
q 813 drivers/media/pci/intel/ipu3/ipu3-cio2.c struct cio2_queue *q = vb2q_to_cio2_queue(vq);
q 816 drivers/media/pci/intel/ipu3/ipu3-cio2.c *num_planes = q->format.num_planes;
q 819 drivers/media/pci/intel/ipu3/ipu3-cio2.c sizes[i] = q->format.plane_fmt[i].sizeimage;
q 827 drivers/media/pci/intel/ipu3/ipu3-cio2.c q->bufs[i] = NULL;
q 828 drivers/media/pci/intel/ipu3/ipu3-cio2.c cio2_fbpt_entry_init_dummy(cio2, &q->fbpt[i * CIO2_MAX_LOPS]);
q 830 drivers/media/pci/intel/ipu3/ipu3-cio2.c atomic_set(&q->bufs_queued, 0);
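The cio2_vb2_return_all_buffers() hits above trace a drain pattern: every slot still owned by the driver is completed with the caller's state (VB2_BUF_STATE_QUEUED on a failed start, VB2_BUF_STATE_ERROR on stop). A sketch of that pattern with demo_* stand-ins; clearing the slot afterwards is an assumption of this sketch, not shown in the hits:

	#include <linux/atomic.h>
	#include <media/videobuf2-v4l2.h>

	#define DEMO_MAX_BUFFERS 32	/* stand-in for CIO2_MAX_BUFFERS */

	struct demo_cio2_buffer { struct vb2_v4l2_buffer vbb; };
	struct demo_cio2_queue {
		struct demo_cio2_buffer *bufs[DEMO_MAX_BUFFERS];
		atomic_t bufs_queued;
	};

	static void demo_return_all_buffers(struct demo_cio2_queue *q,
					    enum vb2_buffer_state state)
	{
		unsigned int i;

		for (i = 0; i < DEMO_MAX_BUFFERS; i++) {
			if (q->bufs[i]) {
				atomic_dec(&q->bufs_queued);
				vb2_buffer_done(&q->bufs[i]->vbb.vb2_buf, state);
				q->bufs[i] = NULL;	/* assumed slot reset */
			}
		}
	}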
830 drivers/media/pci/intel/ipu3/ipu3-cio2.c atomic_set(&q->bufs_queued, 0); q 831 drivers/media/pci/intel/ipu3/ipu3-cio2.c q->bufs_first = 0; q 832 drivers/media/pci/intel/ipu3/ipu3-cio2.c q->bufs_next = 0; q 900 drivers/media/pci/intel/ipu3/ipu3-cio2.c struct cio2_queue *q = q 906 drivers/media/pci/intel/ipu3/ipu3-cio2.c unsigned int i, j, next = q->bufs_next; q 907 drivers/media/pci/intel/ipu3/ipu3-cio2.c int bufs_queued = atomic_inc_return(&q->bufs_queued); q 949 drivers/media/pci/intel/ipu3/ipu3-cio2.c if (!q->bufs[next]) { q 950 drivers/media/pci/intel/ipu3/ipu3-cio2.c q->bufs[next] = b; q 951 drivers/media/pci/intel/ipu3/ipu3-cio2.c entry = &q->fbpt[next * CIO2_MAX_LOPS]; q 954 drivers/media/pci/intel/ipu3/ipu3-cio2.c q->bufs_next = (next + 1) % CIO2_MAX_BUFFERS; q 957 drivers/media/pci/intel/ipu3/ipu3-cio2.c q->format.plane_fmt[j].sizeimage); q 967 drivers/media/pci/intel/ipu3/ipu3-cio2.c atomic_dec(&q->bufs_queued); q 989 drivers/media/pci/intel/ipu3/ipu3-cio2.c struct cio2_queue *q = vb2q_to_cio2_queue(vq); q 993 drivers/media/pci/intel/ipu3/ipu3-cio2.c cio2->cur_queue = q; q 994 drivers/media/pci/intel/ipu3/ipu3-cio2.c atomic_set(&q->frame_sequence, 0); q 1003 drivers/media/pci/intel/ipu3/ipu3-cio2.c r = media_pipeline_start(&q->vdev.entity, &q->pipe); q 1007 drivers/media/pci/intel/ipu3/ipu3-cio2.c r = cio2_hw_init(cio2, q); q 1012 drivers/media/pci/intel/ipu3/ipu3-cio2.c r = v4l2_subdev_call(q->sensor, video, s_stream, 1); q 1021 drivers/media/pci/intel/ipu3/ipu3-cio2.c cio2_hw_exit(cio2, q); q 1023 drivers/media/pci/intel/ipu3/ipu3-cio2.c media_pipeline_stop(&q->vdev.entity); q 1026 drivers/media/pci/intel/ipu3/ipu3-cio2.c cio2_vb2_return_all_buffers(q, VB2_BUF_STATE_QUEUED); q 1034 drivers/media/pci/intel/ipu3/ipu3-cio2.c struct cio2_queue *q = vb2q_to_cio2_queue(vq); q 1037 drivers/media/pci/intel/ipu3/ipu3-cio2.c if (v4l2_subdev_call(q->sensor, video, s_stream, 0)) q 1041 drivers/media/pci/intel/ipu3/ipu3-cio2.c cio2_hw_exit(cio2, q); q 1043 drivers/media/pci/intel/ipu3/ipu3-cio2.c cio2_vb2_return_all_buffers(q, VB2_BUF_STATE_ERROR); q 1044 drivers/media/pci/intel/ipu3/ipu3-cio2.c media_pipeline_stop(&q->vdev.entity); q 1089 drivers/media/pci/intel/ipu3/ipu3-cio2.c struct cio2_queue *q = file_to_cio2_queue(file); q 1091 drivers/media/pci/intel/ipu3/ipu3-cio2.c f->fmt.pix_mp = q->format; q 1132 drivers/media/pci/intel/ipu3/ipu3-cio2.c struct cio2_queue *q = file_to_cio2_queue(file); q 1135 drivers/media/pci/intel/ipu3/ipu3-cio2.c q->format = f->fmt.pix_mp; q 1245 drivers/media/pci/intel/ipu3/ipu3-cio2.c struct cio2_queue *q = container_of(sd, struct cio2_queue, subdev); q 1262 drivers/media/pci/intel/ipu3/ipu3-cio2.c q->subdev_fmt.colorspace = format.format.colorspace; q 1263 drivers/media/pci/intel/ipu3/ipu3-cio2.c q->subdev_fmt.ycbcr_enc = format.format.ycbcr_enc; q 1264 drivers/media/pci/intel/ipu3/ipu3-cio2.c q->subdev_fmt.quantization = format.format.quantization; q 1265 drivers/media/pci/intel/ipu3/ipu3-cio2.c q->subdev_fmt.xfer_func = format.format.xfer_func; q 1268 drivers/media/pci/intel/ipu3/ipu3-cio2.c fmt->format = q->subdev_fmt; q 1284 drivers/media/pci/intel/ipu3/ipu3-cio2.c struct cio2_queue *q = container_of(sd, struct cio2_queue, subdev); q 1297 drivers/media/pci/intel/ipu3/ipu3-cio2.c q->subdev_fmt.width = fmt->format.width; q 1298 drivers/media/pci/intel/ipu3/ipu3-cio2.c q->subdev_fmt.height = fmt->format.height; q 1299 drivers/media/pci/intel/ipu3/ipu3-cio2.c q->subdev_fmt.code = fmt->format.code; q 1300 drivers/media/pci/intel/ipu3/ipu3-cio2.c 
fmt->format = q->subdev_fmt; q 1336 drivers/media/pci/intel/ipu3/ipu3-cio2.c struct cio2_queue *q = container_of(vd, struct cio2_queue, vdev); q 1351 drivers/media/pci/intel/ipu3/ipu3-cio2.c if (source_fmt.format.width != q->format.width || q 1352 drivers/media/pci/intel/ipu3/ipu3-cio2.c source_fmt.format.height != q->format.height) { q 1355 drivers/media/pci/intel/ipu3/ipu3-cio2.c q->format.width, q->format.height, q 1360 drivers/media/pci/intel/ipu3/ipu3-cio2.c if (!cio2_find_format(&q->format.pixelformat, &source_fmt.format.code)) q 1403 drivers/media/pci/intel/ipu3/ipu3-cio2.c struct cio2_queue *q; q 1408 drivers/media/pci/intel/ipu3/ipu3-cio2.c q = &cio2->queue[s_asd->csi2.port]; q 1410 drivers/media/pci/intel/ipu3/ipu3-cio2.c q->csi2 = s_asd->csi2; q 1411 drivers/media/pci/intel/ipu3/ipu3-cio2.c q->sensor = sd; q 1412 drivers/media/pci/intel/ipu3/ipu3-cio2.c q->csi_rx_base = cio2->base + CIO2_REG_PIPE_BASE(q->csi2.port); q 1437 drivers/media/pci/intel/ipu3/ipu3-cio2.c struct cio2_queue *q; q 1443 drivers/media/pci/intel/ipu3/ipu3-cio2.c q = &cio2->queue[s_asd->csi2.port]; q 1445 drivers/media/pci/intel/ipu3/ipu3-cio2.c for (pad = 0; pad < q->sensor->entity.num_pads; pad++) q 1446 drivers/media/pci/intel/ipu3/ipu3-cio2.c if (q->sensor->entity.pads[pad].flags & q 1450 drivers/media/pci/intel/ipu3/ipu3-cio2.c if (pad == q->sensor->entity.num_pads) { q 1453 drivers/media/pci/intel/ipu3/ipu3-cio2.c q->sensor->name); q 1458 drivers/media/pci/intel/ipu3/ipu3-cio2.c &q->sensor->entity, pad, q 1459 drivers/media/pci/intel/ipu3/ipu3-cio2.c &q->subdev.entity, CIO2_PAD_SINK, q 1464 drivers/media/pci/intel/ipu3/ipu3-cio2.c q->sensor->name); q 1547 drivers/media/pci/intel/ipu3/ipu3-cio2.c static int cio2_queue_init(struct cio2_device *cio2, struct cio2_queue *q) q 1553 drivers/media/pci/intel/ipu3/ipu3-cio2.c struct video_device *vdev = &q->vdev; q 1554 drivers/media/pci/intel/ipu3/ipu3-cio2.c struct vb2_queue *vbq = &q->vbq; q 1555 drivers/media/pci/intel/ipu3/ipu3-cio2.c struct v4l2_subdev *subdev = &q->subdev; q 1560 drivers/media/pci/intel/ipu3/ipu3-cio2.c mutex_init(&q->lock); q 1563 drivers/media/pci/intel/ipu3/ipu3-cio2.c fmt = &q->subdev_fmt; q 1569 drivers/media/pci/intel/ipu3/ipu3-cio2.c q->format.width = default_width; q 1570 drivers/media/pci/intel/ipu3/ipu3-cio2.c q->format.height = default_height; q 1571 drivers/media/pci/intel/ipu3/ipu3-cio2.c q->format.pixelformat = dflt_fmt.fourcc; q 1572 drivers/media/pci/intel/ipu3/ipu3-cio2.c q->format.colorspace = V4L2_COLORSPACE_RAW; q 1573 drivers/media/pci/intel/ipu3/ipu3-cio2.c q->format.field = V4L2_FIELD_NONE; q 1574 drivers/media/pci/intel/ipu3/ipu3-cio2.c q->format.num_planes = 1; q 1575 drivers/media/pci/intel/ipu3/ipu3-cio2.c q->format.plane_fmt[0].bytesperline = q 1576 drivers/media/pci/intel/ipu3/ipu3-cio2.c cio2_bytesperline(q->format.width); q 1577 drivers/media/pci/intel/ipu3/ipu3-cio2.c q->format.plane_fmt[0].sizeimage = q->format.plane_fmt[0].bytesperline * q 1578 drivers/media/pci/intel/ipu3/ipu3-cio2.c q->format.height; q 1581 drivers/media/pci/intel/ipu3/ipu3-cio2.c r = cio2_fbpt_init(cio2, q); q 1586 drivers/media/pci/intel/ipu3/ipu3-cio2.c q->subdev_pads[CIO2_PAD_SINK].flags = MEDIA_PAD_FL_SINK | q 1588 drivers/media/pci/intel/ipu3/ipu3-cio2.c q->subdev_pads[CIO2_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE; q 1591 drivers/media/pci/intel/ipu3/ipu3-cio2.c r = media_entity_pads_init(&subdev->entity, CIO2_PADS, q->subdev_pads); q 1598 drivers/media/pci/intel/ipu3/ipu3-cio2.c q->vdev_pad.flags = MEDIA_PAD_FL_SINK | 
MEDIA_PAD_FL_MUST_CONNECT; q 1600 drivers/media/pci/intel/ipu3/ipu3-cio2.c r = media_entity_pads_init(&vdev->entity, 1, &q->vdev_pad); q 1612 drivers/media/pci/intel/ipu3/ipu3-cio2.c CIO2_ENTITY_NAME " %td", q - cio2->queue); q 1631 drivers/media/pci/intel/ipu3/ipu3-cio2.c vbq->lock = &q->lock; q 1641 drivers/media/pci/intel/ipu3/ipu3-cio2.c "%s %td", CIO2_NAME, q - cio2->queue); q 1647 drivers/media/pci/intel/ipu3/ipu3-cio2.c vdev->queue = &q->vbq; q 1667 drivers/media/pci/intel/ipu3/ipu3-cio2.c video_unregister_device(&q->vdev); q 1677 drivers/media/pci/intel/ipu3/ipu3-cio2.c cio2_fbpt_exit(q, &cio2->pci_dev->dev); q 1679 drivers/media/pci/intel/ipu3/ipu3-cio2.c mutex_destroy(&q->lock); q 1684 drivers/media/pci/intel/ipu3/ipu3-cio2.c static void cio2_queue_exit(struct cio2_device *cio2, struct cio2_queue *q) q 1686 drivers/media/pci/intel/ipu3/ipu3-cio2.c video_unregister_device(&q->vdev); q 1687 drivers/media/pci/intel/ipu3/ipu3-cio2.c media_entity_cleanup(&q->vdev.entity); q 1688 drivers/media/pci/intel/ipu3/ipu3-cio2.c vb2_queue_release(&q->vbq); q 1689 drivers/media/pci/intel/ipu3/ipu3-cio2.c v4l2_device_unregister_subdev(&q->subdev); q 1690 drivers/media/pci/intel/ipu3/ipu3-cio2.c media_entity_cleanup(&q->subdev.entity); q 1691 drivers/media/pci/intel/ipu3/ipu3-cio2.c cio2_fbpt_exit(q, &cio2->pci_dev->dev); q 1692 drivers/media/pci/intel/ipu3/ipu3-cio2.c mutex_destroy(&q->lock); q 1955 drivers/media/pci/intel/ipu3/ipu3-cio2.c static void cio2_fbpt_rearrange(struct cio2_device *cio2, struct cio2_queue *q) q 1959 drivers/media/pci/intel/ipu3/ipu3-cio2.c for (i = 0, j = q->bufs_first; i < CIO2_MAX_BUFFERS; q 1961 drivers/media/pci/intel/ipu3/ipu3-cio2.c if (q->bufs[j]) q 1968 drivers/media/pci/intel/ipu3/ipu3-cio2.c arrange(q->fbpt, sizeof(struct cio2_fbpt_entry) * CIO2_MAX_LOPS, q 1970 drivers/media/pci/intel/ipu3/ipu3-cio2.c arrange(q->bufs, sizeof(struct cio2_buffer *), q 1982 drivers/media/pci/intel/ipu3/ipu3-cio2.c cio2_fbpt_entry_enable(cio2, q->fbpt + i * CIO2_MAX_LOPS); q 1989 drivers/media/pci/intel/ipu3/ipu3-cio2.c struct cio2_queue *q = cio2->cur_queue; q 1996 drivers/media/pci/intel/ipu3/ipu3-cio2.c cio2_hw_exit(cio2, q); q 2005 drivers/media/pci/intel/ipu3/ipu3-cio2.c cio2_fbpt_rearrange(cio2, q); q 2006 drivers/media/pci/intel/ipu3/ipu3-cio2.c q->bufs_first = 0; q 2007 drivers/media/pci/intel/ipu3/ipu3-cio2.c q->bufs_next = 0; q 2016 drivers/media/pci/intel/ipu3/ipu3-cio2.c struct cio2_queue *q = cio2->cur_queue; q 2029 drivers/media/pci/intel/ipu3/ipu3-cio2.c r = cio2_hw_init(cio2, q); q 297 drivers/media/pci/ivtv/ivtv-fileops.c const u8 *q; q 301 drivers/media/pci/ivtv/ivtv-fileops.c while (start + len > p && (q = memchr(p, 0, start + len - p))) { q 302 drivers/media/pci/ivtv/ivtv-fileops.c p = q + 1; q 303 drivers/media/pci/ivtv/ivtv-fileops.c if ((char *)q + 15 >= buf->buf + buf->bytesused || q 304 drivers/media/pci/ivtv/ivtv-fileops.c q[1] != 0 || q[2] != 1 || q[3] != ch) { q 308 drivers/media/pci/ivtv/ivtv-fileops.c if ((q[6] & 0xc0) != 0x80) q 310 drivers/media/pci/ivtv/ivtv-fileops.c if (((q[7] & 0xc0) == 0x80 && (q[9] & 0xf0) == 0x20) || q 311 drivers/media/pci/ivtv/ivtv-fileops.c ((q[7] & 0xc0) == 0xc0 && (q[9] & 0xf0) == 0x30)) { q 314 drivers/media/pci/ivtv/ivtv-fileops.c p = q + 9; q 318 drivers/media/pci/ivtv/ivtv-fileops.c stuffing = q[13] & 7; q 321 drivers/media/pci/ivtv/ivtv-fileops.c if (q[14 + i] != 0xff) q 323 drivers/media/pci/ivtv/ivtv-fileops.c if (i == stuffing && (q[4] & 0xc4) == 0x44 && (q[12] & 3) == 3 && q 324 
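Several ipu3-cio2 entries above (bufs_first, bufs_next, the atomic bufs_queued and frame_sequence counters) follow a classic fixed-size ring pattern: buffers are queued into the next free slot and completed from the oldest slot, both indices advancing modulo the ring size. A minimal userspace C sketch of that bookkeeping; demo_ring and its helpers are hypothetical names, only CIO2_MAX_BUFFERS (value illustrative) and the field names echo the driver:

#include <stdatomic.h>
#include <stdio.h>

#define CIO2_MAX_BUFFERS 32		/* ring size; value illustrative */

struct demo_ring {			/* hypothetical stand-in for cio2_queue */
	void *bufs[CIO2_MAX_BUFFERS];
	unsigned int bufs_first;	/* oldest slot, completed next */
	unsigned int bufs_next;		/* next free slot for new buffers */
	atomic_uint bufs_queued;
	atomic_uint frame_sequence;
};

/* queue a buffer into the next free slot, as the buf_queue path does */
static int demo_queue(struct demo_ring *q, void *b)
{
	unsigned int next = q->bufs_next;

	if (q->bufs[next])
		return -1;		/* slot busy: ring is full here */
	q->bufs[next] = b;
	q->bufs_next = (next + 1) % CIO2_MAX_BUFFERS;
	atomic_fetch_add(&q->bufs_queued, 1);
	return 0;
}

/* complete the oldest buffer, as the frame-done interrupt path does */
static void *demo_complete(struct demo_ring *q)
{
	void *b = q->bufs[q->bufs_first];

	q->bufs[q->bufs_first] = NULL;
	q->bufs_first = (q->bufs_first + 1) % CIO2_MAX_BUFFERS;
	atomic_fetch_sub(&q->bufs_queued, 1);
	atomic_fetch_add(&q->frame_sequence, 1);
	return b;
}

int main(void)
{
	struct demo_ring q = { .bufs_first = 0, .bufs_next = 0 };
	int frame;

	demo_queue(&q, &frame);
	printf("completed %p, sequence %u, queued %u\n", demo_complete(&q),
	       atomic_load(&q.frame_sequence), atomic_load(&q.bufs_queued));
	return 0;
}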
drivers/media/pci/ivtv/ivtv-fileops.c q[14 + stuffing] == 0 && q[15 + stuffing] == 0 && q 325 drivers/media/pci/ivtv/ivtv-fileops.c q[16 + stuffing] == 1) { q 327 drivers/media/pci/ivtv/ivtv-fileops.c len = (char *)q - start; q 543 drivers/media/pci/ivtv/ivtv-fileops.c struct ivtv_queue q; q 576 drivers/media/pci/ivtv/ivtv-fileops.c ivtv_queue_init(&q); q 612 drivers/media/pci/ivtv/ivtv-fileops.c while (q.length - q.bytesused < count && (buf = ivtv_dequeue(s, &s->q_io))) q 613 drivers/media/pci/ivtv/ivtv-fileops.c ivtv_enqueue(s, buf, &q); q 614 drivers/media/pci/ivtv/ivtv-fileops.c while (q.length - q.bytesused < count && (buf = ivtv_dequeue(s, &s->q_free))) { q 615 drivers/media/pci/ivtv/ivtv-fileops.c ivtv_enqueue(s, buf, &q); q 617 drivers/media/pci/ivtv/ivtv-fileops.c if (q.buffers) q 635 drivers/media/pci/ivtv/ivtv-fileops.c while ((buf = ivtv_dequeue(s, &q))) { q 647 drivers/media/pci/ivtv/ivtv-fileops.c ivtv_queue_move(s, &q, NULL, &s->q_free, 0); q 32 drivers/media/pci/ivtv/ivtv-queue.c void ivtv_queue_init(struct ivtv_queue *q) q 34 drivers/media/pci/ivtv/ivtv-queue.c INIT_LIST_HEAD(&q->list); q 35 drivers/media/pci/ivtv/ivtv-queue.c q->buffers = 0; q 36 drivers/media/pci/ivtv/ivtv-queue.c q->length = 0; q 37 drivers/media/pci/ivtv/ivtv-queue.c q->bytesused = 0; q 40 drivers/media/pci/ivtv/ivtv-queue.c void ivtv_enqueue(struct ivtv_stream *s, struct ivtv_buffer *buf, struct ivtv_queue *q) q 45 drivers/media/pci/ivtv/ivtv-queue.c if (q == &s->q_free) { q 52 drivers/media/pci/ivtv/ivtv-queue.c list_add_tail(&buf->list, &q->list); q 53 drivers/media/pci/ivtv/ivtv-queue.c q->buffers++; q 54 drivers/media/pci/ivtv/ivtv-queue.c q->length += s->buf_size; q 55 drivers/media/pci/ivtv/ivtv-queue.c q->bytesused += buf->bytesused - buf->readpos; q 59 drivers/media/pci/ivtv/ivtv-queue.c struct ivtv_buffer *ivtv_dequeue(struct ivtv_stream *s, struct ivtv_queue *q) q 65 drivers/media/pci/ivtv/ivtv-queue.c if (!list_empty(&q->list)) { q 66 drivers/media/pci/ivtv/ivtv-queue.c buf = list_entry(q->list.next, struct ivtv_buffer, list); q 67 drivers/media/pci/ivtv/ivtv-queue.c list_del_init(q->list.next); q 68 drivers/media/pci/ivtv/ivtv-queue.c q->buffers--; q 69 drivers/media/pci/ivtv/ivtv-queue.c q->length -= s->buf_size; q 70 drivers/media/pci/ivtv/ivtv-queue.c q->bytesused -= buf->bytesused - buf->readpos; q 59 drivers/media/pci/ivtv/ivtv-queue.h void ivtv_queue_init(struct ivtv_queue *q); q 60 drivers/media/pci/ivtv/ivtv-queue.h void ivtv_enqueue(struct ivtv_stream *s, struct ivtv_buffer *buf, struct ivtv_queue *q); q 61 drivers/media/pci/ivtv/ivtv-queue.h struct ivtv_buffer *ivtv_dequeue(struct ivtv_stream *s, struct ivtv_queue *q); q 305 drivers/media/pci/ivtv/ivtv-vbi.c u8 *q = buf; q 316 drivers/media/pci/ivtv/ivtv-vbi.c memcpy(q, p + 4, line_size - 4); q 317 drivers/media/pci/ivtv/ivtv-vbi.c q += line_size - 4; q 330 drivers/media/pci/netup_unidvb/netup_unidvb_core.c static int netup_unidvb_start_streaming(struct vb2_queue *q, unsigned int count) q 332 drivers/media/pci/netup_unidvb/netup_unidvb_core.c struct netup_dma *dma = vb2_get_drv_priv(q); q 339 drivers/media/pci/netup_unidvb/netup_unidvb_core.c static void netup_unidvb_stop_streaming(struct vb2_queue *q) q 341 drivers/media/pci/netup_unidvb/netup_unidvb_core.c struct netup_dma *dma = vb2_get_drv_priv(q); q 264 drivers/media/pci/saa7134/saa7134-core.c struct saa7134_dmaqueue *q, q 272 drivers/media/pci/saa7134/saa7134-core.c if (NULL == q->curr) { q 273 drivers/media/pci/saa7134/saa7134-core.c if (!q->need_two) { q 274 
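The ivtv-queue.c entries above implement a list-backed buffer queue that keeps three running totals (buffers, length, bytesused) in sync on every enqueue and dequeue. A runnable sketch of the same accounting, with the kernel list_head replaced by a plain singly linked list; the demo_* names are hypothetical and BUF_SIZE stands in for the stream's buf_size:

#include <stdio.h>

struct demo_buf {
	struct demo_buf *next;
	unsigned int bytesused;		/* bytes written into the buffer */
	unsigned int readpos;		/* bytes already consumed */
};

struct demo_queue {			/* mirrors struct ivtv_queue's totals */
	struct demo_buf *head, *tail;
	int buffers;
	unsigned int length;		/* total capacity across buffers */
	unsigned int bytesused;		/* payload still pending */
};

#define BUF_SIZE 4096			/* stands in for the per-stream buf_size */

static void demo_enqueue(struct demo_queue *q, struct demo_buf *buf)
{
	buf->next = NULL;
	if (q->tail)
		q->tail->next = buf;
	else
		q->head = buf;
	q->tail = buf;
	q->buffers++;
	q->length += BUF_SIZE;
	q->bytesused += buf->bytesused - buf->readpos;
}

static struct demo_buf *demo_dequeue(struct demo_queue *q)
{
	struct demo_buf *buf = q->head;

	if (!buf)
		return NULL;
	q->head = buf->next;
	if (!q->head)
		q->tail = NULL;
	q->buffers--;
	q->length -= BUF_SIZE;
	q->bytesused -= buf->bytesused - buf->readpos;
	return buf;
}

int main(void)
{
	struct demo_queue q = { 0 };
	struct demo_buf b = { .bytesused = 100, .readpos = 20 };

	demo_enqueue(&q, &b);
	printf("%d buffer(s), %u byte(s) pending\n", q.buffers, q.bytesused);
	demo_dequeue(&q);
	return 0;
}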
drivers/media/pci/saa7134/saa7134-core.c q->curr = buf; q 276 drivers/media/pci/saa7134/saa7134-core.c } else if (list_empty(&q->queue)) { q 277 drivers/media/pci/saa7134/saa7134-core.c list_add_tail(&buf->entry, &q->queue); q 279 drivers/media/pci/saa7134/saa7134-core.c next = list_entry(q->queue.next, struct saa7134_buf, q 281 drivers/media/pci/saa7134/saa7134-core.c q->curr = buf; q 285 drivers/media/pci/saa7134/saa7134-core.c list_add_tail(&buf->entry, &q->queue); q 292 drivers/media/pci/saa7134/saa7134-core.c struct saa7134_dmaqueue *q, q 295 drivers/media/pci/saa7134/saa7134-core.c core_dbg("buffer_finish %p\n", q->curr); q 298 drivers/media/pci/saa7134/saa7134-core.c q->curr->vb2.vb2_buf.timestamp = ktime_get_ns(); q 299 drivers/media/pci/saa7134/saa7134-core.c q->curr->vb2.sequence = q->seq_nr++; q 300 drivers/media/pci/saa7134/saa7134-core.c vb2_buffer_done(&q->curr->vb2.vb2_buf, state); q 301 drivers/media/pci/saa7134/saa7134-core.c q->curr = NULL; q 305 drivers/media/pci/saa7134/saa7134-core.c struct saa7134_dmaqueue *q) q 310 drivers/media/pci/saa7134/saa7134-core.c BUG_ON(NULL != q->curr); q 312 drivers/media/pci/saa7134/saa7134-core.c if (!list_empty(&q->queue)) { q 314 drivers/media/pci/saa7134/saa7134-core.c buf = list_entry(q->queue.next, struct saa7134_buf, entry); q 316 drivers/media/pci/saa7134/saa7134-core.c buf, q->queue.prev, q->queue.next); q 318 drivers/media/pci/saa7134/saa7134-core.c if (!list_empty(&q->queue)) q 319 drivers/media/pci/saa7134/saa7134-core.c next = list_entry(q->queue.next, struct saa7134_buf, entry); q 320 drivers/media/pci/saa7134/saa7134-core.c q->curr = buf; q 323 drivers/media/pci/saa7134/saa7134-core.c q->queue.prev, q->queue.next); q 328 drivers/media/pci/saa7134/saa7134-core.c del_timer(&q->timeout); q 334 drivers/media/pci/saa7134/saa7134-core.c struct saa7134_dmaqueue *q = from_timer(q, t, timeout); q 335 drivers/media/pci/saa7134/saa7134-core.c struct saa7134_dev *dev = q->dev; q 347 drivers/media/pci/saa7134/saa7134-core.c if (q->curr) { q 348 drivers/media/pci/saa7134/saa7134-core.c core_dbg("timeout on %p\n", q->curr); q 349 drivers/media/pci/saa7134/saa7134-core.c saa7134_buffer_finish(dev, q, VB2_BUF_STATE_ERROR); q 351 drivers/media/pci/saa7134/saa7134-core.c saa7134_buffer_next(dev, q); q 355 drivers/media/pci/saa7134/saa7134-core.c void saa7134_stop_streaming(struct saa7134_dev *dev, struct saa7134_dmaqueue *q) q 362 drivers/media/pci/saa7134/saa7134-core.c if (!list_empty(&q->queue)) { q 363 drivers/media/pci/saa7134/saa7134-core.c list_for_each_safe(pos, n, &q->queue) { q 372 drivers/media/pci/saa7134/saa7134-core.c saa7134_buffer_timeout(&q->timeout); /* also calls del_timer(&q->timeout) */ q 1377 drivers/media/pci/saa7134/saa7134-core.c struct saa7134_dmaqueue *q) q 1383 drivers/media/pci/saa7134/saa7134-core.c buf = q->curr; q 1392 drivers/media/pci/saa7134/saa7134-core.c if (!list_empty(&q->queue)) q 1393 drivers/media/pci/saa7134/saa7134-core.c next = list_entry(q->queue.next, struct saa7134_buf, q 1202 drivers/media/pci/saa7134/saa7134-dvb.c struct vb2_queue *q; q 1219 drivers/media/pci/saa7134/saa7134-dvb.c q = &fe0->dvb.dvbq; q 1220 drivers/media/pci/saa7134/saa7134-dvb.c q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; q 1221 drivers/media/pci/saa7134/saa7134-dvb.c q->io_modes = VB2_MMAP | VB2_READ; q 1222 drivers/media/pci/saa7134/saa7134-dvb.c q->drv_priv = &dev->ts_q; q 1223 drivers/media/pci/saa7134/saa7134-dvb.c q->ops = &saa7134_ts_qops; q 1224 drivers/media/pci/saa7134/saa7134-dvb.c q->mem_ops = &vb2_dma_sg_memops; q 
1225 drivers/media/pci/saa7134/saa7134-dvb.c q->buf_struct_size = sizeof(struct saa7134_buf); q 1226 drivers/media/pci/saa7134/saa7134-dvb.c q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; q 1227 drivers/media/pci/saa7134/saa7134-dvb.c q->lock = &dev->lock; q 1228 drivers/media/pci/saa7134/saa7134-dvb.c q->dev = &dev->pci->dev; q 1229 drivers/media/pci/saa7134/saa7134-dvb.c ret = vb2_queue_init(q); q 242 drivers/media/pci/saa7134/saa7134-empress.c struct vb2_queue *q; q 268 drivers/media/pci/saa7134/saa7134-empress.c q = &dev->empress_vbq; q 269 drivers/media/pci/saa7134/saa7134-empress.c q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; q 275 drivers/media/pci/saa7134/saa7134-empress.c q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_READ; q 276 drivers/media/pci/saa7134/saa7134-empress.c q->drv_priv = &dev->ts_q; q 277 drivers/media/pci/saa7134/saa7134-empress.c q->ops = &saa7134_empress_qops; q 278 drivers/media/pci/saa7134/saa7134-empress.c q->gfp_flags = GFP_DMA32; q 279 drivers/media/pci/saa7134/saa7134-empress.c q->mem_ops = &vb2_dma_sg_memops; q 280 drivers/media/pci/saa7134/saa7134-empress.c q->buf_struct_size = sizeof(struct saa7134_buf); q 281 drivers/media/pci/saa7134/saa7134-empress.c q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; q 282 drivers/media/pci/saa7134/saa7134-empress.c q->lock = &dev->lock; q 283 drivers/media/pci/saa7134/saa7134-empress.c q->dev = &dev->pci->dev; q 284 drivers/media/pci/saa7134/saa7134-empress.c err = vb2_queue_init(q); q 287 drivers/media/pci/saa7134/saa7134-empress.c dev->empress_dev->queue = q; q 106 drivers/media/pci/saa7134/saa7134-ts.c int saa7134_ts_queue_setup(struct vb2_queue *q, q 110 drivers/media/pci/saa7134/saa7134-ts.c struct saa7134_dmaqueue *dmaq = q->drv_priv; q 128 drivers/media/pci/saa7134/saa7134-vbi.c static int queue_setup(struct vb2_queue *q, q 132 drivers/media/pci/saa7134/saa7134-vbi.c struct saa7134_dmaqueue *dmaq = q->drv_priv; q 937 drivers/media/pci/saa7134/saa7134-video.c static int queue_setup(struct vb2_queue *q, q 941 drivers/media/pci/saa7134/saa7134-video.c struct saa7134_dmaqueue *dmaq = q->drv_priv; q 2045 drivers/media/pci/saa7134/saa7134-video.c struct vb2_queue *q; q 2108 drivers/media/pci/saa7134/saa7134-video.c q = &dev->video_vbq; q 2109 drivers/media/pci/saa7134/saa7134-video.c q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; q 2117 drivers/media/pci/saa7134/saa7134-video.c q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_READ; q 2119 drivers/media/pci/saa7134/saa7134-video.c q->io_modes |= VB2_USERPTR; q 2120 drivers/media/pci/saa7134/saa7134-video.c q->drv_priv = &dev->video_q; q 2121 drivers/media/pci/saa7134/saa7134-video.c q->ops = &vb2_qops; q 2122 drivers/media/pci/saa7134/saa7134-video.c q->gfp_flags = GFP_DMA32; q 2123 drivers/media/pci/saa7134/saa7134-video.c q->mem_ops = &vb2_dma_sg_memops; q 2124 drivers/media/pci/saa7134/saa7134-video.c q->buf_struct_size = sizeof(struct saa7134_buf); q 2125 drivers/media/pci/saa7134/saa7134-video.c q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; q 2126 drivers/media/pci/saa7134/saa7134-video.c q->lock = &dev->lock; q 2127 drivers/media/pci/saa7134/saa7134-video.c q->dev = &dev->pci->dev; q 2128 drivers/media/pci/saa7134/saa7134-video.c ret = vb2_queue_init(q); q 2133 drivers/media/pci/saa7134/saa7134-video.c q = &dev->vbi_vbq; q 2134 drivers/media/pci/saa7134/saa7134-video.c q->type = V4L2_BUF_TYPE_VBI_CAPTURE; q 2136 drivers/media/pci/saa7134/saa7134-video.c q->io_modes = VB2_MMAP | VB2_READ; q 2138 drivers/media/pci/saa7134/saa7134-video.c q->io_modes |= 
VB2_USERPTR; q 2139 drivers/media/pci/saa7134/saa7134-video.c q->drv_priv = &dev->vbi_q; q 2140 drivers/media/pci/saa7134/saa7134-video.c q->ops = &saa7134_vbi_qops; q 2141 drivers/media/pci/saa7134/saa7134-video.c q->gfp_flags = GFP_DMA32; q 2142 drivers/media/pci/saa7134/saa7134-video.c q->mem_ops = &vb2_dma_sg_memops; q 2143 drivers/media/pci/saa7134/saa7134-video.c q->buf_struct_size = sizeof(struct saa7134_buf); q 2144 drivers/media/pci/saa7134/saa7134-video.c q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; q 2145 drivers/media/pci/saa7134/saa7134-video.c q->lock = &dev->lock; q 2146 drivers/media/pci/saa7134/saa7134-video.c q->dev = &dev->pci->dev; q 2147 drivers/media/pci/saa7134/saa7134-video.c ret = vb2_queue_init(q); q 765 drivers/media/pci/saa7134/saa7134.h int saa7134_buffer_queue(struct saa7134_dev *dev, struct saa7134_dmaqueue *q, q 767 drivers/media/pci/saa7134/saa7134.h void saa7134_buffer_finish(struct saa7134_dev *dev, struct saa7134_dmaqueue *q, q 769 drivers/media/pci/saa7134/saa7134.h void saa7134_buffer_next(struct saa7134_dev *dev, struct saa7134_dmaqueue *q); q 771 drivers/media/pci/saa7134/saa7134.h void saa7134_stop_streaming(struct saa7134_dev *dev, struct saa7134_dmaqueue *q); q 844 drivers/media/pci/saa7134/saa7134.h int saa7134_ts_queue_setup(struct vb2_queue *q, q 73 drivers/media/pci/saa7164/saa7164-cmd.c wait_queue_head_t *q = NULL; q 86 drivers/media/pci/saa7164/saa7164-cmd.c q = &dev->cmds[tRsp.seqno].wait; q 94 drivers/media/pci/saa7164/saa7164-cmd.c wake_up(q); q 125 drivers/media/pci/saa7164/saa7164-cmd.c wait_queue_head_t *q = NULL; q 139 drivers/media/pci/saa7164/saa7164-cmd.c q = &dev->cmds[tRsp.seqno].wait; q 167 drivers/media/pci/saa7164/saa7164-cmd.c wake_up(q); q 247 drivers/media/pci/saa7164/saa7164-cmd.c wait_queue_head_t *q = NULL; q 260 drivers/media/pci/saa7164/saa7164-cmd.c q = &dev->cmds[seqno].wait; q 264 drivers/media/pci/saa7164/saa7164-cmd.c if (q) { q 279 drivers/media/pci/saa7164/saa7164-cmd.c wait_event_timeout(*q, dev->cmds[seqno].signalled, q 195 drivers/media/pci/saa7164/saa7164-dvb.c struct list_head *p, *q; q 206 drivers/media/pci/saa7164/saa7164-dvb.c list_for_each_safe(p, q, &port->dmaqueue.list) { q 61 drivers/media/pci/saa7164/saa7164-encoder.c struct list_head *c, *n, *p, *q, *l, *v; q 77 drivers/media/pci/saa7164/saa7164-encoder.c list_for_each_safe(p, q, &port->list_buf_used.list) { q 30 drivers/media/pci/saa7164/saa7164-vbi.c struct list_head *c, *n, *p, *q, *l, *v; q 46 drivers/media/pci/saa7164/saa7164-vbi.c list_for_each_safe(p, q, &port->list_buf_used.list) { q 655 drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c static int solo_enc_queue_setup(struct vb2_queue *q, q 708 drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c static int solo_enc_start_streaming(struct vb2_queue *q, unsigned int count) q 710 drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c struct solo_enc_dev *solo_enc = vb2_get_drv_priv(q); q 715 drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c static void solo_enc_stop_streaming(struct vb2_queue *q) q 717 drivers/media/pci/solo6x10/solo6x10-v4l2-enc.c struct solo_enc_dev *solo_enc = vb2_get_drv_priv(q); q 307 drivers/media/pci/solo6x10/solo6x10-v4l2.c static int solo_queue_setup(struct vb2_queue *q, q 311 drivers/media/pci/solo6x10/solo6x10-v4l2.c struct solo_dev *solo_dev = vb2_get_drv_priv(q); q 322 drivers/media/pci/solo6x10/solo6x10-v4l2.c static int solo_start_streaming(struct vb2_queue *q, unsigned int count) q 324 drivers/media/pci/solo6x10/solo6x10-v4l2.c struct solo_dev *solo_dev = 
vb2_get_drv_priv(q); q 330 drivers/media/pci/solo6x10/solo6x10-v4l2.c static void solo_stop_streaming(struct vb2_queue *q) q 332 drivers/media/pci/solo6x10/solo6x10-v4l2.c struct solo_dev *solo_dev = vb2_get_drv_priv(q); q 182 drivers/media/pci/tw5864/tw5864-video.c static int tw5864_queue_setup(struct vb2_queue *q, unsigned int *num_buffers, q 427 drivers/media/pci/tw5864/tw5864-video.c static int tw5864_start_streaming(struct vb2_queue *q, unsigned int count) q 429 drivers/media/pci/tw5864/tw5864-video.c struct tw5864_input *input = vb2_get_drv_priv(q); q 446 drivers/media/pci/tw5864/tw5864-video.c static void tw5864_stop_streaming(struct vb2_queue *q) q 449 drivers/media/pci/tw5864/tw5864-video.c struct tw5864_input *input = vb2_get_drv_priv(q); q 358 drivers/media/pci/tw68/tw68-video.c static int tw68_queue_setup(struct vb2_queue *q, q 362 drivers/media/pci/tw68/tw68-video.c struct tw68_dev *dev = vb2_get_drv_priv(q); q 363 drivers/media/pci/tw68/tw68-video.c unsigned tot_bufs = q->num_buffers + *num_buffers; q 369 drivers/media/pci/tw68/tw68-video.c *num_buffers = tot_bufs - q->num_buffers; q 491 drivers/media/pci/tw68/tw68-video.c static int tw68_start_streaming(struct vb2_queue *q, unsigned int count) q 493 drivers/media/pci/tw68/tw68-video.c struct tw68_dev *dev = vb2_get_drv_priv(q); q 502 drivers/media/pci/tw68/tw68-video.c static void tw68_stop_streaming(struct vb2_queue *q) q 504 drivers/media/pci/tw68/tw68-video.c struct tw68_dev *dev = vb2_get_drv_priv(q); q 2336 drivers/media/platform/am437x/am437x-vpfe.c struct vb2_queue *q; q 2353 drivers/media/platform/am437x/am437x-vpfe.c q = &vpfe->buffer_queue; q 2354 drivers/media/platform/am437x/am437x-vpfe.c q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; q 2355 drivers/media/platform/am437x/am437x-vpfe.c q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_READ; q 2356 drivers/media/platform/am437x/am437x-vpfe.c q->drv_priv = vpfe; q 2357 drivers/media/platform/am437x/am437x-vpfe.c q->ops = &vpfe_video_qops; q 2358 drivers/media/platform/am437x/am437x-vpfe.c q->mem_ops = &vb2_dma_contig_memops; q 2359 drivers/media/platform/am437x/am437x-vpfe.c q->buf_struct_size = sizeof(struct vpfe_cap_buffer); q 2360 drivers/media/platform/am437x/am437x-vpfe.c q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; q 2361 drivers/media/platform/am437x/am437x-vpfe.c q->lock = &vpfe->lock; q 2362 drivers/media/platform/am437x/am437x-vpfe.c q->min_buffers_needed = 1; q 2363 drivers/media/platform/am437x/am437x-vpfe.c q->dev = vpfe->pdev; q 2365 drivers/media/platform/am437x/am437x-vpfe.c err = vb2_queue_init(q); q 2380 drivers/media/platform/am437x/am437x-vpfe.c vdev->queue = q; q 1397 drivers/media/platform/aspeed-video.c static int aspeed_video_queue_setup(struct vb2_queue *q, q 1403 drivers/media/platform/aspeed-video.c struct aspeed_video *video = vb2_get_drv_priv(q); q 1428 drivers/media/platform/aspeed-video.c static int aspeed_video_start_streaming(struct vb2_queue *q, q 1432 drivers/media/platform/aspeed-video.c struct aspeed_video *video = vb2_get_drv_priv(q); q 1446 drivers/media/platform/aspeed-video.c static void aspeed_video_stop_streaming(struct vb2_queue *q) q 1449 drivers/media/platform/aspeed-video.c struct aspeed_video *video = vb2_get_drv_priv(q); q 2018 drivers/media/platform/atmel/atmel-isc-base.c struct vb2_queue *q = &isc->vb2_vidq; q 2035 drivers/media/platform/atmel/atmel-isc-base.c q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; q 2036 drivers/media/platform/atmel/atmel-isc-base.c q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_READ; q 2037 
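Across the saa7134, solo6x10, tw68, am437x-vpfe and atmel entries in this region, vb2_queue setup is the same handful of field assignments before vb2_queue_init(). A hedged kernel-style sketch of that boilerplate; my_dev, my_buffer and my_qops are placeholders rather than names from any driver listed, and the fragment is not a buildable module on its own:

#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-contig.h>

struct my_buffer {			/* placeholder per-buffer state */
	struct vb2_v4l2_buffer vbb;
	struct list_head list;
};

struct my_dev {				/* placeholder driver state */
	struct vb2_queue queue;
	struct mutex lock;
	struct device *dma_dev;
};

extern const struct vb2_ops my_qops;	/* queue_setup/buf_queue/... */

static int my_init_vb2_queue(struct my_dev *dev)
{
	struct vb2_queue *q = &dev->queue;

	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_READ;
	q->drv_priv = dev;		/* what vb2_get_drv_priv() returns */
	q->buf_struct_size = sizeof(struct my_buffer);
	q->ops = &my_qops;
	q->mem_ops = &vb2_dma_contig_memops;	/* or vb2_dma_sg_memops */
	q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
	q->min_buffers_needed = 2;	/* defer start_streaming until 2 queued */
	q->lock = &dev->lock;		/* serializes the queue ioctls */
	q->dev = dev->dma_dev;		/* the device that performs DMA */

	return vb2_queue_init(q);
}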
drivers/media/platform/atmel/atmel-isc-base.c q->drv_priv = isc; q 2038 drivers/media/platform/atmel/atmel-isc-base.c q->buf_struct_size = sizeof(struct isc_buffer); q 2039 drivers/media/platform/atmel/atmel-isc-base.c q->ops = &isc_vb2_ops; q 2040 drivers/media/platform/atmel/atmel-isc-base.c q->mem_ops = &vb2_dma_contig_memops; q 2041 drivers/media/platform/atmel/atmel-isc-base.c q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; q 2042 drivers/media/platform/atmel/atmel-isc-base.c q->lock = &isc->lock; q 2043 drivers/media/platform/atmel/atmel-isc-base.c q->min_buffers_needed = 1; q 2044 drivers/media/platform/atmel/atmel-isc-base.c q->dev = isc->dev; q 2046 drivers/media/platform/atmel/atmel-isc-base.c ret = vb2_queue_init(q); q 2084 drivers/media/platform/atmel/atmel-isc-base.c vdev->queue = q; q 1158 drivers/media/platform/atmel/atmel-isi.c struct vb2_queue *q; q 1181 drivers/media/platform/atmel/atmel-isi.c q = &isi->queue; q 1207 drivers/media/platform/atmel/atmel-isi.c q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; q 1208 drivers/media/platform/atmel/atmel-isi.c q->io_modes = VB2_MMAP | VB2_READ | VB2_DMABUF; q 1209 drivers/media/platform/atmel/atmel-isi.c q->lock = &isi->lock; q 1210 drivers/media/platform/atmel/atmel-isi.c q->drv_priv = isi; q 1211 drivers/media/platform/atmel/atmel-isi.c q->buf_struct_size = sizeof(struct frame_buffer); q 1212 drivers/media/platform/atmel/atmel-isi.c q->ops = &isi_video_qops; q 1213 drivers/media/platform/atmel/atmel-isi.c q->mem_ops = &vb2_dma_contig_memops; q 1214 drivers/media/platform/atmel/atmel-isi.c q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; q 1215 drivers/media/platform/atmel/atmel-isi.c q->min_buffers_needed = 2; q 1216 drivers/media/platform/atmel/atmel-isi.c q->dev = &pdev->dev; q 1218 drivers/media/platform/atmel/atmel-isi.c ret = vb2_queue_init(q); q 1834 drivers/media/platform/coda/coda-common.c static int coda_start_streaming(struct vb2_queue *q, unsigned int count) q 1836 drivers/media/platform/coda/coda-common.c struct coda_ctx *ctx = vb2_get_drv_priv(q); q 1847 drivers/media/platform/coda/coda-common.c coda_dbg(1, ctx, "start streaming %s\n", v4l2_type_names[q->type]); q 1852 drivers/media/platform/coda/coda-common.c if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) { q 1916 drivers/media/platform/coda/coda-common.c if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) { q 1925 drivers/media/platform/coda/coda-common.c if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) { q 1939 drivers/media/platform/coda/coda-common.c static void coda_stop_streaming(struct vb2_queue *q) q 1941 drivers/media/platform/coda/coda-common.c struct coda_ctx *ctx = vb2_get_drv_priv(q); q 1948 drivers/media/platform/coda/coda-common.c coda_dbg(1, ctx, "stop streaming %s\n", v4l2_type_names[q->type]); q 1950 drivers/media/platform/coda/coda-common.c if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) { q 196 drivers/media/platform/davinci/vpbe_display.c struct vb2_queue *q = vb->vb2_queue; q 197 drivers/media/platform/davinci/vpbe_display.c struct vpbe_layer *layer = vb2_get_drv_priv(q); q 1368 drivers/media/platform/davinci/vpbe_display.c struct vb2_queue *q; q 1426 drivers/media/platform/davinci/vpbe_display.c q = &disp_dev->dev[i]->buffer_queue; q 1427 drivers/media/platform/davinci/vpbe_display.c memset(q, 0, sizeof(*q)); q 1428 drivers/media/platform/davinci/vpbe_display.c q->type = V4L2_BUF_TYPE_VIDEO_OUTPUT; q 1429 drivers/media/platform/davinci/vpbe_display.c q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF; q 1430 drivers/media/platform/davinci/vpbe_display.c 
q->drv_priv = disp_dev->dev[i]; q 1431 drivers/media/platform/davinci/vpbe_display.c q->ops = &video_qops; q 1432 drivers/media/platform/davinci/vpbe_display.c q->mem_ops = &vb2_dma_contig_memops; q 1433 drivers/media/platform/davinci/vpbe_display.c q->buf_struct_size = sizeof(struct vpbe_disp_buffer); q 1434 drivers/media/platform/davinci/vpbe_display.c q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; q 1435 drivers/media/platform/davinci/vpbe_display.c q->min_buffers_needed = 1; q 1436 drivers/media/platform/davinci/vpbe_display.c q->lock = &disp_dev->dev[i]->opslock; q 1437 drivers/media/platform/davinci/vpbe_display.c q->dev = disp_dev->vpbe_dev->pdev; q 1438 drivers/media/platform/davinci/vpbe_display.c err = vb2_queue_init(q); q 71 drivers/media/platform/davinci/vpif_capture.c struct vb2_queue *q = vb->vb2_queue; q 72 drivers/media/platform/davinci/vpif_capture.c struct channel_obj *ch = vb2_get_drv_priv(q); q 1414 drivers/media/platform/davinci/vpif_capture.c struct vb2_queue *q; q 1436 drivers/media/platform/davinci/vpif_capture.c q = &common->buffer_queue; q 1437 drivers/media/platform/davinci/vpif_capture.c q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; q 1438 drivers/media/platform/davinci/vpif_capture.c q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF; q 1439 drivers/media/platform/davinci/vpif_capture.c q->drv_priv = ch; q 1440 drivers/media/platform/davinci/vpif_capture.c q->ops = &video_qops; q 1441 drivers/media/platform/davinci/vpif_capture.c q->mem_ops = &vb2_dma_contig_memops; q 1442 drivers/media/platform/davinci/vpif_capture.c q->buf_struct_size = sizeof(struct vpif_cap_buffer); q 1443 drivers/media/platform/davinci/vpif_capture.c q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; q 1444 drivers/media/platform/davinci/vpif_capture.c q->min_buffers_needed = 1; q 1445 drivers/media/platform/davinci/vpif_capture.c q->lock = &common->lock; q 1446 drivers/media/platform/davinci/vpif_capture.c q->dev = vpif_dev; q 1448 drivers/media/platform/davinci/vpif_capture.c err = vb2_queue_init(q); q 1464 drivers/media/platform/davinci/vpif_capture.c vdev->queue = q; q 1142 drivers/media/platform/davinci/vpif_display.c struct vb2_queue *q; q 1182 drivers/media/platform/davinci/vpif_display.c q = &common->buffer_queue; q 1183 drivers/media/platform/davinci/vpif_display.c q->type = V4L2_BUF_TYPE_VIDEO_OUTPUT; q 1184 drivers/media/platform/davinci/vpif_display.c q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF; q 1185 drivers/media/platform/davinci/vpif_display.c q->drv_priv = ch; q 1186 drivers/media/platform/davinci/vpif_display.c q->ops = &video_qops; q 1187 drivers/media/platform/davinci/vpif_display.c q->mem_ops = &vb2_dma_contig_memops; q 1188 drivers/media/platform/davinci/vpif_display.c q->buf_struct_size = sizeof(struct vpif_disp_buffer); q 1189 drivers/media/platform/davinci/vpif_display.c q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; q 1190 drivers/media/platform/davinci/vpif_display.c q->min_buffers_needed = 1; q 1191 drivers/media/platform/davinci/vpif_display.c q->lock = &common->lock; q 1192 drivers/media/platform/davinci/vpif_display.c q->dev = vpif_dev; q 1193 drivers/media/platform/davinci/vpif_display.c err = vb2_queue_init(q); q 1213 drivers/media/platform/davinci/vpif_display.c vdev->queue = q; q 56 drivers/media/platform/exynos-gsc/gsc-m2m.c static int gsc_m2m_start_streaming(struct vb2_queue *q, unsigned int count) q 58 drivers/media/platform/exynos-gsc/gsc-m2m.c struct gsc_ctx *ctx = q->drv_priv; q 80 drivers/media/platform/exynos-gsc/gsc-m2m.c 
static void gsc_m2m_stop_streaming(struct vb2_queue *q) q 82 drivers/media/platform/exynos-gsc/gsc-m2m.c struct gsc_ctx *ctx = q->drv_priv; q 258 drivers/media/platform/exynos4-is/fimc-capture.c static int start_streaming(struct vb2_queue *q, unsigned int count) q 260 drivers/media/platform/exynos4-is/fimc-capture.c struct fimc_ctx *ctx = q->drv_priv; q 289 drivers/media/platform/exynos4-is/fimc-capture.c static void stop_streaming(struct vb2_queue *q) q 291 drivers/media/platform/exynos4-is/fimc-capture.c struct fimc_ctx *ctx = q->drv_priv; q 1734 drivers/media/platform/exynos4-is/fimc-capture.c struct vb2_queue *q = &fimc->vid_cap.vbq; q 1759 drivers/media/platform/exynos4-is/fimc-capture.c vfd->queue = q; q 1772 drivers/media/platform/exynos4-is/fimc-capture.c memset(q, 0, sizeof(*q)); q 1773 drivers/media/platform/exynos4-is/fimc-capture.c q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; q 1774 drivers/media/platform/exynos4-is/fimc-capture.c q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF; q 1775 drivers/media/platform/exynos4-is/fimc-capture.c q->drv_priv = ctx; q 1776 drivers/media/platform/exynos4-is/fimc-capture.c q->ops = &fimc_capture_qops; q 1777 drivers/media/platform/exynos4-is/fimc-capture.c q->mem_ops = &vb2_dma_contig_memops; q 1778 drivers/media/platform/exynos4-is/fimc-capture.c q->buf_struct_size = sizeof(struct fimc_vid_buffer); q 1779 drivers/media/platform/exynos4-is/fimc-capture.c q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; q 1780 drivers/media/platform/exynos4-is/fimc-capture.c q->lock = &fimc->lock; q 1781 drivers/media/platform/exynos4-is/fimc-capture.c q->dev = &fimc->pdev->dev; q 1783 drivers/media/platform/exynos4-is/fimc-capture.c ret = vb2_queue_init(q); q 76 drivers/media/platform/exynos4-is/fimc-isp-video.c static int isp_video_capture_start_streaming(struct vb2_queue *q, q 79 drivers/media/platform/exynos4-is/fimc-isp-video.c struct fimc_isp *isp = vb2_get_drv_priv(q); q 119 drivers/media/platform/exynos4-is/fimc-isp-video.c static void isp_video_capture_stop_streaming(struct vb2_queue *q) q 121 drivers/media/platform/exynos4-is/fimc-isp-video.c struct fimc_isp *isp = vb2_get_drv_priv(q); q 568 drivers/media/platform/exynos4-is/fimc-isp-video.c struct vb2_queue *q = &isp->video_capture.vb_queue; q 588 drivers/media/platform/exynos4-is/fimc-isp-video.c memset(q, 0, sizeof(*q)); q 589 drivers/media/platform/exynos4-is/fimc-isp-video.c q->type = type; q 590 drivers/media/platform/exynos4-is/fimc-isp-video.c q->io_modes = VB2_MMAP | VB2_USERPTR; q 591 drivers/media/platform/exynos4-is/fimc-isp-video.c q->ops = &isp_video_capture_qops; q 592 drivers/media/platform/exynos4-is/fimc-isp-video.c q->mem_ops = &vb2_dma_contig_memops; q 593 drivers/media/platform/exynos4-is/fimc-isp-video.c q->buf_struct_size = sizeof(struct isp_video_buf); q 594 drivers/media/platform/exynos4-is/fimc-isp-video.c q->drv_priv = isp; q 595 drivers/media/platform/exynos4-is/fimc-isp-video.c q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; q 596 drivers/media/platform/exynos4-is/fimc-isp-video.c q->lock = &isp->video_lock; q 597 drivers/media/platform/exynos4-is/fimc-isp-video.c q->dev = &isp->pdev->dev; q 599 drivers/media/platform/exynos4-is/fimc-isp-video.c ret = vb2_queue_init(q); q 606 drivers/media/platform/exynos4-is/fimc-isp-video.c vdev->queue = q; q 304 drivers/media/platform/exynos4-is/fimc-lite.c static int start_streaming(struct vb2_queue *q, unsigned int count) q 306 drivers/media/platform/exynos4-is/fimc-lite.c struct fimc_lite *fimc = q->drv_priv; q 338 
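The gsc-m2m and exynos4-is start/stop callbacks above all reduce to the same obligation: start the hardware, and on a failed start or on teardown hand every buffer the driver still owns back through vb2_buffer_done(), QUEUED after a failed start and ERROR on stop. A sketch under the same placeholder names as the previous fragment, additionally assuming my_dev carries a buf_list list_head; my_hw_start/my_hw_stop are placeholders:

int my_hw_start(struct my_dev *dev);	/* placeholder hardware start */
void my_hw_stop(struct my_dev *dev);	/* placeholder hardware stop */

static void my_return_all_buffers(struct my_dev *dev,
				  enum vb2_buffer_state state)
{
	struct my_buffer *buf, *tmp;

	/* give every buffer we still own back to vb2 */
	list_for_each_entry_safe(buf, tmp, &dev->buf_list, list) {
		list_del(&buf->list);
		vb2_buffer_done(&buf->vbb.vb2_buf, state);
	}
}

static int my_start_streaming(struct vb2_queue *q, unsigned int count)
{
	struct my_dev *dev = vb2_get_drv_priv(q);
	int ret = my_hw_start(dev);

	if (ret)
		my_return_all_buffers(dev, VB2_BUF_STATE_QUEUED);
	return ret;
}

static void my_stop_streaming(struct vb2_queue *q)
{
	struct my_dev *dev = vb2_get_drv_priv(q);

	my_hw_stop(dev);
	my_return_all_buffers(dev, VB2_BUF_STATE_ERROR);
}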
drivers/media/platform/exynos4-is/fimc-lite.c static void stop_streaming(struct vb2_queue *q) q 340 drivers/media/platform/exynos4-is/fimc-lite.c struct fimc_lite *fimc = q->drv_priv; q 1255 drivers/media/platform/exynos4-is/fimc-lite.c struct vb2_queue *q = &fimc->vb_queue; q 1270 drivers/media/platform/exynos4-is/fimc-lite.c vfd->queue = q; q 1277 drivers/media/platform/exynos4-is/fimc-lite.c memset(q, 0, sizeof(*q)); q 1278 drivers/media/platform/exynos4-is/fimc-lite.c q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; q 1279 drivers/media/platform/exynos4-is/fimc-lite.c q->io_modes = VB2_MMAP | VB2_USERPTR; q 1280 drivers/media/platform/exynos4-is/fimc-lite.c q->ops = &fimc_lite_qops; q 1281 drivers/media/platform/exynos4-is/fimc-lite.c q->mem_ops = &vb2_dma_contig_memops; q 1282 drivers/media/platform/exynos4-is/fimc-lite.c q->buf_struct_size = sizeof(struct flite_buffer); q 1283 drivers/media/platform/exynos4-is/fimc-lite.c q->drv_priv = fimc; q 1284 drivers/media/platform/exynos4-is/fimc-lite.c q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; q 1285 drivers/media/platform/exynos4-is/fimc-lite.c q->lock = &fimc->lock; q 1286 drivers/media/platform/exynos4-is/fimc-lite.c q->dev = &fimc->pdev->dev; q 1288 drivers/media/platform/exynos4-is/fimc-lite.c ret = vb2_queue_init(q); q 73 drivers/media/platform/exynos4-is/fimc-m2m.c static int start_streaming(struct vb2_queue *q, unsigned int count) q 75 drivers/media/platform/exynos4-is/fimc-m2m.c struct fimc_ctx *ctx = q->drv_priv; q 82 drivers/media/platform/exynos4-is/fimc-m2m.c static void stop_streaming(struct vb2_queue *q) q 84 drivers/media/platform/exynos4-is/fimc-m2m.c struct fimc_ctx *ctx = q->drv_priv; q 1256 drivers/media/platform/fsl-viu.c struct videobuf_queue *q = &fh->vb_vidq; q 1268 drivers/media/platform/fsl-viu.c res |= videobuf_poll_stream(file, q, wait); q 1431 drivers/media/platform/imx-pxp.c static int pxp_start_streaming(struct vb2_queue *q, unsigned int count) q 1433 drivers/media/platform/imx-pxp.c struct pxp_ctx *ctx = vb2_get_drv_priv(q); q 1434 drivers/media/platform/imx-pxp.c struct pxp_q_data *q_data = get_q_data(ctx, q->type); q 1440 drivers/media/platform/imx-pxp.c static void pxp_stop_streaming(struct vb2_queue *q) q 1442 drivers/media/platform/imx-pxp.c struct pxp_ctx *ctx = vb2_get_drv_priv(q); q 1447 drivers/media/platform/imx-pxp.c if (V4L2_TYPE_IS_OUTPUT(q->type)) q 556 drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c static int mtk_jpeg_queue_setup(struct vb2_queue *q, q 562 drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c struct mtk_jpeg_ctx *ctx = vb2_get_drv_priv(q); q 568 drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c q->type, *num_buffers); q 570 drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c q_data = mtk_jpeg_get_q_data(ctx, q->type); q 706 drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c static int mtk_jpeg_start_streaming(struct vb2_queue *q, unsigned int count) q 708 drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c struct mtk_jpeg_ctx *ctx = vb2_get_drv_priv(q); q 718 drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c while ((vb = mtk_jpeg_buf_remove(ctx, q->type))) q 723 drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c static void mtk_jpeg_stop_streaming(struct vb2_queue *q) q 725 drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c struct mtk_jpeg_ctx *ctx = vb2_get_drv_priv(q); q 734 drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c !V4L2_TYPE_IS_OUTPUT(q->type)) { q 741 drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c } else if (V4L2_TYPE_IS_OUTPUT(q->type)) { q 745 
drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c while ((vb = mtk_jpeg_buf_remove(ctx, q->type))) q 958 drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c struct mtk_jpeg_q_data *q = &ctx->out_q; q 966 drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c q->fmt = mtk_jpeg_find_format(ctx, V4L2_PIX_FMT_JPEG, q 968 drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c q->w = MTK_JPEG_MIN_WIDTH; q 969 drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c q->h = MTK_JPEG_MIN_HEIGHT; q 970 drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c q->bytesperline[0] = 0; q 971 drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c q->sizeimage[0] = MTK_JPEG_DEFAULT_SIZEIMAGE; q 973 drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c q = &ctx->cap_q; q 974 drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c q->fmt = mtk_jpeg_find_format(ctx, V4L2_PIX_FMT_YUV420M, q 976 drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c q->w = MTK_JPEG_MIN_WIDTH; q 977 drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c q->h = MTK_JPEG_MIN_HEIGHT; q 979 drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c for (i = 0; i < q->fmt->colplanes; i++) { q 980 drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c u32 stride = q->w * q->fmt->h_sample[i] / 4; q 981 drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c u32 h = q->h * q->fmt->v_sample[i] / 4; q 983 drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c q->bytesperline[i] = stride; q 984 drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c q->sizeimage[i] = stride * h; q 400 drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c static int mtk_mdp_m2m_start_streaming(struct vb2_queue *q, unsigned int count) q 402 drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c struct mtk_mdp_ctx *ctx = q->drv_priv; q 422 drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c static void mtk_mdp_m2m_stop_streaming(struct vb2_queue *q) q 424 drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c struct mtk_mdp_ctx *ctx = q->drv_priv; q 427 drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c vb = mtk_mdp_m2m_buf_remove(ctx, q->type); q 430 drivers/media/platform/mtk-mdp/mtk_mdp_m2m.c vb = mtk_mdp_m2m_buf_remove(ctx, q->type); q 845 drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c vb2_is_busy(&ctx->m2m_ctx->out_q_ctx.q)) { q 851 drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c vb2_is_busy(&ctx->m2m_ctx->cap_q_ctx.q)) { q 1281 drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c static int vb2ops_vdec_start_streaming(struct vb2_queue *q, unsigned int count) q 1283 drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c struct mtk_vcodec_ctx *ctx = vb2_get_drv_priv(q); q 1291 drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c static void vb2ops_vdec_stop_streaming(struct vb2_queue *q) q 1294 drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c struct mtk_vcodec_ctx *ctx = vb2_get_drv_priv(q); q 1297 drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c ctx->id, q->type, ctx->state, ctx->decoded_frame_cnt); q 1299 drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { q 817 drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c static int vb2ops_venc_start_streaming(struct vb2_queue *q, unsigned int count) q 819 drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c struct mtk_vcodec_ctx *ctx = vb2_get_drv_priv(q); q 833 drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c if (V4L2_TYPE_IS_OUTPUT(q->type)) { q 834 drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c if (!vb2_start_streaming_called(&ctx->m2m_ctx->cap_q_ctx.q)) q 837 drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c if (!vb2_start_streaming_called(&ctx->m2m_ctx->out_q_ctx.q)) q 867 
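The mtk_jpeg default-format entries above derive each plane's bytesperline and sizeimage from per-plane sampling factors scaled so that 4 means full resolution. A runnable sketch of that arithmetic; the factor values below are illustrative for a three-plane 4:2:0 layout, not copied from the driver's format tables:

#include <stdio.h>

/* sampling factors scaled so 4 == full size; chroma planes of 4:2:0
 * are half-size in both directions (illustrative values) */
static const int colplanes = 3;
static const int h_sample[3] = { 4, 2, 2 };
static const int v_sample[3] = { 4, 2, 2 };

int main(void)
{
	unsigned int w = 640, h = 480;

	for (int i = 0; i < colplanes; i++) {
		unsigned int stride = w * h_sample[i] / 4;
		unsigned int lines  = h * v_sample[i] / 4;

		printf("plane %d: bytesperline=%u sizeimage=%u\n",
		       i, stride, stride * lines);
	}
	return 0;
}

For 640x480 this yields a 640-byte luma stride and two 320-byte chroma strides, matching the half-resolution chroma of 4:2:0.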
drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c for (i = 0; i < q->num_buffers; ++i) { q 868 drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c struct vb2_buffer *buf = vb2_get_buffer(q, i); q 876 drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c ctx->id, i, q->type, q 886 drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c static void vb2ops_venc_stop_streaming(struct vb2_queue *q) q 888 drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c struct mtk_vcodec_ctx *ctx = vb2_get_drv_priv(q); q 892 drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c mtk_v4l2_debug(2, "[%d]-> type=%d", ctx->id, q->type); q 894 drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { q 904 drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c if ((q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE && q 905 drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c vb2_is_streaming(&ctx->m2m_ctx->out_q_ctx.q)) || q 906 drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE && q 907 drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c vb2_is_streaming(&ctx->m2m_ctx->cap_q_ctx.q))) { q 909 drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c ctx->id, q->type, q 910 drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c vb2_is_streaming(&ctx->m2m_ctx->out_q_ctx.q), q 911 drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c vb2_is_streaming(&ctx->m2m_ctx->cap_q_ctx.q)); q 191 drivers/media/platform/omap3isp/ispvideo.h #define isp_video_queue_to_isp_video_fh(q) \ q 192 drivers/media/platform/omap3isp/ispvideo.h container_of(q, struct isp_video_fh, queue) q 320 drivers/media/platform/qcom/camss/camss-video.c static int video_queue_setup(struct vb2_queue *q, q 324 drivers/media/platform/qcom/camss/camss-video.c struct camss_video *video = vb2_get_drv_priv(q); q 430 drivers/media/platform/qcom/camss/camss-video.c static int video_start_streaming(struct vb2_queue *q, unsigned int count) q 432 drivers/media/platform/qcom/camss/camss-video.c struct camss_video *video = vb2_get_drv_priv(q); q 475 drivers/media/platform/qcom/camss/camss-video.c static void video_stop_streaming(struct vb2_queue *q) q 477 drivers/media/platform/qcom/camss/camss-video.c struct camss_video *video = vb2_get_drv_priv(q); q 854 drivers/media/platform/qcom/camss/camss-video.c struct vb2_queue *q; q 861 drivers/media/platform/qcom/camss/camss-video.c q = &video->vb2_q; q 862 drivers/media/platform/qcom/camss/camss-video.c q->drv_priv = video; q 863 drivers/media/platform/qcom/camss/camss-video.c q->mem_ops = &vb2_dma_sg_memops; q 864 drivers/media/platform/qcom/camss/camss-video.c q->ops = &msm_video_vb2_q_ops; q 865 drivers/media/platform/qcom/camss/camss-video.c q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; q 866 drivers/media/platform/qcom/camss/camss-video.c q->io_modes = VB2_DMABUF | VB2_MMAP | VB2_READ; q 867 drivers/media/platform/qcom/camss/camss-video.c q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; q 868 drivers/media/platform/qcom/camss/camss-video.c q->buf_struct_size = sizeof(struct camss_buffer); q 869 drivers/media/platform/qcom/camss/camss-video.c q->dev = video->camss->dev; q 870 drivers/media/platform/qcom/camss/camss-video.c q->lock = &video->q_lock; q 871 drivers/media/platform/qcom/camss/camss-video.c ret = vb2_queue_init(q); q 1120 drivers/media/platform/qcom/venus/helpers.c void venus_helper_vb2_stop_streaming(struct vb2_queue *q) q 1122 drivers/media/platform/qcom/venus/helpers.c struct venus_inst *inst = vb2_get_drv_priv(q); q 1149 
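The mtk_vcodec_enc entries above walk every buffer of a queue with vb2_get_buffer() up to q->num_buffers. A sketch of using that walk to return buffers the driver still holds; the VB2_BUF_STATE_ACTIVE filter is an assumption about which buffers are driver-owned, not lifted from the listing:

static void my_return_owned_buffers(struct vb2_queue *q)
{
	unsigned int i;

	for (i = 0; i < q->num_buffers; ++i) {
		struct vb2_buffer *vb = vb2_get_buffer(q, i);

		/* only buffers the driver currently holds may be returned */
		if (vb && vb->state == VB2_BUF_STATE_ACTIVE)
			vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
	}
}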
drivers/media/platform/qcom/venus/helpers.c if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) q 22 drivers/media/platform/qcom/venus/helpers.h void venus_helper_vb2_stop_streaming(struct vb2_queue *q); q 718 drivers/media/platform/qcom/venus/vdec.c static int vdec_queue_setup(struct vb2_queue *q, q 722 drivers/media/platform/qcom/venus/vdec.c struct venus_inst *inst = vb2_get_drv_priv(q); q 729 drivers/media/platform/qcom/venus/vdec.c if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE && q 733 drivers/media/platform/qcom/venus/vdec.c if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE && q 737 drivers/media/platform/qcom/venus/vdec.c if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE && q 741 drivers/media/platform/qcom/venus/vdec.c if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE && q 756 drivers/media/platform/qcom/venus/vdec.c switch (q->type) { q 942 drivers/media/platform/qcom/venus/vdec.c static int vdec_start_streaming(struct vb2_queue *q, unsigned int count) q 944 drivers/media/platform/qcom/venus/vdec.c struct venus_inst *inst = vb2_get_drv_priv(q); q 949 drivers/media/platform/qcom/venus/vdec.c if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) q 1022 drivers/media/platform/qcom/venus/vdec.c static void vdec_stop_streaming(struct vb2_queue *q) q 1024 drivers/media/platform/qcom/venus/vdec.c struct venus_inst *inst = vb2_get_drv_priv(q); q 1029 drivers/media/platform/qcom/venus/vdec.c if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) q 1039 drivers/media/platform/qcom/venus/vdec.c if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) q 873 drivers/media/platform/qcom/venus/venc.c static int venc_queue_setup(struct vb2_queue *q, q 877 drivers/media/platform/qcom/venus/venc.c struct venus_inst *inst = vb2_get_drv_priv(q); q 882 drivers/media/platform/qcom/venus/venc.c if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE && q 886 drivers/media/platform/qcom/venus/venc.c if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE && q 890 drivers/media/platform/qcom/venus/venc.c if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE && q 894 drivers/media/platform/qcom/venus/venc.c if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE && q 901 drivers/media/platform/qcom/venus/venc.c switch (q->type) { q 964 drivers/media/platform/qcom/venus/venc.c static int venc_start_streaming(struct vb2_queue *q, unsigned int count) q 966 drivers/media/platform/qcom/venus/venc.c struct venus_inst *inst = vb2_get_drv_priv(q); q 971 drivers/media/platform/qcom/venus/venc.c if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) q 1015 drivers/media/platform/qcom/venus/venc.c if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) q 1271 drivers/media/platform/rcar-vin/rcar-dma.c struct vb2_queue *q = &vin->queue; q 1290 drivers/media/platform/rcar-vin/rcar-dma.c q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; q 1291 drivers/media/platform/rcar-vin/rcar-dma.c q->io_modes = VB2_MMAP | VB2_READ | VB2_DMABUF; q 1292 drivers/media/platform/rcar-vin/rcar-dma.c q->lock = &vin->lock; q 1293 drivers/media/platform/rcar-vin/rcar-dma.c q->drv_priv = vin; q 1294 drivers/media/platform/rcar-vin/rcar-dma.c q->buf_struct_size = sizeof(struct rvin_buffer); q 1295 drivers/media/platform/rcar-vin/rcar-dma.c q->ops = &rvin_qops; q 1296 drivers/media/platform/rcar-vin/rcar-dma.c q->mem_ops = &vb2_dma_contig_memops; q 1297 drivers/media/platform/rcar-vin/rcar-dma.c q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; q 1298 drivers/media/platform/rcar-vin/rcar-dma.c q->min_buffers_needed = 4; q 1299 drivers/media/platform/rcar-vin/rcar-dma.c q->dev = vin->dev; q 1301 
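The venus vdec/venc queue_setup entries, like saa7134_ts_queue_setup and tw5864_queue_setup earlier, implement the standard vb2 queue_setup contract: if *num_planes is already non-zero the call is a VIDIOC_CREATE_BUFS re-check against caller-provided sizes, otherwise the driver reports the plane count and minimum sizes itself. A sketch, assuming a sizeimage field on the placeholder my_dev:

static int my_queue_setup(struct vb2_queue *q, unsigned int *num_buffers,
			  unsigned int *num_planes, unsigned int sizes[],
			  struct device *alloc_devs[])
{
	struct my_dev *dev = vb2_get_drv_priv(q);

	if (*num_planes) {
		/* CREATE_BUFS path: layout is caller-provided, only
		 * verify it is large enough for the current format */
		return sizes[0] < dev->sizeimage ? -EINVAL : 0;
	}

	*num_planes = 1;
	sizes[0] = dev->sizeimage;	/* from the negotiated format */
	return 0;
}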
drivers/media/platform/rcar-vin/rcar-dma.c ret = vb2_queue_init(q); q 926 drivers/media/platform/rcar_drif.c struct vb2_queue *q = &sdr->vb_queue; q 929 drivers/media/platform/rcar_drif.c if (vb2_is_busy(q)) q 1926 drivers/media/platform/rcar_fdp1.c static int fdp1_start_streaming(struct vb2_queue *q, unsigned int count) q 1928 drivers/media/platform/rcar_fdp1.c struct fdp1_ctx *ctx = vb2_get_drv_priv(q); q 1929 drivers/media/platform/rcar_fdp1.c struct fdp1_q_data *q_data = get_q_data(ctx, q->type); q 1931 drivers/media/platform/rcar_fdp1.c if (V4L2_TYPE_IS_OUTPUT(q->type)) { q 1965 drivers/media/platform/rcar_fdp1.c static void fdp1_stop_streaming(struct vb2_queue *q) q 1967 drivers/media/platform/rcar_fdp1.c struct fdp1_ctx *ctx = vb2_get_drv_priv(q); q 1972 drivers/media/platform/rcar_fdp1.c if (V4L2_TYPE_IS_OUTPUT(q->type)) q 1984 drivers/media/platform/rcar_fdp1.c if (V4L2_TYPE_IS_OUTPUT(q->type)) { q 1401 drivers/media/platform/renesas-ceu.c struct vb2_queue *q = &ceudev->vb2_vq; q 1406 drivers/media/platform/renesas-ceu.c q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; q 1407 drivers/media/platform/renesas-ceu.c q->io_modes = VB2_MMAP | VB2_DMABUF; q 1408 drivers/media/platform/renesas-ceu.c q->drv_priv = ceudev; q 1409 drivers/media/platform/renesas-ceu.c q->ops = &ceu_vb2_ops; q 1410 drivers/media/platform/renesas-ceu.c q->mem_ops = &vb2_dma_contig_memops; q 1411 drivers/media/platform/renesas-ceu.c q->buf_struct_size = sizeof(struct ceu_buffer); q 1412 drivers/media/platform/renesas-ceu.c q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; q 1413 drivers/media/platform/renesas-ceu.c q->min_buffers_needed = 2; q 1414 drivers/media/platform/renesas-ceu.c q->lock = &ceudev->mlock; q 1415 drivers/media/platform/renesas-ceu.c q->dev = ceudev->v4l2_dev.dev; q 1417 drivers/media/platform/renesas-ceu.c ret = vb2_queue_init(q); q 59 drivers/media/platform/rockchip/rga/rga-buf.c static void rga_buf_return_buffers(struct vb2_queue *q, q 62 drivers/media/platform/rockchip/rga/rga-buf.c struct rga_ctx *ctx = vb2_get_drv_priv(q); q 66 drivers/media/platform/rockchip/rga/rga-buf.c if (V4L2_TYPE_IS_OUTPUT(q->type)) q 76 drivers/media/platform/rockchip/rga/rga-buf.c static int rga_buf_start_streaming(struct vb2_queue *q, unsigned int count) q 78 drivers/media/platform/rockchip/rga/rga-buf.c struct rga_ctx *ctx = vb2_get_drv_priv(q); q 84 drivers/media/platform/rockchip/rga/rga-buf.c rga_buf_return_buffers(q, VB2_BUF_STATE_QUEUED); q 91 drivers/media/platform/rockchip/rga/rga-buf.c static void rga_buf_stop_streaming(struct vb2_queue *q) q 93 drivers/media/platform/rockchip/rga/rga-buf.c struct rga_ctx *ctx = vb2_get_drv_priv(q); q 96 drivers/media/platform/rockchip/rga/rga-buf.c rga_buf_return_buffers(q, VB2_BUF_STATE_ERROR); q 1102 drivers/media/platform/s3c-camif/camif-capture.c struct vb2_queue *q = &vp->vb_queue; q 1122 drivers/media/platform/s3c-camif/camif-capture.c memset(q, 0, sizeof(*q)); q 1123 drivers/media/platform/s3c-camif/camif-capture.c q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; q 1124 drivers/media/platform/s3c-camif/camif-capture.c q->io_modes = VB2_MMAP | VB2_USERPTR; q 1125 drivers/media/platform/s3c-camif/camif-capture.c q->ops = &s3c_camif_qops; q 1126 drivers/media/platform/s3c-camif/camif-capture.c q->mem_ops = &vb2_dma_contig_memops; q 1127 drivers/media/platform/s3c-camif/camif-capture.c q->buf_struct_size = sizeof(struct camif_buffer); q 1128 drivers/media/platform/s3c-camif/camif-capture.c q->drv_priv = vp; q 1129 drivers/media/platform/s3c-camif/camif-capture.c 
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; q 1130 drivers/media/platform/s3c-camif/camif-capture.c q->lock = &vp->camif->lock; q 1131 drivers/media/platform/s3c-camif/camif-capture.c q->dev = camif->v4l2_dev.dev; q 1133 drivers/media/platform/s3c-camif/camif-capture.c ret = vb2_queue_init(q); q 2579 drivers/media/platform/s5p-jpeg/jpeg-core.c static int s5p_jpeg_start_streaming(struct vb2_queue *q, unsigned int count) q 2581 drivers/media/platform/s5p-jpeg/jpeg-core.c struct s5p_jpeg_ctx *ctx = vb2_get_drv_priv(q); q 2589 drivers/media/platform/s5p-jpeg/jpeg-core.c static void s5p_jpeg_stop_streaming(struct vb2_queue *q) q 2591 drivers/media/platform/s5p-jpeg/jpeg-core.c struct s5p_jpeg_ctx *ctx = vb2_get_drv_priv(q); q 2599 drivers/media/platform/s5p-jpeg/jpeg-core.c q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) { q 756 drivers/media/platform/s5p-mfc/s5p_mfc.c struct vb2_queue *q; q 843 drivers/media/platform/s5p-mfc/s5p_mfc.c q = &ctx->vq_dst; q 844 drivers/media/platform/s5p-mfc/s5p_mfc.c q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; q 845 drivers/media/platform/s5p-mfc/s5p_mfc.c q->drv_priv = &ctx->fh; q 846 drivers/media/platform/s5p-mfc/s5p_mfc.c q->lock = &dev->mfc_mutex; q 848 drivers/media/platform/s5p-mfc/s5p_mfc.c q->io_modes = VB2_MMAP; q 849 drivers/media/platform/s5p-mfc/s5p_mfc.c q->ops = get_dec_queue_ops(); q 851 drivers/media/platform/s5p-mfc/s5p_mfc.c q->io_modes = VB2_MMAP | VB2_USERPTR; q 852 drivers/media/platform/s5p-mfc/s5p_mfc.c q->ops = get_enc_queue_ops(); q 861 drivers/media/platform/s5p-mfc/s5p_mfc.c q->dma_attrs = DMA_ATTR_ALLOC_SINGLE_PAGES; q 862 drivers/media/platform/s5p-mfc/s5p_mfc.c q->mem_ops = &vb2_dma_contig_memops; q 863 drivers/media/platform/s5p-mfc/s5p_mfc.c q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY; q 864 drivers/media/platform/s5p-mfc/s5p_mfc.c ret = vb2_queue_init(q); q 870 drivers/media/platform/s5p-mfc/s5p_mfc.c q = &ctx->vq_src; q 871 drivers/media/platform/s5p-mfc/s5p_mfc.c q->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE; q 872 drivers/media/platform/s5p-mfc/s5p_mfc.c q->drv_priv = &ctx->fh; q 873 drivers/media/platform/s5p-mfc/s5p_mfc.c q->lock = &dev->mfc_mutex; q 875 drivers/media/platform/s5p-mfc/s5p_mfc.c q->io_modes = VB2_MMAP; q 876 drivers/media/platform/s5p-mfc/s5p_mfc.c q->ops = get_dec_queue_ops(); q 878 drivers/media/platform/s5p-mfc/s5p_mfc.c q->io_modes = VB2_MMAP | VB2_USERPTR; q 879 drivers/media/platform/s5p-mfc/s5p_mfc.c q->ops = get_enc_queue_ops(); q 890 drivers/media/platform/s5p-mfc/s5p_mfc.c q->allow_zero_bytesused = 1; q 896 drivers/media/platform/s5p-mfc/s5p_mfc.c q->dma_attrs = DMA_ATTR_ALLOC_SINGLE_PAGES; q 897 drivers/media/platform/s5p-mfc/s5p_mfc.c q->mem_ops = &vb2_dma_contig_memops; q 898 drivers/media/platform/s5p-mfc/s5p_mfc.c q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY; q 899 drivers/media/platform/s5p-mfc/s5p_mfc.c ret = vb2_queue_init(q); q 994 drivers/media/platform/s5p-mfc/s5p_mfc_dec.c static int s5p_mfc_start_streaming(struct vb2_queue *q, unsigned int count) q 996 drivers/media/platform/s5p-mfc/s5p_mfc_dec.c struct s5p_mfc_ctx *ctx = fh_to_ctx(q->drv_priv); q 1010 drivers/media/platform/s5p-mfc/s5p_mfc_dec.c static void s5p_mfc_stop_streaming(struct vb2_queue *q) q 1013 drivers/media/platform/s5p-mfc/s5p_mfc_dec.c struct s5p_mfc_ctx *ctx = fh_to_ctx(q->drv_priv); q 1028 drivers/media/platform/s5p-mfc/s5p_mfc_dec.c if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { q 1044 drivers/media/platform/s5p-mfc/s5p_mfc_dec.c } else if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { q 2489 
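The s5p_mfc entries above initialize two queues per context, a CAPTURE_MPLANE vq_dst and an OUTPUT_MPLANE vq_src, sharing one lock and using V4L2_BUF_FLAG_TIMESTAMP_COPY so timestamps propagate from input to output buffers. A condensed sketch of that dual-queue setup; my_ctx and the two ops tables are placeholders:

struct my_ctx {				/* placeholder m2m context */
	struct vb2_queue vq_src, vq_dst;
};

extern const struct vb2_ops my_src_qops, my_dst_qops;

static int my_m2m_queues_init(struct my_ctx *ctx, struct mutex *lock)
{
	struct vb2_queue *q = &ctx->vq_dst;	/* CAPTURE: results out */
	int ret;

	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
	q->io_modes = VB2_MMAP;
	q->drv_priv = ctx;
	q->ops = &my_dst_qops;
	q->buf_struct_size = sizeof(struct vb2_v4l2_buffer);
	q->mem_ops = &vb2_dma_contig_memops;
	q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY; /* m2m: copy in->out */
	q->lock = lock;
	ret = vb2_queue_init(q);
	if (ret)
		return ret;

	q = &ctx->vq_src;			/* OUTPUT: bitstream/raw data in */
	q->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
	q->io_modes = VB2_MMAP;
	q->drv_priv = ctx;
	q->ops = &my_src_qops;
	q->buf_struct_size = sizeof(struct vb2_v4l2_buffer);
	q->mem_ops = &vb2_dma_contig_memops;
	q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
	q->lock = lock;
	return vb2_queue_init(q);
}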
drivers/media/platform/s5p-mfc/s5p_mfc_enc.c static int s5p_mfc_start_streaming(struct vb2_queue *q, unsigned int count) q 2491 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c struct s5p_mfc_ctx *ctx = fh_to_ctx(q->drv_priv); q 2495 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)) { q 2519 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c static void s5p_mfc_stop_streaming(struct vb2_queue *q) q 2522 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c struct s5p_mfc_ctx *ctx = fh_to_ctx(q->drv_priv); q 2534 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { q 2539 drivers/media/platform/s5p-mfc/s5p_mfc_enc.c if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { q 1224 drivers/media/platform/sh_vou.c struct vb2_queue *q; q 1288 drivers/media/platform/sh_vou.c q = &vou_dev->queue; q 1289 drivers/media/platform/sh_vou.c q->type = V4L2_BUF_TYPE_VIDEO_OUTPUT; q 1290 drivers/media/platform/sh_vou.c q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_WRITE; q 1291 drivers/media/platform/sh_vou.c q->drv_priv = vou_dev; q 1292 drivers/media/platform/sh_vou.c q->buf_struct_size = sizeof(struct sh_vou_buffer); q 1293 drivers/media/platform/sh_vou.c q->ops = &sh_vou_qops; q 1294 drivers/media/platform/sh_vou.c q->mem_ops = &vb2_dma_contig_memops; q 1295 drivers/media/platform/sh_vou.c q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; q 1296 drivers/media/platform/sh_vou.c q->min_buffers_needed = 2; q 1297 drivers/media/platform/sh_vou.c q->lock = &vou_dev->fop_lock; q 1298 drivers/media/platform/sh_vou.c q->dev = &pdev->dev; q 1299 drivers/media/platform/sh_vou.c ret = vb2_queue_init(q); q 1303 drivers/media/platform/sh_vou.c vdev->queue = q; q 498 drivers/media/platform/sti/bdisp/bdisp-v4l2.c static int bdisp_start_streaming(struct vb2_queue *q, unsigned int count) q 500 drivers/media/platform/sti/bdisp/bdisp-v4l2.c struct bdisp_ctx *ctx = q->drv_priv; q 507 drivers/media/platform/sti/bdisp/bdisp-v4l2.c if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) { q 521 drivers/media/platform/sti/bdisp/bdisp-v4l2.c static void bdisp_stop_streaming(struct vb2_queue *q) q 523 drivers/media/platform/sti/bdisp/bdisp-v4l2.c struct bdisp_ctx *ctx = q->drv_priv; q 1117 drivers/media/platform/sti/delta/delta-v4l2.c struct vb2_queue *q = vb->vb2_queue; q 1118 drivers/media/platform/sti/delta/delta-v4l2.c struct delta_ctx *ctx = vb2_get_drv_priv(q); q 1297 drivers/media/platform/sti/delta/delta-v4l2.c struct vb2_queue *q = vb->vb2_queue; q 1298 drivers/media/platform/sti/delta/delta-v4l2.c struct delta_ctx *ctx = vb2_get_drv_priv(q); q 1304 drivers/media/platform/sti/delta/delta-v4l2.c static int delta_vb2_au_start_streaming(struct vb2_queue *q, q 1307 drivers/media/platform/sti/delta/delta-v4l2.c struct delta_ctx *ctx = vb2_get_drv_priv(q); q 1398 drivers/media/platform/sti/delta/delta-v4l2.c static void delta_vb2_au_stop_streaming(struct vb2_queue *q) q 1400 drivers/media/platform/sti/delta/delta-v4l2.c struct delta_ctx *ctx = vb2_get_drv_priv(q); q 1463 drivers/media/platform/sti/delta/delta-v4l2.c struct vb2_queue *q = vb->vb2_queue; q 1464 drivers/media/platform/sti/delta/delta-v4l2.c struct delta_ctx *ctx = vb2_get_drv_priv(q); q 1509 drivers/media/platform/sti/delta/delta-v4l2.c struct vb2_queue *q = vb->vb2_queue; q 1510 drivers/media/platform/sti/delta/delta-v4l2.c struct delta_ctx *ctx = vb2_get_drv_priv(q); q 1528 drivers/media/platform/sti/delta/delta-v4l2.c static void delta_vb2_frame_stop_streaming(struct vb2_queue *q) q 1530 
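The delta-v4l2.c entries above recover the driver context from a buffer via vb->vb2_queue, the usual route in buf_prepare/buf_queue callbacks. A sketch of that pattern; the payload check is illustrative, reusing the placeholder my_dev with a sizeimage field rather than anything from the delta driver:

static int my_buf_prepare(struct vb2_buffer *vb)
{
	struct vb2_queue *q = vb->vb2_queue;
	struct my_dev *dev = vb2_get_drv_priv(q);

	/* reject buffers too small for the negotiated format */
	if (vb2_plane_size(vb, 0) < dev->sizeimage)
		return -EINVAL;

	vb2_set_plane_payload(vb, 0, dev->sizeimage);
	return 0;
}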
drivers/media/platform/sti/delta/delta-v4l2.c struct delta_ctx *ctx = vb2_get_drv_priv(q); q 1586 drivers/media/platform/sti/delta/delta-v4l2.c struct vb2_queue *q; q 1592 drivers/media/platform/sti/delta/delta-v4l2.c q = src_vq; q 1593 drivers/media/platform/sti/delta/delta-v4l2.c q->type = V4L2_BUF_TYPE_VIDEO_OUTPUT; q 1594 drivers/media/platform/sti/delta/delta-v4l2.c q->io_modes = VB2_MMAP | VB2_DMABUF; q 1595 drivers/media/platform/sti/delta/delta-v4l2.c q->drv_priv = ctx; q 1597 drivers/media/platform/sti/delta/delta-v4l2.c q->buf_struct_size = sizeof(struct delta_au); q 1598 drivers/media/platform/sti/delta/delta-v4l2.c q->ops = &delta_vb2_au_ops; q 1599 drivers/media/platform/sti/delta/delta-v4l2.c q->mem_ops = &vb2_dma_contig_memops; q 1600 drivers/media/platform/sti/delta/delta-v4l2.c q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY; q 1601 drivers/media/platform/sti/delta/delta-v4l2.c q->lock = &delta->lock; q 1602 drivers/media/platform/sti/delta/delta-v4l2.c q->dev = delta->dev; q 1604 drivers/media/platform/sti/delta/delta-v4l2.c ret = vb2_queue_init(q); q 1609 drivers/media/platform/sti/delta/delta-v4l2.c q = dst_vq; q 1610 drivers/media/platform/sti/delta/delta-v4l2.c q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; q 1611 drivers/media/platform/sti/delta/delta-v4l2.c q->io_modes = VB2_MMAP | VB2_DMABUF; q 1612 drivers/media/platform/sti/delta/delta-v4l2.c q->drv_priv = ctx; q 1614 drivers/media/platform/sti/delta/delta-v4l2.c q->buf_struct_size = sizeof(struct delta_frame) q 1616 drivers/media/platform/sti/delta/delta-v4l2.c q->ops = &delta_vb2_frame_ops; q 1617 drivers/media/platform/sti/delta/delta-v4l2.c q->mem_ops = &vb2_dma_contig_memops; q 1618 drivers/media/platform/sti/delta/delta-v4l2.c q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY; q 1619 drivers/media/platform/sti/delta/delta-v4l2.c q->lock = &delta->lock; q 1620 drivers/media/platform/sti/delta/delta-v4l2.c q->dev = delta->dev; q 1622 drivers/media/platform/sti/delta/delta-v4l2.c return vb2_queue_init(q); q 1009 drivers/media/platform/sti/hva/hva-v4l2.c if (!vb2_start_streaming_called(&ctx->fh.m2m_ctx->cap_q_ctx.q)) q 1012 drivers/media/platform/sti/hva/hva-v4l2.c if (!vb2_start_streaming_called(&ctx->fh.m2m_ctx->out_q_ctx.q)) q 1089 drivers/media/platform/sti/hva/hva-v4l2.c vb2_is_streaming(&ctx->fh.m2m_ctx->cap_q_ctx.q)) || q 1091 drivers/media/platform/sti/hva/hva-v4l2.c vb2_is_streaming(&ctx->fh.m2m_ctx->out_q_ctx.q))) { q 1094 drivers/media/platform/sti/hva/hva-v4l2.c vb2_is_streaming(&ctx->fh.m2m_ctx->out_q_ctx.q), q 1095 drivers/media/platform/sti/hva/hva-v4l2.c vb2_is_streaming(&ctx->fh.m2m_ctx->cap_q_ctx.q)); q 1838 drivers/media/platform/stm32/stm32-dcmi.c struct vb2_queue *q; q 1930 drivers/media/platform/stm32/stm32-dcmi.c q = &dcmi->queue; q 1984 drivers/media/platform/stm32/stm32-dcmi.c q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; q 1985 drivers/media/platform/stm32/stm32-dcmi.c q->io_modes = VB2_MMAP | VB2_READ | VB2_DMABUF; q 1986 drivers/media/platform/stm32/stm32-dcmi.c q->lock = &dcmi->lock; q 1987 drivers/media/platform/stm32/stm32-dcmi.c q->drv_priv = dcmi; q 1988 drivers/media/platform/stm32/stm32-dcmi.c q->buf_struct_size = sizeof(struct dcmi_buf); q 1989 drivers/media/platform/stm32/stm32-dcmi.c q->ops = &dcmi_video_qops; q 1990 drivers/media/platform/stm32/stm32-dcmi.c q->mem_ops = &vb2_dma_contig_memops; q 1991 drivers/media/platform/stm32/stm32-dcmi.c q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; q 1992 drivers/media/platform/stm32/stm32-dcmi.c q->min_buffers_needed = 2; q 1993 
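The s5p-mfc, sh_vou, delta and stm32-dcmi entries above all instantiate the same vb2_queue setup pattern: fill in type, io_modes, drv_priv, buf_struct_size, ops, mem_ops, timestamp_flags, a serializing lock and the allocator device, then call vb2_queue_init(). A minimal sketch of that pattern follows; struct foo_dev, struct foo_buffer and foo_qops are hypothetical stand-ins, not names from the files indexed here.

    #include <linux/mutex.h>
    #include <media/v4l2-device.h>
    #include <media/videobuf2-v4l2.h>
    #include <media/videobuf2-dma-contig.h>

    /* Hypothetical driver state; only what the queue setup touches. */
    struct foo_buffer {
            struct vb2_v4l2_buffer vb;
            struct list_head list;
    };

    struct foo_dev {
            struct v4l2_device v4l2_dev;
            struct vb2_queue queue;
            struct mutex lock;
    };

    static const struct vb2_ops foo_qops; /* queue_setup/buf_queue etc. elided */

    static int foo_init_vb2_queue(struct foo_dev *foo)
    {
            struct vb2_queue *q = &foo->queue;

            q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
            q->io_modes = VB2_MMAP | VB2_DMABUF;
            q->drv_priv = foo;                /* what vb2_get_drv_priv() returns */
            q->buf_struct_size = sizeof(struct foo_buffer);
            q->ops = &foo_qops;
            q->mem_ops = &vb2_dma_contig_memops;
            q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
            q->min_buffers_needed = 2;        /* start_streaming needs this many */
            q->lock = &foo->lock;             /* serializes the queuing ioctls */
            q->dev = foo->v4l2_dev.dev;       /* device used for DMA allocation */

            return vb2_queue_init(q);         /* validates the fields above */
    }

Note that the mem2mem devices in the listing (s5p-mfc, delta) use V4L2_BUF_FLAG_TIMESTAMP_COPY instead of MONOTONIC, so output-buffer timestamps are propagated to the matching capture buffers.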
q 403 drivers/media/platform/sunxi/sun4i-csi/sun4i_dma.c struct vb2_queue *q = &csi->queue;
q 414 drivers/media/platform/sunxi/sun4i-csi/sun4i_dma.c q->min_buffers_needed = 3;
q 415 drivers/media/platform/sunxi/sun4i-csi/sun4i_dma.c q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
q 416 drivers/media/platform/sunxi/sun4i-csi/sun4i_dma.c q->io_modes = VB2_MMAP;
q 417 drivers/media/platform/sunxi/sun4i-csi/sun4i_dma.c q->lock = &csi->lock;
q 418 drivers/media/platform/sunxi/sun4i-csi/sun4i_dma.c q->drv_priv = csi;
q 419 drivers/media/platform/sunxi/sun4i-csi/sun4i_dma.c q->buf_struct_size = sizeof(struct sun4i_csi_buffer);
q 420 drivers/media/platform/sunxi/sun4i-csi/sun4i_dma.c q->ops = &sun4i_csi_qops;
q 421 drivers/media/platform/sunxi/sun4i-csi/sun4i_dma.c q->mem_ops = &vb2_dma_contig_memops;
q 422 drivers/media/platform/sunxi/sun4i-csi/sun4i_dma.c q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
q 423 drivers/media/platform/sunxi/sun4i-csi/sun4i_dma.c q->dev = csi->dev;
q 425 drivers/media/platform/sunxi/sun4i-csi/sun4i_dma.c ret = vb2_queue_init(q);
q 450 drivers/media/platform/sunxi/sun4i-csi/sun4i_dma.c vb2_queue_release(q);
q 1073 drivers/media/platform/ti-vpe/cal.c struct vb2_queue *q = &ctx->vb_vidq;
q 1078 drivers/media/platform/ti-vpe/cal.c if (vb2_is_busy(q)) {
q 1523 drivers/media/platform/ti-vpe/cal.c struct vb2_queue *q;
q 1534 drivers/media/platform/ti-vpe/cal.c q = &ctx->vb_vidq;
q 1535 drivers/media/platform/ti-vpe/cal.c q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
q 1536 drivers/media/platform/ti-vpe/cal.c q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_READ;
q 1537 drivers/media/platform/ti-vpe/cal.c q->drv_priv = ctx;
q 1538 drivers/media/platform/ti-vpe/cal.c q->buf_struct_size = sizeof(struct cal_buffer);
q 1539 drivers/media/platform/ti-vpe/cal.c q->ops = &cal_video_qops;
q 1540 drivers/media/platform/ti-vpe/cal.c q->mem_ops = &vb2_dma_contig_memops;
q 1541 drivers/media/platform/ti-vpe/cal.c q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
q 1542 drivers/media/platform/ti-vpe/cal.c q->lock = &ctx->mutex;
q 1543 drivers/media/platform/ti-vpe/cal.c q->min_buffers_needed = 3;
q 1544 drivers/media/platform/ti-vpe/cal.c q->dev = ctx->v4l2_dev.dev;
q 1546 drivers/media/platform/ti-vpe/cal.c ret = vb2_queue_init(q);
q 1556 drivers/media/platform/ti-vpe/cal.c vfd->queue = q;
q 2096 drivers/media/platform/ti-vpe/vpe.c static void vpe_return_all_buffers(struct vpe_ctx *ctx, struct vb2_queue *q,
q 2103 drivers/media/platform/ti-vpe/vpe.c if (V4L2_TYPE_IS_OUTPUT(q->type))
q 2119 drivers/media/platform/ti-vpe/vpe.c if (V4L2_TYPE_IS_OUTPUT(q->type)) {
q 2149 drivers/media/platform/ti-vpe/vpe.c static int vpe_start_streaming(struct vb2_queue *q, unsigned int count)
q 2151 drivers/media/platform/ti-vpe/vpe.c struct vpe_ctx *ctx = vb2_get_drv_priv(q);
q 2158 drivers/media/platform/ti-vpe/vpe.c vpe_return_all_buffers(ctx, q, VB2_BUF_STATE_QUEUED);
q 2171 drivers/media/platform/ti-vpe/vpe.c static void vpe_stop_streaming(struct vb2_queue *q)
q 2173 drivers/media/platform/ti-vpe/vpe.c struct vpe_ctx *ctx = vb2_get_drv_priv(q);
q 2178 drivers/media/platform/ti-vpe/vpe.c vpe_return_all_buffers(ctx, q, VB2_BUF_STATE_ERROR);
q 729 drivers/media/platform/vicodec/vicodec-core.c !vb2_is_streaming(&ctx->fh.m2m_ctx->cap_q_ctx.q))
q 1267 drivers/media/platform/vicodec/vicodec-core.c if (!vb2_is_streaming(&ctx->fh.m2m_ctx->cap_q_ctx.q) ||
q 1268 drivers/media/platform/vicodec/vicodec-core.c !vb2_is_streaming(&ctx->fh.m2m_ctx->out_q_ctx.q))
q 1279 drivers/media/platform/vicodec/vicodec-core.c vb2_clear_last_buffer_dequeued(&ctx->fh.m2m_ctx->cap_q_ctx.q);
q 1295 drivers/media/platform/vicodec/vicodec-core.c if (!vb2_is_streaming(&ctx->fh.m2m_ctx->cap_q_ctx.q) ||
q 1296 drivers/media/platform/vicodec/vicodec-core.c !vb2_is_streaming(&ctx->fh.m2m_ctx->out_q_ctx.q))
q 1307 drivers/media/platform/vicodec/vicodec-core.c vb2_clear_last_buffer_dequeued(&ctx->fh.m2m_ctx->cap_q_ctx.q);
q 1558 drivers/media/platform/vicodec/vicodec-core.c static void vicodec_return_bufs(struct vb2_queue *q, u32 state)
q 1560 drivers/media/platform/vicodec/vicodec-core.c struct vicodec_ctx *ctx = vb2_get_drv_priv(q);
q 1564 drivers/media/platform/vicodec/vicodec-core.c if (V4L2_TYPE_IS_OUTPUT(q->type))
q 1597 drivers/media/platform/vicodec/vicodec-core.c static int vicodec_start_streaming(struct vb2_queue *q,
q 1600 drivers/media/platform/vicodec/vicodec-core.c struct vicodec_ctx *ctx = vb2_get_drv_priv(q);
q 1601 drivers/media/platform/vicodec/vicodec-core.c struct vicodec_q_data *q_data = get_q_data(ctx, q->type);
q 1612 drivers/media/platform/vicodec/vicodec-core.c if (V4L2_TYPE_IS_OUTPUT(q->type))
q 1617 drivers/media/platform/vicodec/vicodec-core.c if ((V4L2_TYPE_IS_OUTPUT(q->type) && !ctx->is_enc) ||
q 1618 drivers/media/platform/vicodec/vicodec-core.c (!V4L2_TYPE_IS_OUTPUT(q->type) && ctx->is_enc))
q 1623 drivers/media/platform/vicodec/vicodec-core.c vicodec_return_bufs(q, VB2_BUF_STATE_QUEUED);
q 1649 drivers/media/platform/vicodec/vicodec-core.c vicodec_return_bufs(q, VB2_BUF_STATE_QUEUED);
q 1686 drivers/media/platform/vicodec/vicodec-core.c static void vicodec_stop_streaming(struct vb2_queue *q)
q 1688 drivers/media/platform/vicodec/vicodec-core.c struct vicodec_ctx *ctx = vb2_get_drv_priv(q);
q 1690 drivers/media/platform/vicodec/vicodec-core.c vicodec_return_bufs(q, VB2_BUF_STATE_ERROR);
q 1692 drivers/media/platform/vicodec/vicodec-core.c if (V4L2_TYPE_IS_OUTPUT(q->type)) {
q 1715 drivers/media/platform/vicodec/vicodec-core.c if (!ctx->is_enc && V4L2_TYPE_IS_OUTPUT(q->type))
q 1718 drivers/media/platform/vicodec/vicodec-core.c if ((!V4L2_TYPE_IS_OUTPUT(q->type) && !ctx->is_enc) ||
q 1719 drivers/media/platform/vicodec/vicodec-core.c (V4L2_TYPE_IS_OUTPUT(q->type) && ctx->is_enc)) {
q 1727 drivers/media/platform/vicodec/vicodec-core.c if (V4L2_TYPE_IS_OUTPUT(q->type) && !ctx->is_enc) {
q 1068 drivers/media/platform/vim2m.c static int vim2m_start_streaming(struct vb2_queue *q, unsigned int count)
q 1070 drivers/media/platform/vim2m.c struct vim2m_ctx *ctx = vb2_get_drv_priv(q);
q 1071 drivers/media/platform/vim2m.c struct vim2m_q_data *q_data = get_q_data(ctx, q->type);
q 1076 drivers/media/platform/vim2m.c if (V4L2_TYPE_IS_OUTPUT(q->type))
q 1083 drivers/media/platform/vim2m.c static void vim2m_stop_streaming(struct vb2_queue *q)
q 1085 drivers/media/platform/vim2m.c struct vim2m_ctx *ctx = vb2_get_drv_priv(q);
q 1092 drivers/media/platform/vim2m.c if (V4L2_TYPE_IS_OUTPUT(q->type))
q 402 drivers/media/platform/vimc/vimc-capture.c struct vb2_queue *q;
q 430 drivers/media/platform/vimc/vimc-capture.c q = &vcap->queue;
q 431 drivers/media/platform/vimc/vimc-capture.c q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
q 432 drivers/media/platform/vimc/vimc-capture.c q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_USERPTR;
q 433 drivers/media/platform/vimc/vimc-capture.c q->drv_priv = vcap;
q 434 drivers/media/platform/vimc/vimc-capture.c q->buf_struct_size = sizeof(struct vimc_cap_buffer);
q 435 drivers/media/platform/vimc/vimc-capture.c q->ops = &vimc_cap_qops;
q 436 drivers/media/platform/vimc/vimc-capture.c q->mem_ops = &vb2_vmalloc_memops;
q 437 drivers/media/platform/vimc/vimc-capture.c q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
q 438 drivers/media/platform/vimc/vimc-capture.c q->min_buffers_needed = 2;
q 439 drivers/media/platform/vimc/vimc-capture.c q->lock = &vcap->lock;
q 441 drivers/media/platform/vimc/vimc-capture.c ret = vb2_queue_init(q);
q 474 drivers/media/platform/vimc/vimc-capture.c vdev->queue = q;
q 491 drivers/media/platform/vimc/vimc-capture.c vb2_queue_release(q);
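A second shape recurs in the vpe, vicodec and vim2m entries above: start_streaming/stop_streaming callbacks that drain every queued buffer back to vb2 with an appropriate state (QUEUED when startup fails, ERROR on stop). A condensed sketch of that idiom, with bar_ctx and the function names as hypothetical stand-ins:

    #include <media/v4l2-fh.h>
    #include <media/v4l2-mem2mem.h>
    #include <media/videobuf2-v4l2.h>

    struct bar_ctx {
            struct v4l2_fh fh;      /* fh.m2m_ctx holds the m2m context */
    };

    static void bar_return_all_buffers(struct bar_ctx *ctx, struct vb2_queue *q,
                                       enum vb2_buffer_state state)
    {
            struct vb2_v4l2_buffer *vbuf;

            for (;;) {
                    /* OUTPUT queues feed the device, CAPTURE queues drain it */
                    if (V4L2_TYPE_IS_OUTPUT(q->type))
                            vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
                    else
                            vbuf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
                    if (!vbuf)
                            break;
                    vb2_buffer_done(&vbuf->vb2_buf, state);
            }
    }

    static void bar_stop_streaming(struct vb2_queue *q)
    {
            struct bar_ctx *ctx = vb2_get_drv_priv(q);

            bar_return_all_buffers(ctx, q, VB2_BUF_STATE_ERROR);
    }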
q 667 drivers/media/platform/vivid/vivid-core.c struct vb2_queue *q;
q 1108 drivers/media/platform/vivid/vivid-core.c q = &dev->vb_vid_cap_q;
q 1109 drivers/media/platform/vivid/vivid-core.c q->type = dev->multiplanar ? V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE :
q 1111 drivers/media/platform/vivid/vivid-core.c q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_READ;
q 1113 drivers/media/platform/vivid/vivid-core.c q->io_modes |= VB2_USERPTR;
q 1114 drivers/media/platform/vivid/vivid-core.c q->drv_priv = dev;
q 1115 drivers/media/platform/vivid/vivid-core.c q->buf_struct_size = sizeof(struct vivid_buffer);
q 1116 drivers/media/platform/vivid/vivid-core.c q->ops = &vivid_vid_cap_qops;
q 1117 drivers/media/platform/vivid/vivid-core.c q->mem_ops = vivid_mem_ops[allocator];
q 1118 drivers/media/platform/vivid/vivid-core.c q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
q 1119 drivers/media/platform/vivid/vivid-core.c q->min_buffers_needed = 2;
q 1120 drivers/media/platform/vivid/vivid-core.c q->lock = &dev->mutex;
q 1121 drivers/media/platform/vivid/vivid-core.c q->dev = dev->v4l2_dev.dev;
q 1122 drivers/media/platform/vivid/vivid-core.c q->supports_requests = true;
q 1124 drivers/media/platform/vivid/vivid-core.c ret = vb2_queue_init(q);
q 1133 drivers/media/platform/vivid/vivid-core.c q = &dev->vb_vid_out_q;
q 1134 drivers/media/platform/vivid/vivid-core.c q->type = dev->multiplanar ? V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE :
q 1136 drivers/media/platform/vivid/vivid-core.c q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_WRITE;
q 1138 drivers/media/platform/vivid/vivid-core.c q->io_modes |= VB2_USERPTR;
q 1139 drivers/media/platform/vivid/vivid-core.c q->drv_priv = dev;
q 1140 drivers/media/platform/vivid/vivid-core.c q->buf_struct_size = sizeof(struct vivid_buffer);
q 1141 drivers/media/platform/vivid/vivid-core.c q->ops = &vivid_vid_out_qops;
q 1142 drivers/media/platform/vivid/vivid-core.c q->mem_ops = vivid_mem_ops[allocator];
q 1143 drivers/media/platform/vivid/vivid-core.c q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
q 1144 drivers/media/platform/vivid/vivid-core.c q->min_buffers_needed = 2;
q 1145 drivers/media/platform/vivid/vivid-core.c q->lock = &dev->mutex;
q 1146 drivers/media/platform/vivid/vivid-core.c q->dev = dev->v4l2_dev.dev;
q 1147 drivers/media/platform/vivid/vivid-core.c q->supports_requests = true;
q 1149 drivers/media/platform/vivid/vivid-core.c ret = vb2_queue_init(q);
q 1156 drivers/media/platform/vivid/vivid-core.c q = &dev->vb_vbi_cap_q;
q 1157 drivers/media/platform/vivid/vivid-core.c q->type = dev->has_raw_vbi_cap ? V4L2_BUF_TYPE_VBI_CAPTURE :
q 1159 drivers/media/platform/vivid/vivid-core.c q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_READ;
q 1161 drivers/media/platform/vivid/vivid-core.c q->io_modes |= VB2_USERPTR;
q 1162 drivers/media/platform/vivid/vivid-core.c q->drv_priv = dev;
q 1163 drivers/media/platform/vivid/vivid-core.c q->buf_struct_size = sizeof(struct vivid_buffer);
q 1164 drivers/media/platform/vivid/vivid-core.c q->ops = &vivid_vbi_cap_qops;
q 1165 drivers/media/platform/vivid/vivid-core.c q->mem_ops = vivid_mem_ops[allocator];
q 1166 drivers/media/platform/vivid/vivid-core.c q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
q 1167 drivers/media/platform/vivid/vivid-core.c q->min_buffers_needed = 2;
q 1168 drivers/media/platform/vivid/vivid-core.c q->lock = &dev->mutex;
q 1169 drivers/media/platform/vivid/vivid-core.c q->dev = dev->v4l2_dev.dev;
q 1170 drivers/media/platform/vivid/vivid-core.c q->supports_requests = true;
q 1172 drivers/media/platform/vivid/vivid-core.c ret = vb2_queue_init(q);
q 1179 drivers/media/platform/vivid/vivid-core.c q = &dev->vb_vbi_out_q;
q 1180 drivers/media/platform/vivid/vivid-core.c q->type = dev->has_raw_vbi_out ? V4L2_BUF_TYPE_VBI_OUTPUT :
q 1182 drivers/media/platform/vivid/vivid-core.c q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_WRITE;
q 1184 drivers/media/platform/vivid/vivid-core.c q->io_modes |= VB2_USERPTR;
q 1185 drivers/media/platform/vivid/vivid-core.c q->drv_priv = dev;
q 1186 drivers/media/platform/vivid/vivid-core.c q->buf_struct_size = sizeof(struct vivid_buffer);
q 1187 drivers/media/platform/vivid/vivid-core.c q->ops = &vivid_vbi_out_qops;
q 1188 drivers/media/platform/vivid/vivid-core.c q->mem_ops = vivid_mem_ops[allocator];
q 1189 drivers/media/platform/vivid/vivid-core.c q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
q 1190 drivers/media/platform/vivid/vivid-core.c q->min_buffers_needed = 2;
q 1191 drivers/media/platform/vivid/vivid-core.c q->lock = &dev->mutex;
q 1192 drivers/media/platform/vivid/vivid-core.c q->dev = dev->v4l2_dev.dev;
q 1193 drivers/media/platform/vivid/vivid-core.c q->supports_requests = true;
q 1195 drivers/media/platform/vivid/vivid-core.c ret = vb2_queue_init(q);
q 1202 drivers/media/platform/vivid/vivid-core.c q = &dev->vb_sdr_cap_q;
q 1203 drivers/media/platform/vivid/vivid-core.c q->type = V4L2_BUF_TYPE_SDR_CAPTURE;
q 1204 drivers/media/platform/vivid/vivid-core.c q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_READ;
q 1206 drivers/media/platform/vivid/vivid-core.c q->io_modes |= VB2_USERPTR;
q 1207 drivers/media/platform/vivid/vivid-core.c q->drv_priv = dev;
q 1208 drivers/media/platform/vivid/vivid-core.c q->buf_struct_size = sizeof(struct vivid_buffer);
q 1209 drivers/media/platform/vivid/vivid-core.c q->ops = &vivid_sdr_cap_qops;
q 1210 drivers/media/platform/vivid/vivid-core.c q->mem_ops = vivid_mem_ops[allocator];
q 1211 drivers/media/platform/vivid/vivid-core.c q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
q 1212 drivers/media/platform/vivid/vivid-core.c q->min_buffers_needed = 8;
q 1213 drivers/media/platform/vivid/vivid-core.c q->lock = &dev->mutex;
q 1214 drivers/media/platform/vivid/vivid-core.c q->dev = dev->v4l2_dev.dev;
q 1215 drivers/media/platform/vivid/vivid-core.c q->supports_requests = true;
q 1217 drivers/media/platform/vivid/vivid-core.c ret = vb2_queue_init(q);
q 463 drivers/media/platform/vivid/vivid-sdr-cap.c struct vb2_queue *q = &dev->vb_sdr_cap_q;
q 466 drivers/media/platform/vivid/vivid-sdr-cap.c if (vb2_is_busy(q))
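The vb2_is_busy() check in the vivid-sdr-cap entries just above (and in the vivid-vid-cap/out, airspy, msi2500, s2255 and stk1160 entries that follow) is always the same guard: a format or frequency change is refused once buffers are allocated on the queue. A hedged sketch of that guard inside an S_FMT handler; the baz_* names are illustrative only.

    #include <media/v4l2-dev.h>
    #include <media/videobuf2-v4l2.h>

    struct baz_dev {
            struct vb2_queue queue;
    };

    static int baz_s_fmt_vid_cap(struct file *file, void *priv,
                                 struct v4l2_format *f)
    {
            struct baz_dev *dev = video_drvdata(file);

            /* Buffers exist: the negotiated format is locked in. */
            if (vb2_is_busy(&dev->queue))
                    return -EBUSY;

            /* ... validate and apply f->fmt.pix here ... */
            return 0;
    }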
q 654 drivers/media/platform/vivid/vivid-vid-cap.c struct vb2_queue *q = &dev->vb_vid_cap_q;
q 663 drivers/media/platform/vivid/vivid-vid-cap.c if (vb2_is_busy(q)) {
q 454 drivers/media/platform/vivid/vivid-vid-out.c struct vb2_queue *q = &dev->vb_vid_out_q;
q 462 drivers/media/platform/vivid/vivid-vid-out.c if (vb2_is_busy(q) &&
q 477 drivers/media/platform/vivid/vivid-vid-out.c if (vb2_is_busy(q))
q 153 drivers/media/radio/radio-gemtek.c int i, bit, q, mute;
q 160 drivers/media/radio/radio-gemtek.c for (i = 0, q = gt->bu2614data; i < 32; i++, q >>= 1) {
q 161 drivers/media/radio/radio-gemtek.c bit = (q & 1) ? GEMTEK_DA : 0;
q 257 drivers/media/radio/radio-gemtek.c int i, q;
q 259 drivers/media/radio/radio-gemtek.c q = inb_p(io); /* Read bus contents before probing. */
q 269 drivers/media/radio/radio-gemtek.c outb_p(q >> 5, io); /* Write bus contents back. */
q 153 drivers/media/tuners/max2165.c u32 q, f = 0;
q 159 drivers/media/tuners/max2165.c q = dividend / divisor;
q 160 drivers/media/tuners/max2165.c remainder = dividend - q * divisor;
q 171 drivers/media/tuners/max2165.c *quotient = q;
q 644 drivers/media/usb/airspy/airspy.c struct vb2_queue *q = &s->vb_queue;
q 647 drivers/media/usb/airspy/airspy.c if (vb2_is_busy(q))
q 291 drivers/media/usb/au0828/au0828-video.c struct vb2_queue *q = vb->vb2_buf.vb2_queue;
q 296 drivers/media/usb/au0828/au0828-video.c if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
q 1807 drivers/media/usb/au0828/au0828-video.c struct vb2_queue *q;
q 1810 drivers/media/usb/au0828/au0828-video.c q = &dev->vb_vidq;
q 1811 drivers/media/usb/au0828/au0828-video.c q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
q 1812 drivers/media/usb/au0828/au0828-video.c q->io_modes = VB2_READ | VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
q 1813 drivers/media/usb/au0828/au0828-video.c q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
q 1814 drivers/media/usb/au0828/au0828-video.c q->drv_priv = dev;
q 1815 drivers/media/usb/au0828/au0828-video.c q->buf_struct_size = sizeof(struct au0828_buffer);
q 1816 drivers/media/usb/au0828/au0828-video.c q->ops = &au0828_video_qops;
q 1817 drivers/media/usb/au0828/au0828-video.c q->mem_ops = &vb2_vmalloc_memops;
q 1819 drivers/media/usb/au0828/au0828-video.c rc = vb2_queue_init(q);
q 1824 drivers/media/usb/au0828/au0828-video.c q = &dev->vb_vbiq;
q 1825 drivers/media/usb/au0828/au0828-video.c q->type = V4L2_BUF_TYPE_VBI_CAPTURE;
q 1826 drivers/media/usb/au0828/au0828-video.c q->io_modes = VB2_READ | VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
q 1827 drivers/media/usb/au0828/au0828-video.c q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
q 1828 drivers/media/usb/au0828/au0828-video.c q->drv_priv = dev;
q 1829 drivers/media/usb/au0828/au0828-video.c q->buf_struct_size = sizeof(struct au0828_buffer);
q 1830 drivers/media/usb/au0828/au0828-video.c q->ops = &au0828_vbi_qops;
q 1831 drivers/media/usb/au0828/au0828-video.c q->mem_ops = &vb2_vmalloc_memops;
q 1833 drivers/media/usb/au0828/au0828-video.c rc = vb2_queue_init(q);
q 1230 drivers/media/usb/cx231xx/cx231xx-417.c static int bb_buf_setup(struct videobuf_queue *q,
q 1233 drivers/media/usb/cx231xx/cx231xx-417.c struct cx231xx_fh *fh = q->priv_data;
q 1397 drivers/media/usb/cx231xx/cx231xx-417.c static int bb_buf_prepare(struct videobuf_queue *q,
q 1400 drivers/media/usb/cx231xx/cx231xx-417.c struct cx231xx_fh *fh = q->priv_data;
q 1415 drivers/media/usb/cx231xx/cx231xx-417.c rc = videobuf_iolock(q, &buf->vb, NULL);
q 1456 drivers/media/usb/cx231xx/cx231xx-417.c free_buffer(q, buf);
q 1460 drivers/media/usb/cx231xx/cx231xx-417.c static void bb_buf_queue(struct videobuf_queue *q,
q 1463 drivers/media/usb/cx231xx/cx231xx-417.c struct cx231xx_fh *fh = q->priv_data;
q 1475 drivers/media/usb/cx231xx/cx231xx-417.c static void bb_buf_release(struct videobuf_queue *q,
q 1483 drivers/media/usb/cx231xx/cx231xx-417.c free_buffer(q, buf);
q 39 drivers/media/usb/dvb-usb/cxusb-analog.c static int cxusb_medion_v_queue_setup(struct vb2_queue *q,
q 45 drivers/media/usb/dvb-usb/cxusb-analog.c struct dvb_usb_device *dvbdev = vb2_get_drv_priv(q);
q 772 drivers/media/usb/dvb-usb/cxusb-analog.c static int cxusb_medion_v_start_streaming(struct vb2_queue *q,
q 775 drivers/media/usb/dvb-usb/cxusb-analog.c struct dvb_usb_device *dvbdev = vb2_get_drv_priv(q);
q 898 drivers/media/usb/dvb-usb/cxusb-analog.c static void cxusb_medion_v_stop_streaming(struct vb2_queue *q)
q 900 drivers/media/usb/dvb-usb/cxusb-analog.c struct dvb_usb_device *dvbdev = vb2_get_drv_priv(q);
q 1249 drivers/media/usb/em28xx/em28xx-video.c struct vb2_queue *q;
q 1253 drivers/media/usb/em28xx/em28xx-video.c q = &v4l2->vb_vidq;
q 1254 drivers/media/usb/em28xx/em28xx-video.c q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
q 1255 drivers/media/usb/em28xx/em28xx-video.c q->io_modes = VB2_READ | VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
q 1256 drivers/media/usb/em28xx/em28xx-video.c q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
q 1257 drivers/media/usb/em28xx/em28xx-video.c q->drv_priv = dev;
q 1258 drivers/media/usb/em28xx/em28xx-video.c q->buf_struct_size = sizeof(struct em28xx_buffer);
q 1259 drivers/media/usb/em28xx/em28xx-video.c q->ops = &em28xx_video_qops;
q 1260 drivers/media/usb/em28xx/em28xx-video.c q->mem_ops = &vb2_vmalloc_memops;
q 1262 drivers/media/usb/em28xx/em28xx-video.c rc = vb2_queue_init(q);
q 1267 drivers/media/usb/em28xx/em28xx-video.c q = &v4l2->vb_vbiq;
q 1268 drivers/media/usb/em28xx/em28xx-video.c q->type = V4L2_BUF_TYPE_VBI_CAPTURE;
q 1269 drivers/media/usb/em28xx/em28xx-video.c q->io_modes = VB2_READ | VB2_MMAP | VB2_USERPTR;
q 1270 drivers/media/usb/em28xx/em28xx-video.c q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
q 1271 drivers/media/usb/em28xx/em28xx-video.c q->drv_priv = dev;
q 1272 drivers/media/usb/em28xx/em28xx-video.c q->buf_struct_size = sizeof(struct em28xx_buffer);
q 1273 drivers/media/usb/em28xx/em28xx-video.c q->ops = &em28xx_vbi_qops;
q 1274 drivers/media/usb/em28xx/em28xx-video.c q->mem_ops = &vb2_vmalloc_memops;
q 1276 drivers/media/usb/em28xx/em28xx-video.c rc = vb2_queue_init(q);
q 290 drivers/media/usb/go7007/go7007-fw.c static int mjpeg_frame_header(struct go7007 *go, unsigned char *buf, int q)
q 304 drivers/media/usb/go7007/go7007-fw.c buf[p++] = (default_intra_quant_table[zz[i]] * q) >> 3;
q 930 drivers/media/usb/go7007/go7007-fw.c int q = 0;
q 937 drivers/media/usb/go7007/go7007-fw.c q > 0 ? sgop_expt_addr * q :
q 939 drivers/media/usb/go7007/go7007-fw.c q > 0 ? sgop_expt_addr * q :
q 941 drivers/media/usb/go7007/go7007-fw.c q > 0 ? sgop_expt_addr * q :
q 943 drivers/media/usb/go7007/go7007-fw.c q > 0 ? sgop_expt_addr * q :
q 946 drivers/media/usb/go7007/go7007-fw.c u32 calc_q = q > 0 ? q : cplx[0] / sgop_expt_addr;
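The max2165.c lines above (q 159-160) derive the remainder from the quotient rather than dividing twice; for dividend 27 and divisor 4 this yields q = 6 and remainder = 3. The same computation in isolation (kernel-style u32 from <linux/types.h> assumed; the helper name is hypothetical):

    /* One division produces both quotient and remainder. */
    static void div_qr(u32 dividend, u32 divisor, u32 *quotient, u32 *remainder)
    {
            u32 q = dividend / divisor;

            *quotient = q;
            *remainder = dividend - q * divisor;    /* cheaper than a second '%' */
    }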
q 343 drivers/media/usb/go7007/go7007-v4l2.c static int go7007_queue_setup(struct vb2_queue *q,
q 397 drivers/media/usb/go7007/go7007-v4l2.c static int go7007_start_streaming(struct vb2_queue *q, unsigned int count)
q 399 drivers/media/usb/go7007/go7007-v4l2.c struct go7007 *go = vb2_get_drv_priv(q);
q 407 drivers/media/usb/go7007/go7007-v4l2.c q->streaming = 1;
q 414 drivers/media/usb/go7007/go7007-v4l2.c q->streaming = 0;
q 428 drivers/media/usb/go7007/go7007-v4l2.c static void go7007_stop_streaming(struct vb2_queue *q)
q 430 drivers/media/usb/go7007/go7007-v4l2.c struct go7007 *go = vb2_get_drv_priv(q);
q 433 drivers/media/usb/go7007/go7007-v4l2.c q->streaming = 0;
q 1450 drivers/media/usb/gspca/gspca.c struct vb2_queue *q;
q 1510 drivers/media/usb/gspca/gspca.c q = &gspca_dev->queue;
q 1511 drivers/media/usb/gspca/gspca.c q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
q 1512 drivers/media/usb/gspca/gspca.c q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_READ;
q 1513 drivers/media/usb/gspca/gspca.c q->drv_priv = gspca_dev;
q 1514 drivers/media/usb/gspca/gspca.c q->buf_struct_size = sizeof(struct gspca_buffer);
q 1515 drivers/media/usb/gspca/gspca.c q->ops = &gspca_qops;
q 1516 drivers/media/usb/gspca/gspca.c q->mem_ops = &vb2_vmalloc_memops;
q 1517 drivers/media/usb/gspca/gspca.c q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
q 1518 drivers/media/usb/gspca/gspca.c q->min_buffers_needed = 2;
q 1519 drivers/media/usb/gspca/gspca.c q->lock = &gspca_dev->usb_lock;
q 1520 drivers/media/usb/gspca/gspca.c ret = vb2_queue_init(q);
q 1523 drivers/media/usb/gspca/gspca.c gspca_dev->vdev.queue = q;
q 1439 drivers/media/usb/gspca/topro.c static void set_dqt(struct gspca_dev *gspca_dev, u8 q)
q 1444 drivers/media/usb/gspca/topro.c gspca_dbg(gspca_dev, D_STREAM, "q %d -> %d\n", sd->quality, q);
q 1445 drivers/media/usb/gspca/topro.c sd->quality = q;
q 1446 drivers/media/usb/gspca/topro.c if (q > 16)
q 1447 drivers/media/usb/gspca/topro.c q = 16;
q 1449 drivers/media/usb/gspca/topro.c jpeg_set_qual(sd->jpeg_hdr, jpeg_q[q]);
q 1452 drivers/media/usb/gspca/topro.c DQT[q], sizeof DQT[0]);
q 1456 drivers/media/usb/gspca/topro.c static void setquality(struct gspca_dev *gspca_dev, s32 q)
q 1460 drivers/media/usb/gspca/topro.c if (q != 16)
q 1461 drivers/media/usb/gspca/topro.c q = 15 - q;
q 1465 drivers/media/usb/gspca/topro.c reg_w(gspca_dev, TP6800_R79_QUALITY, q);
q 1468 drivers/media/usb/gspca/topro.c if (q == 15 && sd->bridge == BRIDGE_TP6810) {
q 918 drivers/media/usb/hackrf/hackrf.c struct vb2_queue *q;
q 925 drivers/media/usb/hackrf/hackrf.c q = &dev->rx_vb2_queue;
q 927 drivers/media/usb/hackrf/hackrf.c q = &dev->tx_vb2_queue;
q 929 drivers/media/usb/hackrf/hackrf.c if (vb2_is_busy(q))
q 97 drivers/media/usb/hdpvr/hdpvr-video.c static int hdpvr_free_queue(struct list_head *q)
q 104 drivers/media/usb/hdpvr/hdpvr-video.c for (p = q->next; p != q;) {
q 924 drivers/media/usb/msi2500/msi2500.c struct vb2_queue *q = &dev->vb_queue;
q 930 drivers/media/usb/msi2500/msi2500.c if (vb2_is_busy(q))
q 815 drivers/media/usb/s2255/s2255drv.c struct vb2_queue *q = &vc->vb_vidq;
q 829 drivers/media/usb/s2255/s2255drv.c if (vb2_is_busy(q)) {
q 1100 drivers/media/usb/s2255/s2255drv.c struct vb2_queue *q = &vc->vb_vidq;
q 1106 drivers/media/usb/s2255/s2255drv.c if (vb2_is_busy(q))
q 1592 drivers/media/usb/s2255/s2255drv.c struct vb2_queue *q;
q 1626 drivers/media/usb/s2255/s2255drv.c q = &vc->vb_vidq;
q 1627 drivers/media/usb/s2255/s2255drv.c q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
q 1628 drivers/media/usb/s2255/s2255drv.c q->io_modes = VB2_MMAP | VB2_READ | VB2_USERPTR;
q 1629 drivers/media/usb/s2255/s2255drv.c q->drv_priv = vc;
q 1630 drivers/media/usb/s2255/s2255drv.c q->lock = &vc->vb_lock;
q 1631 drivers/media/usb/s2255/s2255drv.c q->buf_struct_size = sizeof(struct s2255_buffer);
q 1632 drivers/media/usb/s2255/s2255drv.c q->mem_ops = &vb2_vmalloc_memops;
q 1633 drivers/media/usb/s2255/s2255drv.c q->ops = &s2255_video_qops;
q 1634 drivers/media/usb/s2255/s2255drv.c q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
q 1635 drivers/media/usb/s2255/s2255drv.c ret = vb2_queue_init(q);
q 1643 drivers/media/usb/s2255/s2255drv.c vc->vdev.queue = q;
q 483 drivers/media/usb/stk1160/stk1160-v4l.c struct vb2_queue *q = &dev->vb_vidq;
q 487 drivers/media/usb/stk1160/stk1160-v4l.c if (vb2_is_busy(q))
q 518 drivers/media/usb/stk1160/stk1160-v4l.c struct vb2_queue *q = &dev->vb_vidq;
q 523 drivers/media/usb/stk1160/stk1160-v4l.c if (vb2_is_busy(q))
q 779 drivers/media/usb/stk1160/stk1160-v4l.c struct vb2_queue *q;
q 781 drivers/media/usb/stk1160/stk1160-v4l.c q = &dev->vb_vidq;
q 782 drivers/media/usb/stk1160/stk1160-v4l.c q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
q 783 drivers/media/usb/stk1160/stk1160-v4l.c q->io_modes = VB2_READ | VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
q 784 drivers/media/usb/stk1160/stk1160-v4l.c q->drv_priv = dev;
q 785 drivers/media/usb/stk1160/stk1160-v4l.c q->buf_struct_size = sizeof(struct stk1160_buffer);
q 786 drivers/media/usb/stk1160/stk1160-v4l.c q->ops = &stk1160_video_qops;
q 787 drivers/media/usb/stk1160/stk1160-v4l.c q->mem_ops = &vb2_vmalloc_memops;
q 788 drivers/media/usb/stk1160/stk1160-v4l.c q->lock = &dev->vb_queue_lock;
q 789 drivers/media/usb/stk1160/stk1160-v4l.c q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
q 791 drivers/media/usb/stk1160/stk1160-v4l.c rc = vb2_queue_init(q);
q 2417 drivers/media/usb/uvc/uvc_driver.c #define UVC_INFO_QUIRK(q) (kernel_ulong_t)&(struct uvc_device_info){.quirks = q}
q 817 drivers/media/usb/zr364xx/zr364xx.c struct videobuf_queue *q = &cam->vb_vidq;
q 825 drivers/media/usb/zr364xx/zr364xx.c mutex_lock(&q->vb_lock);
q 899 drivers/media/usb/zr364xx/zr364xx.c mutex_unlock(&q->vb_lock);
q 1273 drivers/media/usb/zr364xx/zr364xx.c struct videobuf_queue *q = &cam->vb_vidq;
q 1278 drivers/media/usb/zr364xx/zr364xx.c return res | videobuf_poll_stream(file, q, wait);
q 302 drivers/media/v4l2-core/v4l2-mc.c int v4l_vb2q_enable_media_source(struct vb2_queue *q)
q 304 drivers/media/v4l2-core/v4l2-mc.c struct v4l2_fh *fh = q->owner;
q 126 drivers/media/v4l2-core/v4l2-mem2mem.c return &q_ctx->q;
q 291 drivers/media/v4l2-core/v4l2-mem2mem.c if (!m2m_ctx->out_q_ctx.q.streaming
q 292 drivers/media/v4l2-core/v4l2-mem2mem.c || !m2m_ctx->cap_q_ctx.q.streaming) {
q 578 drivers/media/v4l2-core/v4l2-mem2mem.c ret = vb2_streamoff(&q_ctx->q, type);
q 926 drivers/media/v4l2-core/v4l2-mem2mem.c ret = queue_init(drv_priv, &out_q_ctx->q, &cap_q_ctx->q);
q 934 drivers/media/v4l2-core/v4l2-mem2mem.c if (WARN_ON(out_q_ctx->q.lock != cap_q_ctx->q.lock)) {
q 938 drivers/media/v4l2-core/v4l2-mem2mem.c m2m_ctx->q_lock = out_q_ctx->q.lock;
q 952 drivers/media/v4l2-core/v4l2-mem2mem.c vb2_queue_release(&m2m_ctx->cap_q_ctx.q);
q 953 drivers/media/v4l2-core/v4l2-mem2mem.c vb2_queue_release(&m2m_ctx->out_q_ctx.q);
q 1025 drivers/media/v4l2-core/v4l2-mem2mem.c out_q_ctx.q);
q 49 drivers/media/v4l2-core/videobuf-core.c #define CALL(q, f, arg...) \
q 50 drivers/media/v4l2-core/videobuf-core.c ((q->int_ops->f) ? q->int_ops->f(arg) : 0)
q 51 drivers/media/v4l2-core/videobuf-core.c #define CALLPTR(q, f, arg...) \
q 52 drivers/media/v4l2-core/videobuf-core.c ((q->int_ops->f) ? q->int_ops->f(arg) : NULL)
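The CALL() and CALLPTR() macros just above (videobuf-core.c q 49-52) dispatch to an int_ops hook only when the backend provides it, substituting 0 or NULL otherwise. The idiom in miniature, with generic names rather than the kernel's:

    /* Guarded optional-callback dispatch, as in videobuf's CALL()/CALLPTR(). */
    #define CALL_OR(ops, f, def, ...) \
            ((ops)->f ? (ops)->f(__VA_ARGS__) : (def))

    struct backend_ops {
            int (*sync)(void *priv);
    };

    static int backend_sync(const struct backend_ops *ops, void *priv)
    {
            return CALL_OR(ops, sync, 0, priv);
    }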
q 54 drivers/media/v4l2-core/videobuf-core.c struct videobuf_buffer *videobuf_alloc_vb(struct videobuf_queue *q)
q 58 drivers/media/v4l2-core/videobuf-core.c BUG_ON(q->msize < sizeof(*vb));
q 60 drivers/media/v4l2-core/videobuf-core.c if (!q->int_ops || !q->int_ops->alloc_vb) {
q 65 drivers/media/v4l2-core/videobuf-core.c vb = q->int_ops->alloc_vb(q->msize);
q 75 drivers/media/v4l2-core/videobuf-core.c static int state_neither_active_nor_queued(struct videobuf_queue *q,
q 81 drivers/media/v4l2-core/videobuf-core.c spin_lock_irqsave(q->irqlock, flags);
q 83 drivers/media/v4l2-core/videobuf-core.c spin_unlock_irqrestore(q->irqlock, flags);
q 87 drivers/media/v4l2-core/videobuf-core.c int videobuf_waiton(struct videobuf_queue *q, struct videobuf_buffer *vb,
q 96 drivers/media/v4l2-core/videobuf-core.c if (state_neither_active_nor_queued(q, vb))
q 101 drivers/media/v4l2-core/videobuf-core.c is_ext_locked = q->ext_lock && mutex_is_locked(q->ext_lock);
q 106 drivers/media/v4l2-core/videobuf-core.c mutex_unlock(q->ext_lock);
q 109 drivers/media/v4l2-core/videobuf-core.c state_neither_active_nor_queued(q, vb));
q 111 drivers/media/v4l2-core/videobuf-core.c wait_event(vb->done, state_neither_active_nor_queued(q, vb));
q 114 drivers/media/v4l2-core/videobuf-core.c mutex_lock(q->ext_lock);
q 120 drivers/media/v4l2-core/videobuf-core.c int videobuf_iolock(struct videobuf_queue *q, struct videobuf_buffer *vb,
q 124 drivers/media/v4l2-core/videobuf-core.c MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
q 126 drivers/media/v4l2-core/videobuf-core.c return CALL(q, iolock, q, vb, fbuf);
q 130 drivers/media/v4l2-core/videobuf-core.c void *videobuf_queue_to_vaddr(struct videobuf_queue *q,
q 133 drivers/media/v4l2-core/videobuf-core.c if (q->int_ops->vaddr)
q 134 drivers/media/v4l2-core/videobuf-core.c return q->int_ops->vaddr(buf);
q 142 drivers/media/v4l2-core/videobuf-core.c void videobuf_queue_core_init(struct videobuf_queue *q,
q 153 drivers/media/v4l2-core/videobuf-core.c BUG_ON(!q);
q 154 drivers/media/v4l2-core/videobuf-core.c memset(q, 0, sizeof(*q));
q 155 drivers/media/v4l2-core/videobuf-core.c q->irqlock = irqlock;
q 156 drivers/media/v4l2-core/videobuf-core.c q->ext_lock = ext_lock;
q 157 drivers/media/v4l2-core/videobuf-core.c q->dev = dev;
q 158 drivers/media/v4l2-core/videobuf-core.c q->type = type;
q 159 drivers/media/v4l2-core/videobuf-core.c q->field = field;
q 160 drivers/media/v4l2-core/videobuf-core.c q->msize = msize;
q 161 drivers/media/v4l2-core/videobuf-core.c q->ops = ops;
q 162 drivers/media/v4l2-core/videobuf-core.c q->priv_data = priv;
q 163 drivers/media/v4l2-core/videobuf-core.c q->int_ops = int_ops;
q 166 drivers/media/v4l2-core/videobuf-core.c BUG_ON(!q->ops->buf_setup);
q 167 drivers/media/v4l2-core/videobuf-core.c BUG_ON(!q->ops->buf_prepare);
q 168 drivers/media/v4l2-core/videobuf-core.c BUG_ON(!q->ops->buf_queue);
q 169 drivers/media/v4l2-core/videobuf-core.c BUG_ON(!q->ops->buf_release);
q 175 drivers/media/v4l2-core/videobuf-core.c BUG_ON(!q->int_ops);
q 177 drivers/media/v4l2-core/videobuf-core.c mutex_init(&q->vb_lock);
q 178 drivers/media/v4l2-core/videobuf-core.c init_waitqueue_head(&q->wait);
q 179 drivers/media/v4l2-core/videobuf-core.c INIT_LIST_HEAD(&q->stream);
q 184 drivers/media/v4l2-core/videobuf-core.c int videobuf_queue_is_busy(struct videobuf_queue *q)
q 188 drivers/media/v4l2-core/videobuf-core.c MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
q 190 drivers/media/v4l2-core/videobuf-core.c if (q->streaming) {
q 194 drivers/media/v4l2-core/videobuf-core.c if (q->reading) {
q 198 drivers/media/v4l2-core/videobuf-core.c if (q->read_buf) {
q 203 drivers/media/v4l2-core/videobuf-core.c if (NULL == q->bufs[i])
q 205 drivers/media/v4l2-core/videobuf-core.c if (q->bufs[i]->map) {
q 209 drivers/media/v4l2-core/videobuf-core.c if (q->bufs[i]->state == VIDEOBUF_QUEUED) {
q 213 drivers/media/v4l2-core/videobuf-core.c if (q->bufs[i]->state == VIDEOBUF_ACTIVE) {
q 229 drivers/media/v4l2-core/videobuf-core.c static int __videobuf_free(struct videobuf_queue *q)
q 234 drivers/media/v4l2-core/videobuf-core.c if (!q)
q 237 drivers/media/v4l2-core/videobuf-core.c if (q->streaming || q->reading) {
q 242 drivers/media/v4l2-core/videobuf-core.c MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
q 245 drivers/media/v4l2-core/videobuf-core.c if (q->bufs[i] && q->bufs[i]->map) {
q 251 drivers/media/v4l2-core/videobuf-core.c if (NULL == q->bufs[i])
q 253 drivers/media/v4l2-core/videobuf-core.c q->ops->buf_release(q, q->bufs[i]);
q 254 drivers/media/v4l2-core/videobuf-core.c kfree(q->bufs[i]);
q 255 drivers/media/v4l2-core/videobuf-core.c q->bufs[i] = NULL;
q 262 drivers/media/v4l2-core/videobuf-core.c void videobuf_queue_cancel(struct videobuf_queue *q)
q 267 drivers/media/v4l2-core/videobuf-core.c q->streaming = 0;
q 268 drivers/media/v4l2-core/videobuf-core.c q->reading = 0;
q 269 drivers/media/v4l2-core/videobuf-core.c wake_up_interruptible_sync(&q->wait);
q 272 drivers/media/v4l2-core/videobuf-core.c spin_lock_irqsave(q->irqlock, flags);
q 274 drivers/media/v4l2-core/videobuf-core.c if (NULL == q->bufs[i])
q 276 drivers/media/v4l2-core/videobuf-core.c if (q->bufs[i]->state == VIDEOBUF_QUEUED) {
q 277 drivers/media/v4l2-core/videobuf-core.c list_del(&q->bufs[i]->queue);
q 278 drivers/media/v4l2-core/videobuf-core.c q->bufs[i]->state = VIDEOBUF_ERROR;
q 279 drivers/media/v4l2-core/videobuf-core.c wake_up_all(&q->bufs[i]->done);
q 282 drivers/media/v4l2-core/videobuf-core.c spin_unlock_irqrestore(q->irqlock, flags);
q 286 drivers/media/v4l2-core/videobuf-core.c if (NULL == q->bufs[i])
q 288 drivers/media/v4l2-core/videobuf-core.c q->ops->buf_release(q, q->bufs[i]);
q 290 drivers/media/v4l2-core/videobuf-core.c INIT_LIST_HEAD(&q->stream);
q 297 drivers/media/v4l2-core/videobuf-core.c enum v4l2_field videobuf_next_field(struct videobuf_queue *q)
q 299 drivers/media/v4l2-core/videobuf-core.c enum v4l2_field field = q->field;
q 304 drivers/media/v4l2-core/videobuf-core.c if (V4L2_FIELD_TOP == q->last) {
q 306 drivers/media/v4l2-core/videobuf-core.c q->last = V4L2_FIELD_BOTTOM;
q 309 drivers/media/v4l2-core/videobuf-core.c q->last = V4L2_FIELD_TOP;
q 317 drivers/media/v4l2-core/videobuf-core.c static void videobuf_status(struct videobuf_queue *q, struct v4l2_buffer *b,
q 321 drivers/media/v4l2-core/videobuf-core.c MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
q 372 drivers/media/v4l2-core/videobuf-core.c int videobuf_mmap_free(struct videobuf_queue *q)
q 375 drivers/media/v4l2-core/videobuf-core.c videobuf_queue_lock(q);
q 376 drivers/media/v4l2-core/videobuf-core.c ret = __videobuf_free(q);
q 377 drivers/media/v4l2-core/videobuf-core.c videobuf_queue_unlock(q);
q 383 drivers/media/v4l2-core/videobuf-core.c int __videobuf_mmap_setup(struct videobuf_queue *q,
q 390 drivers/media/v4l2-core/videobuf-core.c MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
q 392 drivers/media/v4l2-core/videobuf-core.c err = __videobuf_free(q);
q 398 drivers/media/v4l2-core/videobuf-core.c q->bufs[i] = videobuf_alloc_vb(q);
q 400 drivers/media/v4l2-core/videobuf-core.c if (NULL == q->bufs[i])
q 403 drivers/media/v4l2-core/videobuf-core.c q->bufs[i]->i = i;
q 404 drivers/media/v4l2-core/videobuf-core.c q->bufs[i]->memory = memory;
q 405 drivers/media/v4l2-core/videobuf-core.c q->bufs[i]->bsize = bsize;
q 408 drivers/media/v4l2-core/videobuf-core.c q->bufs[i]->boff = PAGE_ALIGN(bsize) * i;
q 427 drivers/media/v4l2-core/videobuf-core.c int videobuf_mmap_setup(struct videobuf_queue *q,
q 432 drivers/media/v4l2-core/videobuf-core.c videobuf_queue_lock(q);
q 433 drivers/media/v4l2-core/videobuf-core.c ret = __videobuf_mmap_setup(q, bcount, bsize, memory);
q 434 drivers/media/v4l2-core/videobuf-core.c videobuf_queue_unlock(q);
q 439 drivers/media/v4l2-core/videobuf-core.c int videobuf_reqbufs(struct videobuf_queue *q,
q 452 drivers/media/v4l2-core/videobuf-core.c videobuf_queue_lock(q);
q 453 drivers/media/v4l2-core/videobuf-core.c if (req->type != q->type) {
q 459 drivers/media/v4l2-core/videobuf-core.c if (q->streaming) {
q 464 drivers/media/v4l2-core/videobuf-core.c if (!list_empty(&q->stream)) {
q 472 drivers/media/v4l2-core/videobuf-core.c retval = __videobuf_free(q);
q 480 drivers/media/v4l2-core/videobuf-core.c q->ops->buf_setup(q, &count, &size);
q 485 drivers/media/v4l2-core/videobuf-core.c retval = __videobuf_mmap_setup(q, count, size, req->memory);
q 495 drivers/media/v4l2-core/videobuf-core.c videobuf_queue_unlock(q);
q 500 drivers/media/v4l2-core/videobuf-core.c int videobuf_querybuf(struct videobuf_queue *q, struct v4l2_buffer *b)
q 504 drivers/media/v4l2-core/videobuf-core.c videobuf_queue_lock(q);
q 505 drivers/media/v4l2-core/videobuf-core.c if (unlikely(b->type != q->type)) {
q 513 drivers/media/v4l2-core/videobuf-core.c if (unlikely(NULL == q->bufs[b->index])) {
q 518 drivers/media/v4l2-core/videobuf-core.c videobuf_status(q, b, q->bufs[b->index], q->type);
q 522 drivers/media/v4l2-core/videobuf-core.c videobuf_queue_unlock(q);
q 527 drivers/media/v4l2-core/videobuf-core.c int videobuf_qbuf(struct videobuf_queue *q, struct v4l2_buffer *b)
q 534 drivers/media/v4l2-core/videobuf-core.c MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
q 539 drivers/media/v4l2-core/videobuf-core.c videobuf_queue_lock(q);
q 541 drivers/media/v4l2-core/videobuf-core.c if (q->reading) {
q 546 drivers/media/v4l2-core/videobuf-core.c if (b->type != q->type) {
q 554 drivers/media/v4l2-core/videobuf-core.c buf = q->bufs[b->index];
q 575 drivers/media/v4l2-core/videobuf-core.c if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT
q 576 drivers/media/v4l2-core/videobuf-core.c || q->type == V4L2_BUF_TYPE_VBI_OUTPUT
q 577 drivers/media/v4l2-core/videobuf-core.c || q->type == V4L2_BUF_TYPE_SLICED_VBI_OUTPUT
q 578 drivers/media/v4l2-core/videobuf-core.c || q->type == V4L2_BUF_TYPE_SDR_OUTPUT) {
q 591 drivers/media/v4l2-core/videobuf-core.c q->ops->buf_release(q, buf);
q 603 drivers/media/v4l2-core/videobuf-core.c field = videobuf_next_field(q);
q 604 drivers/media/v4l2-core/videobuf-core.c retval = q->ops->buf_prepare(q, buf, field);
q 610 drivers/media/v4l2-core/videobuf-core.c list_add_tail(&buf->stream, &q->stream);
q 611 drivers/media/v4l2-core/videobuf-core.c if (q->streaming) {
q 612 drivers/media/v4l2-core/videobuf-core.c spin_lock_irqsave(q->irqlock, flags);
q 613 drivers/media/v4l2-core/videobuf-core.c q->ops->buf_queue(q, buf);
q 614 drivers/media/v4l2-core/videobuf-core.c spin_unlock_irqrestore(q->irqlock, flags);
q 618 drivers/media/v4l2-core/videobuf-core.c wake_up_interruptible_sync(&q->wait);
q 621 drivers/media/v4l2-core/videobuf-core.c videobuf_queue_unlock(q);
q 631 drivers/media/v4l2-core/videobuf-core.c static int stream_next_buffer_check_queue(struct videobuf_queue *q, int noblock)
q 636 drivers/media/v4l2-core/videobuf-core.c if (!q->streaming) {
q 642 drivers/media/v4l2-core/videobuf-core.c if (list_empty(&q->stream)) {
q 651 drivers/media/v4l2-core/videobuf-core.c videobuf_queue_unlock(q);
q 656 drivers/media/v4l2-core/videobuf-core.c retval = wait_event_interruptible(q->wait,
q 657 drivers/media/v4l2-core/videobuf-core.c !list_empty(&q->stream) || !q->streaming);
q 658 drivers/media/v4l2-core/videobuf-core.c videobuf_queue_lock(q);
q 674 drivers/media/v4l2-core/videobuf-core.c static int stream_next_buffer(struct videobuf_queue *q,
q 680 drivers/media/v4l2-core/videobuf-core.c retval = stream_next_buffer_check_queue(q, nonblocking);
q 684 drivers/media/v4l2-core/videobuf-core.c buf = list_entry(q->stream.next, struct videobuf_buffer, stream);
q 685 drivers/media/v4l2-core/videobuf-core.c retval = videobuf_waiton(q, buf, nonblocking, 1);
q 694 drivers/media/v4l2-core/videobuf-core.c int videobuf_dqbuf(struct videobuf_queue *q,
q 700 drivers/media/v4l2-core/videobuf-core.c MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
q 703 drivers/media/v4l2-core/videobuf-core.c videobuf_queue_lock(q);
q 705 drivers/media/v4l2-core/videobuf-core.c retval = stream_next_buffer(q, &buf, nonblocking);
q 723 drivers/media/v4l2-core/videobuf-core.c CALL(q, sync, q, buf);
q 724 drivers/media/v4l2-core/videobuf-core.c videobuf_status(q, b, buf, q->type);
q 729 drivers/media/v4l2-core/videobuf-core.c videobuf_queue_unlock(q);
q 734 drivers/media/v4l2-core/videobuf-core.c int videobuf_streamon(struct videobuf_queue *q)
q 740 drivers/media/v4l2-core/videobuf-core.c videobuf_queue_lock(q);
q 742 drivers/media/v4l2-core/videobuf-core.c if (q->reading)
q 745 drivers/media/v4l2-core/videobuf-core.c if (q->streaming)
q 747 drivers/media/v4l2-core/videobuf-core.c q->streaming = 1;
q 748 drivers/media/v4l2-core/videobuf-core.c spin_lock_irqsave(q->irqlock, flags);
q 749 drivers/media/v4l2-core/videobuf-core.c list_for_each_entry(buf, &q->stream, stream)
q 751 drivers/media/v4l2-core/videobuf-core.c q->ops->buf_queue(q, buf);
q 752 drivers/media/v4l2-core/videobuf-core.c spin_unlock_irqrestore(q->irqlock, flags);
q 754 drivers/media/v4l2-core/videobuf-core.c wake_up_interruptible_sync(&q->wait);
q 756 drivers/media/v4l2-core/videobuf-core.c videobuf_queue_unlock(q);
q 762 drivers/media/v4l2-core/videobuf-core.c static int __videobuf_streamoff(struct videobuf_queue *q)
q 764 drivers/media/v4l2-core/videobuf-core.c if (!q->streaming)
q 767 drivers/media/v4l2-core/videobuf-core.c videobuf_queue_cancel(q);
q 772 drivers/media/v4l2-core/videobuf-core.c int videobuf_streamoff(struct videobuf_queue *q)
q 776 drivers/media/v4l2-core/videobuf-core.c videobuf_queue_lock(q);
q 777 drivers/media/v4l2-core/videobuf-core.c retval = __videobuf_streamoff(q);
q 778 drivers/media/v4l2-core/videobuf-core.c videobuf_queue_unlock(q);
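videobuf_streamon() (q 734-756 above) marks the queue streaming and then, under the driver's irq lock, hands every buffer that userspace queued before STREAMON to the buf_queue hook. A simplified sketch of that core, omitting the early-outs for reading/already-streaming queues and the outer vb_lock (assumes <media/videobuf-core.h>):

    static void streamon_core(struct videobuf_queue *q)
    {
            struct videobuf_buffer *buf;
            unsigned long flags;

            q->streaming = 1;
            spin_lock_irqsave(q->irqlock, flags);
            list_for_each_entry(buf, &q->stream, stream)
                    if (buf->state == VIDEOBUF_QUEUED)
                            q->ops->buf_queue(q, buf);      /* start pending I/O */
            spin_unlock_irqrestore(q->irqlock, flags);
            wake_up_interruptible_sync(&q->wait);
    }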
q 785 drivers/media/v4l2-core/videobuf-core.c static ssize_t videobuf_read_zerocopy(struct videobuf_queue *q,
q 793 drivers/media/v4l2-core/videobuf-core.c MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
q 796 drivers/media/v4l2-core/videobuf-core.c q->read_buf = videobuf_alloc_vb(q);
q 797 drivers/media/v4l2-core/videobuf-core.c if (NULL == q->read_buf)
q 800 drivers/media/v4l2-core/videobuf-core.c q->read_buf->memory = V4L2_MEMORY_USERPTR;
q 801 drivers/media/v4l2-core/videobuf-core.c q->read_buf->baddr = (unsigned long)data;
q 802 drivers/media/v4l2-core/videobuf-core.c q->read_buf->bsize = count;
q 804 drivers/media/v4l2-core/videobuf-core.c field = videobuf_next_field(q);
q 805 drivers/media/v4l2-core/videobuf-core.c retval = q->ops->buf_prepare(q, q->read_buf, field);
q 810 drivers/media/v4l2-core/videobuf-core.c spin_lock_irqsave(q->irqlock, flags);
q 811 drivers/media/v4l2-core/videobuf-core.c q->ops->buf_queue(q, q->read_buf);
q 812 drivers/media/v4l2-core/videobuf-core.c spin_unlock_irqrestore(q->irqlock, flags);
q 813 drivers/media/v4l2-core/videobuf-core.c retval = videobuf_waiton(q, q->read_buf, 0, 0);
q 815 drivers/media/v4l2-core/videobuf-core.c CALL(q, sync, q, q->read_buf);
q 816 drivers/media/v4l2-core/videobuf-core.c if (VIDEOBUF_ERROR == q->read_buf->state)
q 819 drivers/media/v4l2-core/videobuf-core.c retval = q->read_buf->size;
q 824 drivers/media/v4l2-core/videobuf-core.c q->ops->buf_release(q, q->read_buf);
q 825 drivers/media/v4l2-core/videobuf-core.c kfree(q->read_buf);
q 826 drivers/media/v4l2-core/videobuf-core.c q->read_buf = NULL;
q 830 drivers/media/v4l2-core/videobuf-core.c static int __videobuf_copy_to_user(struct videobuf_queue *q,
q 835 drivers/media/v4l2-core/videobuf-core.c void *vaddr = CALLPTR(q, vaddr, buf);
q 838 drivers/media/v4l2-core/videobuf-core.c if (count > buf->size - q->read_off)
q 839 drivers/media/v4l2-core/videobuf-core.c count = buf->size - q->read_off;
q 841 drivers/media/v4l2-core/videobuf-core.c if (copy_to_user(data, vaddr + q->read_off, count))
q 847 drivers/media/v4l2-core/videobuf-core.c static int __videobuf_copy_stream(struct videobuf_queue *q,
q 852 drivers/media/v4l2-core/videobuf-core.c unsigned int *fc = CALLPTR(q, vaddr, buf);
q 865 drivers/media/v4l2-core/videobuf-core.c count = __videobuf_copy_to_user(q, buf, data, count, nonblocking);
q 873 drivers/media/v4l2-core/videobuf-core.c ssize_t videobuf_read_one(struct videobuf_queue *q,
q 882 drivers/media/v4l2-core/videobuf-core.c MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
q 884 drivers/media/v4l2-core/videobuf-core.c videobuf_queue_lock(q);
q 886 drivers/media/v4l2-core/videobuf-core.c q->ops->buf_setup(q, &nbufs, &size);
q 888 drivers/media/v4l2-core/videobuf-core.c if (NULL == q->read_buf &&
q 891 drivers/media/v4l2-core/videobuf-core.c retval = videobuf_read_zerocopy(q, data, count, ppos);
q 898 drivers/media/v4l2-core/videobuf-core.c if (NULL == q->read_buf) {
q 901 drivers/media/v4l2-core/videobuf-core.c q->read_buf = videobuf_alloc_vb(q);
q 903 drivers/media/v4l2-core/videobuf-core.c dprintk(1, "video alloc=0x%p\n", q->read_buf);
q 904 drivers/media/v4l2-core/videobuf-core.c if (NULL == q->read_buf)
q 906 drivers/media/v4l2-core/videobuf-core.c q->read_buf->memory = V4L2_MEMORY_USERPTR;
q 907 drivers/media/v4l2-core/videobuf-core.c q->read_buf->bsize = count; /* preferred size */
q 908 drivers/media/v4l2-core/videobuf-core.c field = videobuf_next_field(q);
q 909 drivers/media/v4l2-core/videobuf-core.c retval = q->ops->buf_prepare(q, q->read_buf, field);
q 912 drivers/media/v4l2-core/videobuf-core.c kfree(q->read_buf);
q 913 drivers/media/v4l2-core/videobuf-core.c q->read_buf = NULL;
q 917 drivers/media/v4l2-core/videobuf-core.c spin_lock_irqsave(q->irqlock, flags);
q 918 drivers/media/v4l2-core/videobuf-core.c q->ops->buf_queue(q, q->read_buf);
q 919 drivers/media/v4l2-core/videobuf-core.c spin_unlock_irqrestore(q->irqlock, flags);
q 921 drivers/media/v4l2-core/videobuf-core.c q->read_off = 0;
q 925 drivers/media/v4l2-core/videobuf-core.c retval = videobuf_waiton(q, q->read_buf, nonblocking, 1);
q 929 drivers/media/v4l2-core/videobuf-core.c CALL(q, sync, q, q->read_buf);
q 931 drivers/media/v4l2-core/videobuf-core.c if (VIDEOBUF_ERROR == q->read_buf->state) {
q 933 drivers/media/v4l2-core/videobuf-core.c q->ops->buf_release(q, q->read_buf);
q 934 drivers/media/v4l2-core/videobuf-core.c kfree(q->read_buf);
q 935 drivers/media/v4l2-core/videobuf-core.c q->read_buf = NULL;
q 941 drivers/media/v4l2-core/videobuf-core.c retval = __videobuf_copy_to_user(q, q->read_buf, data, count, nonblocking);
q 945 drivers/media/v4l2-core/videobuf-core.c q->read_off += retval;
q 946 drivers/media/v4l2-core/videobuf-core.c if (q->read_off == q->read_buf->size) {
q 948 drivers/media/v4l2-core/videobuf-core.c q->ops->buf_release(q, q->read_buf);
q 949 drivers/media/v4l2-core/videobuf-core.c kfree(q->read_buf);
q 950 drivers/media/v4l2-core/videobuf-core.c q->read_buf = NULL;
q 954 drivers/media/v4l2-core/videobuf-core.c videobuf_queue_unlock(q);
q 960 drivers/media/v4l2-core/videobuf-core.c static int __videobuf_read_start(struct videobuf_queue *q)
q 967 drivers/media/v4l2-core/videobuf-core.c q->ops->buf_setup(q, &count, &size);
q 974 drivers/media/v4l2-core/videobuf-core.c err = __videobuf_mmap_setup(q, count, size, V4L2_MEMORY_USERPTR);
q 981 drivers/media/v4l2-core/videobuf-core.c field = videobuf_next_field(q);
q 982 drivers/media/v4l2-core/videobuf-core.c err = q->ops->buf_prepare(q, q->bufs[i], field);
q 985 drivers/media/v4l2-core/videobuf-core.c list_add_tail(&q->bufs[i]->stream, &q->stream);
q 987 drivers/media/v4l2-core/videobuf-core.c spin_lock_irqsave(q->irqlock, flags);
q 989 drivers/media/v4l2-core/videobuf-core.c q->ops->buf_queue(q, q->bufs[i]);
q 990 drivers/media/v4l2-core/videobuf-core.c spin_unlock_irqrestore(q->irqlock, flags);
q 991 drivers/media/v4l2-core/videobuf-core.c q->reading = 1;
q 995 drivers/media/v4l2-core/videobuf-core.c static void __videobuf_read_stop(struct videobuf_queue *q)
q 999 drivers/media/v4l2-core/videobuf-core.c videobuf_queue_cancel(q);
q 1000 drivers/media/v4l2-core/videobuf-core.c __videobuf_free(q);
q 1001 drivers/media/v4l2-core/videobuf-core.c INIT_LIST_HEAD(&q->stream);
q 1003 drivers/media/v4l2-core/videobuf-core.c if (NULL == q->bufs[i])
q 1005 drivers/media/v4l2-core/videobuf-core.c kfree(q->bufs[i]);
q 1006 drivers/media/v4l2-core/videobuf-core.c q->bufs[i] = NULL;
q 1008 drivers/media/v4l2-core/videobuf-core.c q->read_buf = NULL;
q 1011 drivers/media/v4l2-core/videobuf-core.c int videobuf_read_start(struct videobuf_queue *q)
q 1015 drivers/media/v4l2-core/videobuf-core.c videobuf_queue_lock(q);
q 1016 drivers/media/v4l2-core/videobuf-core.c rc = __videobuf_read_start(q);
q 1017 drivers/media/v4l2-core/videobuf-core.c videobuf_queue_unlock(q);
q 1023 drivers/media/v4l2-core/videobuf-core.c void videobuf_read_stop(struct videobuf_queue *q)
q 1025 drivers/media/v4l2-core/videobuf-core.c videobuf_queue_lock(q);
q 1026 drivers/media/v4l2-core/videobuf-core.c __videobuf_read_stop(q);
q 1027 drivers/media/v4l2-core/videobuf-core.c videobuf_queue_unlock(q);
q 1031 drivers/media/v4l2-core/videobuf-core.c void videobuf_stop(struct videobuf_queue *q)
q 1033 drivers/media/v4l2-core/videobuf-core.c videobuf_queue_lock(q);
q 1035 drivers/media/v4l2-core/videobuf-core.c if (q->streaming)
q 1036 drivers/media/v4l2-core/videobuf-core.c __videobuf_streamoff(q);
q 1038 drivers/media/v4l2-core/videobuf-core.c if (q->reading)
q 1039 drivers/media/v4l2-core/videobuf-core.c __videobuf_read_stop(q);
q 1041 drivers/media/v4l2-core/videobuf-core.c videobuf_queue_unlock(q);
q 1045 drivers/media/v4l2-core/videobuf-core.c ssize_t videobuf_read_stream(struct videobuf_queue *q,
q 1052 drivers/media/v4l2-core/videobuf-core.c MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
q 1055 drivers/media/v4l2-core/videobuf-core.c videobuf_queue_lock(q);
q 1057 drivers/media/v4l2-core/videobuf-core.c if (q->streaming)
q 1059 drivers/media/v4l2-core/videobuf-core.c if (!q->reading) {
q 1060 drivers/media/v4l2-core/videobuf-core.c retval = __videobuf_read_start(q);
q 1068 drivers/media/v4l2-core/videobuf-core.c if (NULL == q->read_buf) {
q 1069 drivers/media/v4l2-core/videobuf-core.c q->read_buf = list_entry(q->stream.next,
q 1072 drivers/media/v4l2-core/videobuf-core.c list_del(&q->read_buf->stream);
q 1073 drivers/media/v4l2-core/videobuf-core.c q->read_off = 0;
q 1075 drivers/media/v4l2-core/videobuf-core.c rc = videobuf_waiton(q, q->read_buf, nonblocking, 1);
q 1082 drivers/media/v4l2-core/videobuf-core.c if (q->read_buf->state == VIDEOBUF_DONE) {
q 1083 drivers/media/v4l2-core/videobuf-core.c rc = __videobuf_copy_stream(q, q->read_buf, data + retval, count,
q 1091 drivers/media/v4l2-core/videobuf-core.c q->read_off += rc;
q 1094 drivers/media/v4l2-core/videobuf-core.c q->read_off = q->read_buf->size;
q 1100 drivers/media/v4l2-core/videobuf-core.c if (q->read_off == q->read_buf->size) {
q 1101 drivers/media/v4l2-core/videobuf-core.c list_add_tail(&q->read_buf->stream,
q 1102 drivers/media/v4l2-core/videobuf-core.c &q->stream);
q 1103 drivers/media/v4l2-core/videobuf-core.c spin_lock_irqsave(q->irqlock, flags);
q 1104 drivers/media/v4l2-core/videobuf-core.c q->ops->buf_queue(q, q->read_buf);
q 1105 drivers/media/v4l2-core/videobuf-core.c spin_unlock_irqrestore(q->irqlock, flags);
q 1106 drivers/media/v4l2-core/videobuf-core.c q->read_buf = NULL;
q 1113 drivers/media/v4l2-core/videobuf-core.c videobuf_queue_unlock(q);
q 1119 drivers/media/v4l2-core/videobuf-core.c struct videobuf_queue *q,
q 1126 drivers/media/v4l2-core/videobuf-core.c videobuf_queue_lock(q);
q 1127 drivers/media/v4l2-core/videobuf-core.c if (q->streaming) {
q 1128 drivers/media/v4l2-core/videobuf-core.c if (!list_empty(&q->stream))
q 1129 drivers/media/v4l2-core/videobuf-core.c buf = list_entry(q->stream.next,
q 1132 drivers/media/v4l2-core/videobuf-core.c if (!q->reading)
q 1133 drivers/media/v4l2-core/videobuf-core.c __videobuf_read_start(q);
q 1134 drivers/media/v4l2-core/videobuf-core.c if (!q->reading) {
q 1136 drivers/media/v4l2-core/videobuf-core.c } else if (NULL == q->read_buf) {
q 1137 drivers/media/v4l2-core/videobuf-core.c q->read_buf = list_entry(q->stream.next,
q 1140 drivers/media/v4l2-core/videobuf-core.c list_del(&q->read_buf->stream);
q 1141 drivers/media/v4l2-core/videobuf-core.c q->read_off = 0;
q 1143 drivers/media/v4l2-core/videobuf-core.c buf = q->read_buf;
q 1153 drivers/media/v4l2-core/videobuf-core.c switch (q->type) {
q 1166 drivers/media/v4l2-core/videobuf-core.c videobuf_queue_unlock(q);
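The videobuf_read_stream() entries above (q 1068-1106) show the partial-read bookkeeping: read_off advances through the current buffer, and once it reaches the buffer's size the buffer goes back on the stream list and is re-queued. The same accounting in a self-contained, plain-C form (generic structures, not the kernel's):

    #include <string.h>

    struct stream_buf {
            const char *data;
            size_t size;
    };

    /* Copy up to 'count' bytes starting at *read_off; the caller recycles
     * the buffer when *read_off reaches b->size, as videobuf does. */
    static size_t copy_chunk(const struct stream_buf *b, size_t *read_off,
                             char *dst, size_t count)
    {
            size_t n = b->size - *read_off;

            if (n > count)
                    n = count;
            memcpy(dst, b->data + *read_off, n);
            *read_off += n;
            return n;
    }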
q 1189 drivers/media/v4l2-core/videobuf-core.c rc = CALL(q, mmap_mapper, q, buf, vma);
q 1193 drivers/media/v4l2-core/videobuf-core.c videobuf_queue_unlock(q);
q 67 drivers/media/v4l2-core/videobuf-dma-contig.c dev_dbg(map->q->dev, "vm_open %p [count=%u,vma=%08lx-%08lx]\n",
q 76 drivers/media/v4l2-core/videobuf-dma-contig.c struct videobuf_queue *q = map->q;
q 79 drivers/media/v4l2-core/videobuf-dma-contig.c dev_dbg(q->dev, "vm_close %p [count=%u,vma=%08lx-%08lx]\n",
q 86 drivers/media/v4l2-core/videobuf-dma-contig.c dev_dbg(q->dev, "munmap %p q=%p\n", map, q);
q 87 drivers/media/v4l2-core/videobuf-dma-contig.c videobuf_queue_lock(q);
q 90 drivers/media/v4l2-core/videobuf-dma-contig.c if (q->streaming)
q 91 drivers/media/v4l2-core/videobuf-dma-contig.c videobuf_queue_cancel(q);
q 94 drivers/media/v4l2-core/videobuf-dma-contig.c if (NULL == q->bufs[i])
q 97 drivers/media/v4l2-core/videobuf-dma-contig.c if (q->bufs[i]->map != map)
q 100 drivers/media/v4l2-core/videobuf-dma-contig.c mem = q->bufs[i]->priv;
q 113 drivers/media/v4l2-core/videobuf-dma-contig.c dev_dbg(q->dev, "buf[%d] freeing %p\n",
q 116 drivers/media/v4l2-core/videobuf-dma-contig.c __videobuf_dc_free(q->dev, mem);
q 120 drivers/media/v4l2-core/videobuf-dma-contig.c q->bufs[i]->map = NULL;
q 121 drivers/media/v4l2-core/videobuf-dma-contig.c q->bufs[i]->baddr = 0;
q 126 drivers/media/v4l2-core/videobuf-dma-contig.c videobuf_queue_unlock(q);
q 234 drivers/media/v4l2-core/videobuf-dma-contig.c static int __videobuf_iolock(struct videobuf_queue *q,
q 245 drivers/media/v4l2-core/videobuf-dma-contig.c dev_dbg(q->dev, "%s memory method MMAP\n", __func__);
q 249 drivers/media/v4l2-core/videobuf-dma-contig.c dev_err(q->dev, "memory is not allocated/mmapped.\n");
q 254 drivers/media/v4l2-core/videobuf-dma-contig.c dev_dbg(q->dev, "%s memory method USERPTR\n", __func__);
q 261 drivers/media/v4l2-core/videobuf-dma-contig.c if (__videobuf_dc_alloc(q->dev, mem, PAGE_ALIGN(vb->size),
q 267 drivers/media/v4l2-core/videobuf-dma-contig.c dev_dbg(q->dev, "%s memory method OVERLAY/unknown\n", __func__);
q 274 drivers/media/v4l2-core/videobuf-dma-contig.c static int __videobuf_mmap_mapper(struct videobuf_queue *q,
q 282 drivers/media/v4l2-core/videobuf-dma-contig.c dev_dbg(q->dev, "%s\n", __func__);
q 290 drivers/media/v4l2-core/videobuf-dma-contig.c map->q = q;
q 298 drivers/media/v4l2-core/videobuf-dma-contig.c if (__videobuf_dc_alloc(q->dev, mem, PAGE_ALIGN(buf->bsize),
q 315 drivers/media/v4l2-core/videobuf-dma-contig.c dev_err(q->dev, "mmap: remap failed with error %d. ",
q 317 drivers/media/v4l2-core/videobuf-dma-contig.c dma_free_coherent(q->dev, mem->size,
q 326 drivers/media/v4l2-core/videobuf-dma-contig.c dev_dbg(q->dev, "mmap %p: q=%p %08lx-%08lx (%lx) pgoff %08lx buf %d\n",
q 327 drivers/media/v4l2-core/videobuf-dma-contig.c map, q, vma->vm_start, vma->vm_end,
q 347 drivers/media/v4l2-core/videobuf-dma-contig.c void videobuf_queue_dma_contig_init(struct videobuf_queue *q,
q 357 drivers/media/v4l2-core/videobuf-dma-contig.c videobuf_queue_core_init(q, ops, dev, irqlock, type, field, msize,
q 373 drivers/media/v4l2-core/videobuf-dma-contig.c void videobuf_dma_contig_free(struct videobuf_queue *q,
q 400 drivers/media/v4l2-core/videobuf-dma-contig.c __videobuf_dc_free(q->dev, mem);
q 400 drivers/media/v4l2-core/videobuf-dma-sg.c struct videobuf_queue *q = map->q;
q 409 drivers/media/v4l2-core/videobuf-dma-sg.c dprintk(1, "munmap %p q=%p\n", map, q);
q 410 drivers/media/v4l2-core/videobuf-dma-sg.c videobuf_queue_lock(q);
q 412 drivers/media/v4l2-core/videobuf-dma-sg.c if (NULL == q->bufs[i])
q 414 drivers/media/v4l2-core/videobuf-dma-sg.c mem = q->bufs[i]->priv;
q 420 drivers/media/v4l2-core/videobuf-dma-sg.c if (q->bufs[i]->map != map)
q 422 drivers/media/v4l2-core/videobuf-dma-sg.c q->bufs[i]->map = NULL;
q 423 drivers/media/v4l2-core/videobuf-dma-sg.c q->bufs[i]->baddr = 0;
q 424 drivers/media/v4l2-core/videobuf-dma-sg.c q->ops->buf_release(q, q->bufs[i]);
q 426 drivers/media/v4l2-core/videobuf-dma-sg.c videobuf_queue_unlock(q);
q 502 drivers/media/v4l2-core/videobuf-dma-sg.c static int __videobuf_iolock(struct videobuf_queue *q,
q 514 drivers/media/v4l2-core/videobuf-dma-sg.c mem->dma.dev = q->dev;
q 516 drivers/media/v4l2-core/videobuf-dma-sg.c WARN_ON(mem->dma.dev != q->dev);
q 568 drivers/media/v4l2-core/videobuf-dma-sg.c err = videobuf_dma_map(q->dev, &mem->dma);
q 575 drivers/media/v4l2-core/videobuf-dma-sg.c static int __videobuf_sync(struct videobuf_queue *q,
q 584 drivers/media/v4l2-core/videobuf-dma-sg.c dma_sync_sg_for_cpu(q->dev, mem->dma.sglist,
q 590 drivers/media/v4l2-core/videobuf-dma-sg.c static int __videobuf_mmap_mapper(struct videobuf_queue *q,
q 606 drivers/media/v4l2-core/videobuf-dma-sg.c if (buf == q->bufs[first]) {
q 607 drivers/media/v4l2-core/videobuf-dma-sg.c size = PAGE_ALIGN(q->bufs[first]->bsize);
q 629 drivers/media/v4l2-core/videobuf-dma-sg.c if (NULL == q->bufs[i])
q 631 drivers/media/v4l2-core/videobuf-dma-sg.c q->bufs[i]->map = map;
q 632 drivers/media/v4l2-core/videobuf-dma-sg.c q->bufs[i]->baddr = vma->vm_start + size;
q 633 drivers/media/v4l2-core/videobuf-dma-sg.c size += PAGE_ALIGN(q->bufs[i]->bsize);
q 637 drivers/media/v4l2-core/videobuf-dma-sg.c map->q = q;
q 643 drivers/media/v4l2-core/videobuf-dma-sg.c map, q, vma->vm_start, vma->vm_end, vma->vm_pgoff, first, last);
q 662 drivers/media/v4l2-core/videobuf-dma-sg.c struct videobuf_queue q;
q 665 drivers/media/v4l2-core/videobuf-dma-sg.c q.int_ops = &sg_ops;
q 667 drivers/media/v4l2-core/videobuf-dma-sg.c q.msize = size;
q 669 drivers/media/v4l2-core/videobuf-dma-sg.c return videobuf_alloc_vb(&q);
q 673 drivers/media/v4l2-core/videobuf-dma-sg.c void videobuf_queue_sg_init(struct videobuf_queue *q,
q 683 drivers/media/v4l2-core/videobuf-dma-sg.c videobuf_queue_core_init(q, ops, dev, irqlock, type, field, msize,
q 64 drivers/media/v4l2-core/videobuf-vmalloc.c struct videobuf_queue *q = map->q;
q 74 drivers/media/v4l2-core/videobuf-vmalloc.c dprintk(1, "munmap %p q=%p\n", map, q);
q 75 drivers/media/v4l2-core/videobuf-vmalloc.c videobuf_queue_lock(q);
q 78 drivers/media/v4l2-core/videobuf-vmalloc.c if (q->streaming)
q 79 drivers/media/v4l2-core/videobuf-vmalloc.c videobuf_queue_cancel(q);
q 82 drivers/media/v4l2-core/videobuf-vmalloc.c if (NULL == q->bufs[i])
q 85 drivers/media/v4l2-core/videobuf-vmalloc.c if (q->bufs[i]->map != map)
q 88 drivers/media/v4l2-core/videobuf-vmalloc.c mem = q->bufs[i]->priv;
q 108 drivers/media/v4l2-core/videobuf-vmalloc.c q->bufs[i]->map = NULL;
q 109 drivers/media/v4l2-core/videobuf-vmalloc.c q->bufs[i]->baddr = 0;
q 114 drivers/media/v4l2-core/videobuf-vmalloc.c videobuf_queue_unlock(q);
q 154 drivers/media/v4l2-core/videobuf-vmalloc.c static int __videobuf_iolock(struct videobuf_queue *q,
q 209 drivers/media/v4l2-core/videobuf-vmalloc.c static int __videobuf_mmap_mapper(struct videobuf_queue *q,
q 225 drivers/media/v4l2-core/videobuf-vmalloc.c map->q = q;
q 254 drivers/media/v4l2-core/videobuf-vmalloc.c map, q, vma->vm_start, vma->vm_end,
q 277 drivers/media/v4l2-core/videobuf-vmalloc.c void videobuf_queue_vmalloc_init(struct videobuf_queue *q,
q 287 drivers/media/v4l2-core/videobuf-vmalloc.c videobuf_queue_core_init(q, ops, dev, irqlock, type, field, msize,
q 702 drivers/memstick/core/mspro_block.c msb->seg_count = blk_rq_map_sg(msb->block_req->q,
q 848 drivers/misc/habanalabs/goya/goya.c struct hl_hw_queue *q;
q 854 drivers/misc/habanalabs/goya/goya.c q = &hdev->kernel_queues[0];
q 856 drivers/misc/habanalabs/goya/goya.c for (i = 0 ; i < NUMBER_OF_EXT_HW_QUEUES ; i++, q++) {
q 857 drivers/misc/habanalabs/goya/goya.c goya_init_dma_qman(hdev, i, q->bus_address);
q 1445 drivers/misc/habanalabs/habanalabs.h int hl_cq_init(struct hl_device *hdev, struct hl_cq *q, u32 hw_queue_id);
q 1446 drivers/misc/habanalabs/habanalabs.h void hl_cq_fini(struct hl_device *hdev, struct hl_cq *q);
q 1447 drivers/misc/habanalabs/habanalabs.h int hl_eq_init(struct hl_device *hdev, struct hl_eq *q);
q 1448 drivers/misc/habanalabs/habanalabs.h void hl_eq_fini(struct hl_device *hdev, struct hl_eq *q);
q 1449 drivers/misc/habanalabs/habanalabs.h void hl_cq_reset(struct hl_device *hdev, struct hl_cq *q);
q 1450 drivers/misc/habanalabs/habanalabs.h void hl_eq_reset(struct hl_device *hdev, struct hl_eq *q);
q 27 drivers/misc/habanalabs/hw_queue.c static inline int queue_free_slots(struct hl_hw_queue *q, u32 queue_len)
q 29 drivers/misc/habanalabs/hw_queue.c int delta = (q->pi - q->ci);
q 40 drivers/misc/habanalabs/hw_queue.c struct hl_hw_queue *q;
q 48 drivers/misc/habanalabs/hw_queue.c q = &hdev->kernel_queues[0];
q 49 drivers/misc/habanalabs/hw_queue.c for (i = 0 ; i < HL_MAX_QUEUES ; i++, q++) {
q 50 drivers/misc/habanalabs/hw_queue.c if (q->queue_type == QUEUE_TYPE_INT) {
q 51 drivers/misc/habanalabs/hw_queue.c q->ci += cs->jobs_in_queue_cnt[i];
q 52 drivers/misc/habanalabs/hw_queue.c q->ci &= ((q->int_queue_len << 1) - 1);
q 76 drivers/misc/habanalabs/hw_queue.c static void ext_queue_submit_bd(struct hl_device *hdev, struct hl_hw_queue *q,
q 81 drivers/misc/habanalabs/hw_queue.c bd = (struct hl_bd *) (uintptr_t) q->kernel_address;
q 82 drivers/misc/habanalabs/hw_queue.c bd += hl_pi_2_offset(q->pi);
q 87 drivers/misc/habanalabs/hw_queue.c q->pi = hl_queue_inc_ptr(q->pi);
q 88 drivers/misc/habanalabs/hw_queue.c hdev->asic_funcs->ring_doorbell(hdev, q->hw_queue_id, q->pi);
q 110 drivers/misc/habanalabs/hw_queue.c struct hl_hw_queue *q, int num_of_entries,
q 114 drivers/misc/habanalabs/hw_queue.c &hdev->completion_queue[q->hw_queue_id].free_slots_cnt;
q 118 drivers/misc/habanalabs/hw_queue.c free_slots_cnt = queue_free_slots(q, HL_QUEUE_LENGTH);
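
queue_free_slots() above works on a producer/consumer ring whose pi/ci indices advance modulo twice the queue length (hence the & ((q->int_queue_len << 1) - 1) masks), which lets a full ring (pi - ci == len) be told apart from an empty one (pi == ci). A minimal sketch of that convention; this is one possible formulation, not the driver's exact code:

    #include <stdint.h>

    /* pi and ci run modulo 2*len (len is a power of two): pi == ci means empty,
     * (pi - ci) == len means full, and the extra bit disambiguates the two. */
    static inline uint32_t ring_free_slots(uint32_t pi, uint32_t ci, uint32_t len)
    {
        uint32_t used = (pi - ci) & ((len << 1) - 1);  /* occupied slots */
        return len - used;
    }

    static inline uint32_t ring_advance(uint32_t idx, uint32_t len)
    {
        return (idx + 1) & ((len << 1) - 1);  /* wrap at 2*len, as q->pi does */
    }
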
q 122 drivers/misc/habanalabs/hw_queue.c q->hw_queue_id, num_of_entries);
q 136 drivers/misc/habanalabs/hw_queue.c num_of_entries, q->hw_queue_id);
q 159 drivers/misc/habanalabs/hw_queue.c struct hl_hw_queue *q,
q 165 drivers/misc/habanalabs/hw_queue.c free_slots_cnt = queue_free_slots(q, q->int_queue_len);
q 169 drivers/misc/habanalabs/hw_queue.c q->hw_queue_id, num_of_entries);
q 190 drivers/misc/habanalabs/hw_queue.c struct hl_hw_queue *q = &hdev->kernel_queues[hw_queue_id];
q 201 drivers/misc/habanalabs/hw_queue.c if (q->queue_type != QUEUE_TYPE_CPU)
q 209 drivers/misc/habanalabs/hw_queue.c rc = ext_queue_sanity_checks(hdev, q, 1, false);
q 213 drivers/misc/habanalabs/hw_queue.c ext_queue_submit_bd(hdev, q, 0, cb_size, cb_ptr);
q 216 drivers/misc/habanalabs/hw_queue.c if (q->queue_type != QUEUE_TYPE_CPU)
q 233 drivers/misc/habanalabs/hw_queue.c struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id];
q 246 drivers/misc/habanalabs/hw_queue.c ctl = ((q->pi << BD_CTL_SHADOW_INDEX_SHIFT) & BD_CTL_SHADOW_INDEX_MASK);
q 253 drivers/misc/habanalabs/hw_queue.c ((q->pi << CQ_ENTRY_SHADOW_INDEX_SHIFT)
q 265 drivers/misc/habanalabs/hw_queue.c cq = &hdev->completion_queue[q->hw_queue_id];
q 271 drivers/misc/habanalabs/hw_queue.c q->hw_queue_id);
q 273 drivers/misc/habanalabs/hw_queue.c q->shadow_queue[hl_pi_2_offset(q->pi)] = job;
q 277 drivers/misc/habanalabs/hw_queue.c ext_queue_submit_bd(hdev, q, ctl, len, ptr);
q 291 drivers/misc/habanalabs/hw_queue.c struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id];
q 299 drivers/misc/habanalabs/hw_queue.c pi = (__le64 *) (uintptr_t) (q->kernel_address +
q 300 drivers/misc/habanalabs/hw_queue.c ((q->pi & (q->int_queue_len - 1)) * sizeof(bd)));
q 302 drivers/misc/habanalabs/hw_queue.c q->pi++;
q 303 drivers/misc/habanalabs/hw_queue.c q->pi &= ((q->int_queue_len << 1) - 1);
q 307 drivers/misc/habanalabs/hw_queue.c hdev->asic_funcs->ring_doorbell(hdev, q->hw_queue_id, q->pi);
q 320 drivers/misc/habanalabs/hw_queue.c struct hl_hw_queue *q;
q 332 drivers/misc/habanalabs/hw_queue.c q = &hdev->kernel_queues[0];
q 334 drivers/misc/habanalabs/hw_queue.c for (i = 0, cq_cnt = 0 ; i < HL_MAX_QUEUES ; i++, q++) {
q 335 drivers/misc/habanalabs/hw_queue.c if (q->queue_type == QUEUE_TYPE_EXT) {
q 337 drivers/misc/habanalabs/hw_queue.c rc = ext_queue_sanity_checks(hdev, q,
q 343 drivers/misc/habanalabs/hw_queue.c } else if (q->queue_type == QUEUE_TYPE_INT) {
q 345 drivers/misc/habanalabs/hw_queue.c rc = int_queue_sanity_checks(hdev, q,
q 387 drivers/misc/habanalabs/hw_queue.c q = &hdev->kernel_queues[0];
q 388 drivers/misc/habanalabs/hw_queue.c for (i = 0 ; (i < HL_MAX_QUEUES) && (cq_cnt > 0) ; i++, q++) {
q 389 drivers/misc/habanalabs/hw_queue.c if ((q->queue_type == QUEUE_TYPE_EXT) &&
q 412 drivers/misc/habanalabs/hw_queue.c struct hl_hw_queue *q = &hdev->kernel_queues[hw_queue_id];
q 414 drivers/misc/habanalabs/hw_queue.c q->ci = hl_queue_inc_ptr(q->ci);
q 418 drivers/misc/habanalabs/hw_queue.c struct hl_hw_queue *q, bool is_cpu_queue)
q 426 drivers/misc/habanalabs/hw_queue.c &q->bus_address);
q 430 drivers/misc/habanalabs/hw_queue.c &q->bus_address,
q 435 drivers/misc/habanalabs/hw_queue.c q->kernel_address = (u64) (uintptr_t) p;
q 437 drivers/misc/habanalabs/hw_queue.c q->shadow_queue = kmalloc_array(HL_QUEUE_LENGTH,
q 438 drivers/misc/habanalabs/hw_queue.c sizeof(*q->shadow_queue),
q 440 drivers/misc/habanalabs/hw_queue.c if (!q->shadow_queue) {
q 443 drivers/misc/habanalabs/hw_queue.c q->hw_queue_id);
q 449 drivers/misc/habanalabs/hw_queue.c q->ci = 0;
q 450 drivers/misc/habanalabs/hw_queue.c q->pi = 0;
q 458 drivers/misc/habanalabs/hw_queue.c (void *) (uintptr_t) q->kernel_address);
q 462 drivers/misc/habanalabs/hw_queue.c (void *) (uintptr_t) q->kernel_address,
q 463 drivers/misc/habanalabs/hw_queue.c q->bus_address);
q 468 drivers/misc/habanalabs/hw_queue.c static int int_hw_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
q 472 drivers/misc/habanalabs/hw_queue.c p = hdev->asic_funcs->get_int_queue_base(hdev, q->hw_queue_id,
q 473 drivers/misc/habanalabs/hw_queue.c &q->bus_address, &q->int_queue_len);
q 477 drivers/misc/habanalabs/hw_queue.c q->hw_queue_id);
q 481 drivers/misc/habanalabs/hw_queue.c q->kernel_address = (u64) (uintptr_t) p;
q 482 drivers/misc/habanalabs/hw_queue.c q->pi = 0;
q 483 drivers/misc/habanalabs/hw_queue.c q->ci = 0;
q 488 drivers/misc/habanalabs/hw_queue.c static int cpu_hw_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
q 490 drivers/misc/habanalabs/hw_queue.c return ext_and_cpu_hw_queue_init(hdev, q, true);
q 493 drivers/misc/habanalabs/hw_queue.c static int ext_hw_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
q 495 drivers/misc/habanalabs/hw_queue.c return ext_and_cpu_hw_queue_init(hdev, q, false);
q 508 drivers/misc/habanalabs/hw_queue.c static int hw_queue_init(struct hl_device *hdev, struct hl_hw_queue *q,
q 515 drivers/misc/habanalabs/hw_queue.c q->hw_queue_id = hw_queue_id;
q 517 drivers/misc/habanalabs/hw_queue.c switch (q->queue_type) {
q 519 drivers/misc/habanalabs/hw_queue.c rc = ext_hw_queue_init(hdev, q);
q 523 drivers/misc/habanalabs/hw_queue.c rc = int_hw_queue_init(hdev, q);
q 527 drivers/misc/habanalabs/hw_queue.c rc = cpu_hw_queue_init(hdev, q);
q 531 drivers/misc/habanalabs/hw_queue.c q->valid = 0;
q 536 drivers/misc/habanalabs/hw_queue.c q->queue_type);
q 544 drivers/misc/habanalabs/hw_queue.c q->valid = 1;
q 557 drivers/misc/habanalabs/hw_queue.c static void hw_queue_fini(struct hl_device *hdev, struct hl_hw_queue *q)
q 559 drivers/misc/habanalabs/hw_queue.c if (!q->valid)
q 580 drivers/misc/habanalabs/hw_queue.c if (q->queue_type == QUEUE_TYPE_INT)
q 583 drivers/misc/habanalabs/hw_queue.c kfree(q->shadow_queue);
q 585 drivers/misc/habanalabs/hw_queue.c if (q->queue_type == QUEUE_TYPE_CPU)
q 588 drivers/misc/habanalabs/hw_queue.c (void *) (uintptr_t) q->kernel_address);
q 592 drivers/misc/habanalabs/hw_queue.c (void *) (uintptr_t) q->kernel_address,
q 593 drivers/misc/habanalabs/hw_queue.c q->bus_address);
q 599 drivers/misc/habanalabs/hw_queue.c struct hl_hw_queue *q;
q 611 drivers/misc/habanalabs/hw_queue.c for (i = 0, q_ready_cnt = 0, q = hdev->kernel_queues;
q 612 drivers/misc/habanalabs/hw_queue.c i < HL_MAX_QUEUES ; i++, q_ready_cnt++, q++) {
q 614 drivers/misc/habanalabs/hw_queue.c q->queue_type = asic->hw_queues_props[i].type;
q 615 drivers/misc/habanalabs/hw_queue.c rc = hw_queue_init(hdev, q, i);
q 626 drivers/misc/habanalabs/hw_queue.c for (i = 0, q = hdev->kernel_queues ; i < q_ready_cnt ; i++, q++)
q 627 drivers/misc/habanalabs/hw_queue.c hw_queue_fini(hdev, q);
q 636 drivers/misc/habanalabs/hw_queue.c struct hl_hw_queue *q;
q 639 drivers/misc/habanalabs/hw_queue.c for (i = 0, q = hdev->kernel_queues ; i < HL_MAX_QUEUES ; i++, q++)
q 640 drivers/misc/habanalabs/hw_queue.c hw_queue_fini(hdev, q);
q 647 drivers/misc/habanalabs/hw_queue.c struct hl_hw_queue *q;
q 650 drivers/misc/habanalabs/hw_queue.c for (i = 0, q = hdev->kernel_queues ; i < HL_MAX_QUEUES ; i++, q++) {
q 651 drivers/misc/habanalabs/hw_queue.c if ((!q->valid) ||
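
hw_queue_init() above dispatches on q->queue_type and marks the queue valid only after its per-type init succeeds, so the later hw_queue_fini() loops can skip slots that were never initialized. An illustrative, self-contained rendering of that shape (types and helpers are stand-ins, not the driver's):

    enum queue_type { QT_EXT, QT_INT, QT_CPU, QT_NA };
    struct hwq { enum queue_type type; int valid; };

    /* Illustrative per-type init functions; the real ones allocate ring memory. */
    static int ext_init(struct hwq *q) { (void)q; return 0; }
    static int int_init(struct hwq *q) { (void)q; return 0; }
    static int cpu_init(struct hwq *q) { (void)q; return 0; }

    static int queue_init(struct hwq *q)
    {
        int rc;

        switch (q->type) {
        case QT_EXT: rc = ext_init(q); break;
        case QT_INT: rc = int_init(q); break;
        case QT_CPU: rc = cpu_init(q); break;
        default:
            q->valid = 0;   /* unsupported type: mark and skip */
            return 0;
        }
        if (rc)
            return rc;
        q->valid = 1;       /* teardown later runs only on valid queues */
        return 0;
    }
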
q 652 drivers/misc/habanalabs/hw_queue.c ((!hard_reset) && (q->queue_type == QUEUE_TYPE_CPU)))
q 654 drivers/misc/habanalabs/hw_queue.c q->pi = q->ci = 0;
q 218 drivers/misc/habanalabs/irq.c int hl_cq_init(struct hl_device *hdev, struct hl_cq *q, u32 hw_queue_id)
q 225 drivers/misc/habanalabs/irq.c &q->bus_address, GFP_KERNEL | __GFP_ZERO);
q 229 drivers/misc/habanalabs/irq.c q->hdev = hdev;
q 230 drivers/misc/habanalabs/irq.c q->kernel_address = (u64) (uintptr_t) p;
q 231 drivers/misc/habanalabs/irq.c q->hw_queue_id = hw_queue_id;
q 232 drivers/misc/habanalabs/irq.c q->ci = 0;
q 233 drivers/misc/habanalabs/irq.c q->pi = 0;
q 235 drivers/misc/habanalabs/irq.c atomic_set(&q->free_slots_cnt, HL_CQ_LENGTH);
q 248 drivers/misc/habanalabs/irq.c void hl_cq_fini(struct hl_device *hdev, struct hl_cq *q)
q 251 drivers/misc/habanalabs/irq.c (void *) (uintptr_t) q->kernel_address, q->bus_address);
q 254 drivers/misc/habanalabs/irq.c void hl_cq_reset(struct hl_device *hdev, struct hl_cq *q)
q 256 drivers/misc/habanalabs/irq.c q->ci = 0;
q 257 drivers/misc/habanalabs/irq.c q->pi = 0;
q 259 drivers/misc/habanalabs/irq.c atomic_set(&q->free_slots_cnt, HL_CQ_LENGTH);
q 268 drivers/misc/habanalabs/irq.c memset((void *) (uintptr_t) q->kernel_address, 0, HL_CQ_SIZE_IN_BYTES);
q 280 drivers/misc/habanalabs/irq.c int hl_eq_init(struct hl_device *hdev, struct hl_eq *q)
q 288 drivers/misc/habanalabs/irq.c &q->bus_address);
q 292 drivers/misc/habanalabs/irq.c q->hdev = hdev;
q 293 drivers/misc/habanalabs/irq.c q->kernel_address = (u64) (uintptr_t) p;
q 294 drivers/misc/habanalabs/irq.c q->ci = 0;
q 307 drivers/misc/habanalabs/irq.c void hl_eq_fini(struct hl_device *hdev, struct hl_eq *q)
q 313 drivers/misc/habanalabs/irq.c (void *) (uintptr_t) q->kernel_address);
q 316 drivers/misc/habanalabs/irq.c void hl_eq_reset(struct hl_device *hdev, struct hl_eq *q)
q 318 drivers/misc/habanalabs/irq.c q->ci = 0;
q 327 drivers/misc/habanalabs/irq.c memset((void *) (uintptr_t) q->kernel_address, 0, HL_EQ_SIZE_IN_BYTES);
q 992 drivers/misc/sgi-gru/grukservices.c #define ALIGNUP(p, q) ((void *)(((unsigned long)(p) + (q) - 1) & ~(q - 1)))
q 246 drivers/misc/vmw_vmci/vmci_queue_pair.c static void qp_free_queue(void *q, u64 size)
q 248 drivers/misc/vmw_vmci/vmci_queue_pair.c struct vmci_queue *queue = q;
q 1418 drivers/mmc/core/block.c struct request_queue *q = req->q;
q 1459 drivers/mmc/core/block.c blk_mq_run_hw_queues(q, true);
q 1487 drivers/mmc/core/block.c struct request_queue *q = req->q;
q 1488 drivers/mmc/core/block.c struct mmc_queue *mq = q->queuedata;
q 1719 drivers/mmc/core/block.c struct mmc_queue *mq = req->q->queuedata;
q 1926 drivers/mmc/core/block.c struct mmc_queue *mq = req->q->queuedata;
q 2044 drivers/mmc/core/block.c struct request_queue *q = req->q;
q 2045 drivers/mmc/core/block.c struct mmc_queue *mq = q->queuedata;
q 2641 drivers/mmc/core/block.c struct list_head *pos, *q;
q 2646 drivers/mmc/core/block.c list_for_each_safe(pos, q, &md->rpmbs) {
q 2652 drivers/mmc/core/block.c list_for_each_safe(pos, q, &md->part) {
q 87 drivers/mmc/core/queue.c struct request_queue *q = req->q;
q 88 drivers/mmc/core/queue.c struct mmc_queue *mq = q->queuedata;
q 100 drivers/mmc/core/queue.c struct mmc_queue *mq = req->q->queuedata;
q 124 drivers/mmc/core/queue.c struct request_queue *q = req->q;
q 125 drivers/mmc/core/queue.c struct mmc_queue *mq = q->queuedata;
q 140 drivers/mmc/core/queue.c struct request_queue *q = mq->queue;
q 159 drivers/mmc/core/queue.c blk_mq_run_hw_queues(q, true);
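
The ALIGNUP() macro indexed above rounds an address up with the classic (p + q - 1) & ~(q - 1) trick, which is only correct when q is a power of two. A small self-contained demonstration:

    #include <stdio.h>

    /* Round p up to the next multiple of align; align must be a power of two,
     * otherwise ~(align - 1) is not a valid mask. */
    static unsigned long align_up(unsigned long p, unsigned long align)
    {
        return (p + align - 1) & ~(align - 1);
    }

    int main(void)
    {
        printf("%lu\n", align_up(13, 8));  /* prints 16 */
        printf("%lu\n", align_up(16, 8));  /* prints 16: aligned input unchanged */
        return 0;
    }

For a non-power-of-two alignment the mask form silently fails; ((p + align - 1) / align) * align is the safe general version.
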
q 173 drivers/mmc/core/queue.c static void mmc_queue_setup_discard(struct request_queue *q,
q 182 drivers/mmc/core/queue.c blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
q 183 drivers/mmc/core/queue.c blk_queue_max_discard_sectors(q, max_discard);
q 184 drivers/mmc/core/queue.c q->limits.discard_granularity = card->pref_erase << 9;
q 187 drivers/mmc/core/queue.c q->limits.discard_granularity = 0;
q 189 drivers/mmc/core/queue.c blk_queue_flag_set(QUEUE_FLAG_SECERASE, q);
q 218 drivers/mmc/core/queue.c static void mmc_exit_request(struct request_queue *q, struct request *req)
q 244 drivers/mmc/core/queue.c struct request_queue *q = req->q;
q 245 drivers/mmc/core/queue.c struct mmc_queue *mq = q->queuedata;
q 494 drivers/mmc/core/queue.c struct request_queue *q = mq->queue;
q 500 drivers/mmc/core/queue.c if (blk_queue_quiesced(q))
q 501 drivers/mmc/core/queue.c blk_mq_unquiesce_queue(q);
q 503 drivers/mmc/core/queue.c blk_cleanup_queue(q);
q 189 drivers/mtd/ubi/block.c struct ubiblock *dev = req->q->queuedata;
q 305 drivers/mtd/ubi/block.c blk_rq_map_sg(req->q, req, pdu->usgl.sg);
q 502 drivers/net/appletalk/ltpc.c struct xmitQel *q = NULL;
q 573 drivers/net/appletalk/ltpc.c q=deQ();
q 574 drivers/net/appletalk/ltpc.c memcpy(ltdmacbuf,q->cbuf,q->cbuflen);
q 575 drivers/net/appletalk/ltpc.c ltdmacbuf[1] = q->mailbox;
q 579 drivers/net/appletalk/ltpc.c n = q->cbuflen;
q 610 drivers/net/appletalk/ltpc.c if(q->QWrite) {
q 611 drivers/net/appletalk/ltpc.c memcpy(ltdmabuf,q->dbuf,q->dbuflen);
q 618 drivers/net/appletalk/ltpc.c if(q->mailbox) {
q 619 drivers/net/appletalk/ltpc.c memcpy(q->dbuf,ltdmabuf,q->dbuflen);
q 378 drivers/net/can/rcar/rcar_canfd.c #define RCANFD_C_RMID(q) (0x0600 + (0x10 * (q)))
q 379 drivers/net/can/rcar/rcar_canfd.c #define RCANFD_C_RMPTR(q) (0x0604 + (0x10 * (q)))
q 380 drivers/net/can/rcar/rcar_canfd.c #define RCANFD_C_RMDF0(q) (0x0608 + (0x10 * (q)))
q 381 drivers/net/can/rcar/rcar_canfd.c #define RCANFD_C_RMDF1(q) (0x060c + (0x10 * (q)))
q 425 drivers/net/can/rcar/rcar_canfd.c #define RCANFD_F_RMID(q) (0x2000 + (0x20 * (q)))
q 426 drivers/net/can/rcar/rcar_canfd.c #define RCANFD_F_RMPTR(q) (0x2004 + (0x20 * (q)))
q 427 drivers/net/can/rcar/rcar_canfd.c #define RCANFD_F_RMFDSTS(q) (0x2008 + (0x20 * (q)))
q 428 drivers/net/can/rcar/rcar_canfd.c #define RCANFD_F_RMDF(q, b) (0x200c + (0x04 * (b)) + (0x20 * (q)))
q 128 drivers/net/ethernet/amazon/ena/ena_netdev.h #define ENA_IO_TXQ_IDX(q) (2 * (q))
q 129 drivers/net/ethernet/amazon/ena/ena_netdev.h #define ENA_IO_RXQ_IDX(q) (2 * (q) + 1)
q 130 drivers/net/ethernet/amazon/ena/ena_netdev.h #define ENA_IO_TXQ_IDX_TO_COMBINED_IDX(q) ((q) / 2)
q 131 drivers/net/ethernet/amazon/ena/ena_netdev.h #define ENA_IO_RXQ_IDX_TO_COMBINED_IDX(q) (((q) - 1) / 2)
q 135 drivers/net/ethernet/amazon/ena/ena_netdev.h #define ENA_IO_IRQ_IDX(q) (ENA_IO_IRQ_FIRST_IDX + (q))
q 434 drivers/net/ethernet/broadcom/bcmsysport.c unsigned int q;
q 436 drivers/net/ethernet/broadcom/bcmsysport.c for (q = 0; q < priv->netdev->num_tx_queues; q++) {
q 437 drivers/net/ethernet/broadcom/bcmsysport.c ring = &priv->tx_rings[q];
q 985 drivers/net/ethernet/broadcom/bcmsysport.c unsigned int q;
q 987 drivers/net/ethernet/broadcom/bcmsysport.c for (q = 0; q < priv->netdev->num_tx_queues; q++)
q 988 drivers/net/ethernet/broadcom/bcmsysport.c bcm_sysport_tx_reclaim(priv, &priv->tx_rings[q]);
q 2237 drivers/net/ethernet/broadcom/bcmsysport.c unsigned int q, port;
q 2243 drivers/net/ethernet/broadcom/bcmsysport.c q = BRCM_TAG_GET_QUEUE(queue);
q 2245 drivers/net/ethernet/broadcom/bcmsysport.c tx_ring = priv->ring_map[q + port * priv->per_port_num_tx_queues];
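
The ENA_IO_*_IDX macros above interleave TX and RX rings in a single array: combined channel q owns TX ring 2q and RX ring 2q + 1, and the *_TO_COMBINED_IDX macros invert the mapping. A tiny sanity check of that arithmetic (macro names shortened; the relation itself is taken from the lines above):

    #include <assert.h>

    #define IO_TXQ_IDX(q)                 (2 * (q))
    #define IO_RXQ_IDX(q)                 (2 * (q) + 1)
    #define IO_TXQ_IDX_TO_COMBINED_IDX(q) ((q) / 2)
    #define IO_RXQ_IDX_TO_COMBINED_IDX(q) (((q) - 1) / 2)

    int main(void)
    {
        for (int q = 0; q < 8; q++) {
            /* round-trips recover the channel number */
            assert(IO_TXQ_IDX_TO_COMBINED_IDX(IO_TXQ_IDX(q)) == q);
            assert(IO_RXQ_IDX_TO_COMBINED_IDX(IO_RXQ_IDX(q)) == q);
            /* TX rings land on even slots, RX rings on odd slots */
            assert(IO_TXQ_IDX(q) % 2 == 0 && IO_RXQ_IDX(q) % 2 == 1);
        }
        return 0;
    }
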
q 2275 drivers/net/ethernet/broadcom/bcmsysport.c unsigned int q, qp, port;
q 2314 drivers/net/ethernet/broadcom/bcmsysport.c for (q = 0, qp = 0; q < dev->num_tx_queues && qp < num_tx_queues;
q 2315 drivers/net/ethernet/broadcom/bcmsysport.c q++) {
q 2316 drivers/net/ethernet/broadcom/bcmsysport.c ring = &priv->tx_rings[q];
q 2342 drivers/net/ethernet/broadcom/bcmsysport.c unsigned int q, qp, port;
q 2358 drivers/net/ethernet/broadcom/bcmsysport.c for (q = 0; q < dev->num_tx_queues; q++) {
q 2359 drivers/net/ethernet/broadcom/bcmsysport.c ring = &priv->tx_rings[q];
q 5367 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c int q, rc;
q 5393 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c for_each_eth_queue(bp, q) {
q 5395 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c fp = &bp->fp[q];
q 5402 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c q);
q 5421 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c q);
q 14508 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c struct list_head *pos, *q;
q 14516 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c list_for_each_safe(pos, q, &bnx2x_prev_list) {
q 168 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c struct bnx2x_vf_queue *q,
q 185 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c init_p->rx.fw_sb_id = vf_igu_sb(vf, q->sb_idx);
q 186 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c init_p->tx.fw_sb_id = vf_igu_sb(vf, q->sb_idx);
q 189 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c init_p->cxts[0] = q->cxt;
q 195 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c setup_p->gen_params.stat_id = vfq_stat_id(vf, q);
q 219 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c rxq_p->cl_qzone_id = vfq_qzone_id(vf, q);
q 220 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c rxq_p->fw_sb_id = vf_igu_sb(vf, q->sb_idx);
q 230 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c setup_p->txq_params.fw_sb_id = vf_igu_sb(vf, q->sb_idx);
q 1422 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c struct bnx2x_vf_queue *q)
q 1424 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c u8 cl_id = vfq_cl_id(vf, q);
q 1432 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c bnx2x_init_queue_obj(bp, &q->sp_obj,
q 1433 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c cl_id, &q->cid, 1, func_id,
q 1439 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c q->sp_initialized = false;
q 1443 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c vf->abs_vfid, q->sp_obj.func_id, q->cid);
q 2084 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c struct bnx2x_vf_queue *q = vfq_get(vf, i);
q 2086 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c if (!q) {
q 2091 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c q->index = i;
q 2092 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c q->cxt = &((base_cxt + i)->eth);
q 2093 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c q->cid = BNX2X_FIRST_VF_CID + base_vf_cid + i;
q 2096 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c vf->abs_vfid, i, q->index, q->cid, q->cxt);
q 2099 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c bnx2x_vfq_init(bp, vf, q);
q 385 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h static u8 vfq_cl_id(struct bnx2x_virtf *vf, struct bnx2x_vf_queue *q)
q 387 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h return vf->igu_base_id + q->index;
q 390 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h static inline u8 vfq_stat_id(struct bnx2x_virtf *vf, struct bnx2x_vf_queue *q)
q 395 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h return vfq_cl_id(vf, q);
q 398 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h static inline u8 vfq_qzone_id(struct bnx2x_virtf *vf, struct bnx2x_vf_queue *q)
q 400 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h return vfq_cl_id(vf, q);
q 447 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h struct bnx2x_vf_queue *q,
q 543 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c struct bnx2x_vf_queue *q)
q 545 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c u8 cl_id = vfq_cl_id(vf, q);
q 549 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c bnx2x_init_mac_obj(bp, &q->mac_obj,
q 550 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c cl_id, q->cid, func_id,
q 558 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c bnx2x_init_vlan_obj(bp, &q->vlan_obj,
q 559 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c cl_id, q->cid, func_id,
q 567 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c bnx2x_init_vlan_mac_obj(bp, &q->vlan_mac_obj,
q 568 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c cl_id, q->cid, func_id,
q 578 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c q->cid, func_id, func_id,
q 586 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c bnx2x_init_rss_config_obj(bp, &vf->rss_conf_obj, cl_id, q->cid,
q 595 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c q->is_leading = true;
q 596 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c q->sp_initialized = true;
q 1537 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c struct bnx2x_vf_queue *q = vfq_get(vf, setup_q->vf_qid);
q 1543 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c if (bnx2x_vfq_is_leading(q))
q 1544 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c bnx2x_leading_vfq_init(bp, vf, q);
q 1562 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c q->sb_idx = setup_q->txq.vf_sb;
q 1583 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c q->index, q->sb_idx);
q 1595 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c q->sb_idx = setup_q->rxq.vf_sb;
q 1625 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c if (bnx2x_vfq_is_leading(q)) {
q 1633 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c q->index, q->sb_idx);
q 1636 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c bnx2x_vfop_qctor_prep(bp, vf, q, &qctor, q_type);
q 1638 drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c rc = bnx2x_vf_queue_setup(bp, vf, q->index, &qctor);
q 3064 drivers/net/ethernet/broadcom/genet/bcmgenet.c unsigned int q;
q 3068 drivers/net/ethernet/broadcom/genet/bcmgenet.c for (q = 0; q < priv->hw_params->tx_queues; q++)
q 3069 drivers/net/ethernet/broadcom/genet/bcmgenet.c bcmgenet_dump_tx_queue(&priv->tx_rings[q]);
q 3074 drivers/net/ethernet/broadcom/genet/bcmgenet.c for (q = 0; q < priv->hw_params->tx_queues; q++)
q 3075 drivers/net/ethernet/broadcom/genet/bcmgenet.c int1_enable |= (1 << q);
q 3179 drivers/net/ethernet/broadcom/genet/bcmgenet.c unsigned int q;
q 3181 drivers/net/ethernet/broadcom/genet/bcmgenet.c for (q = 0; q < priv->hw_params->tx_queues; q++) {
q 3182 drivers/net/ethernet/broadcom/genet/bcmgenet.c tx_ring = &priv->tx_rings[q];
q 3190 drivers/net/ethernet/broadcom/genet/bcmgenet.c for (q = 0; q < priv->hw_params->rx_queues; q++) {
q 3191 drivers/net/ethernet/broadcom/genet/bcmgenet.c rx_ring = &priv->rx_rings[q];
q 451 drivers/net/ethernet/broadcom/genet/bcmgenet.h #define DMA_PRIO_REG_INDEX(q) ((q) / 6)
q 452 drivers/net/ethernet/broadcom/genet/bcmgenet.h #define DMA_PRIO_REG_SHIFT(q) (((q) % 6) * DMA_RING_BUF_PRIORITY_SHIFT)
q 165 drivers/net/ethernet/brocade/bna/bfi_enet.h struct bfi_enet_q q;
q 171 drivers/net/ethernet/brocade/bna/bfi_enet.h struct bfi_enet_q q;
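
DMA_PRIO_REG_INDEX()/DMA_PRIO_REG_SHIFT() above pack one small priority field per queue, six to a register: register = q / 6, bit offset = (q % 6) * DMA_RING_BUF_PRIORITY_SHIFT. A sketch of reading-modifying-writing such a packed field; the 5-bit width and the register-array access are assumptions for illustration:

    #include <stdint.h>

    #define FIELD_BITS     5    /* illustrative width of one priority field */
    #define FIELDS_PER_REG 6
    #define FIELD_MASK     ((1u << FIELD_BITS) - 1)

    static unsigned reg_index(unsigned q) { return q / FIELDS_PER_REG; }
    static unsigned reg_shift(unsigned q) { return (q % FIELDS_PER_REG) * FIELD_BITS; }

    /* Read-modify-write queue q's priority in a bank of packed registers. */
    static void set_queue_prio(uint32_t *regs, unsigned q, uint32_t prio)
    {
        uint32_t v = regs[reg_index(q)];

        v &= ~(FIELD_MASK << reg_shift(q));        /* clear the old field */
        v |= (prio & FIELD_MASK) << reg_shift(q);  /* insert the new value */
        regs[reg_index(q)] = v;
    }
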
q 177 drivers/net/ethernet/brocade/bna/bfi_enet.h struct bfi_enet_q q;
q 422 drivers/net/ethernet/brocade/bna/bfi_enet.h struct bfi_enet_txq q;
q 238 drivers/net/ethernet/brocade/bna/bna.h static inline struct bna_mac *bna_mac_find(struct list_head *q, const u8 *addr)
q 242 drivers/net/ethernet/brocade/bna/bna.h list_for_each_entry(mac, q, qe)
q 1635 drivers/net/ethernet/brocade/bna/bna_tx_rx.c bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].qs.q,
q 1643 drivers/net/ethernet/brocade/bna/bna_tx_rx.c bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].ql.q,
q 1664 drivers/net/ethernet/brocade/bna/bna_tx_rx.c bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].cq.q,
q 3101 drivers/net/ethernet/brocade/bna/bna_tx_rx.c bfi_enet_datapath_q_init(&cfg_req->q_cfg[i].q.q, &txq->qpt);
q 3102 drivers/net/ethernet/brocade/bna/bna_tx_rx.c cfg_req->q_cfg[i].q.priority = txq->priority;
q 1307 drivers/net/ethernet/cadence/macb_main.c unsigned int q;
q 1310 drivers/net/ethernet/cadence/macb_main.c for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
q 1325 drivers/net/ethernet/cadence/macb_main.c for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
q 1484 drivers/net/ethernet/cadence/macb_main.c unsigned int q;
q 1487 drivers/net/ethernet/cadence/macb_main.c for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
q 1899 drivers/net/ethernet/cadence/macb_main.c unsigned int q;
q 1902 drivers/net/ethernet/cadence/macb_main.c for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
q 1941 drivers/net/ethernet/cadence/macb_main.c unsigned int q;
q 1946 drivers/net/ethernet/cadence/macb_main.c for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
q 1967 drivers/net/ethernet/cadence/macb_main.c unsigned int q;
q 1970 drivers/net/ethernet/cadence/macb_main.c for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
q 2003 drivers/net/ethernet/cadence/macb_main.c unsigned int q;
q 2006 drivers/net/ethernet/cadence/macb_main.c for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
q 2015 drivers/net/ethernet/cadence/macb_main.c q, size, (unsigned long)queue->tx_ring_dma,
q 2046 drivers/net/ethernet/cadence/macb_main.c unsigned int q;
q 2049 drivers/net/ethernet/cadence/macb_main.c for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
q 2087 drivers/net/ethernet/cadence/macb_main.c unsigned int q;
q 2105 drivers/net/ethernet/cadence/macb_main.c for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
q 2186 drivers/net/ethernet/cadence/macb_main.c unsigned int q;
q 2192 drivers/net/ethernet/cadence/macb_main.c for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
q 2193 drivers/net/ethernet/cadence/macb_main.c if (q)
q 2231 drivers/net/ethernet/cadence/macb_main.c unsigned int q;
q 2267 drivers/net/ethernet/cadence/macb_main.c for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
q 2414 drivers/net/ethernet/cadence/macb_main.c unsigned int q;
q 2442 drivers/net/ethernet/cadence/macb_main.c for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
q 2469 drivers/net/ethernet/cadence/macb_main.c unsigned int q;
q 2473 drivers/net/ethernet/cadence/macb_main.c for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
q 2507 drivers/net/ethernet/cadence/macb_main.c unsigned int i, q, idx;
q 2528 drivers/net/ethernet/cadence/macb_main.c for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
q 2600 drivers/net/ethernet/cadence/macb_main.c unsigned int q;
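
Nearly every macb_main.c hit above is the same traversal idiom: an index q and a cursor pointer advanced together in one for-header, so the counter and the pointer cannot drift apart. Distilled into a stand-alone sketch (the struct names are stand-ins):

    struct queue { int irq; };                 /* stand-in per-queue state */
    struct bp { struct queue *queues; unsigned int num_queues; };

    static void walk_queues(struct bp *bp)
    {
        struct queue *queue;
        unsigned int q;

        /* q serves register strides and log messages, queue gives member
         * access; advancing both in the for-header keeps them in lock step. */
        for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
            queue->irq = (int)q;               /* placeholder body */
    }
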
q 2608 drivers/net/ethernet/cadence/macb_main.c for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
q 2611 drivers/net/ethernet/cadence/macb_main.c q, queue_statistics[i].stat_string);
q 3476 drivers/net/ethernet/cadence/macb_main.c unsigned int hw_q, q;
q 3489 drivers/net/ethernet/cadence/macb_main.c for (hw_q = 0, q = 0; hw_q < MACB_MAX_QUEUES; ++hw_q) {
q 3493 drivers/net/ethernet/cadence/macb_main.c queue = &bp->queues[q];
q 3531 drivers/net/ethernet/cadence/macb_main.c queue->irq = platform_get_irq(pdev, q);
q 3542 drivers/net/ethernet/cadence/macb_main.c q++;
q 3640 drivers/net/ethernet/cadence/macb_main.c struct macb_queue *q = &lp->queues[0];
q 3646 drivers/net/ethernet/cadence/macb_main.c q->rx_ring = dma_alloc_coherent(&lp->pdev->dev,
q 3649 drivers/net/ethernet/cadence/macb_main.c &q->rx_ring_dma, GFP_KERNEL);
q 3650 drivers/net/ethernet/cadence/macb_main.c if (!q->rx_ring)
q 3653 drivers/net/ethernet/cadence/macb_main.c q->rx_buffers = dma_alloc_coherent(&lp->pdev->dev,
q 3656 drivers/net/ethernet/cadence/macb_main.c &q->rx_buffers_dma, GFP_KERNEL);
q 3657 drivers/net/ethernet/cadence/macb_main.c if (!q->rx_buffers) {
q 3661 drivers/net/ethernet/cadence/macb_main.c q->rx_ring, q->rx_ring_dma);
q 3662 drivers/net/ethernet/cadence/macb_main.c q->rx_ring = NULL;
q 3666 drivers/net/ethernet/cadence/macb_main.c addr = q->rx_buffers_dma;
q 3668 drivers/net/ethernet/cadence/macb_main.c desc = macb_rx_desc(q, i);
q 3678 drivers/net/ethernet/cadence/macb_main.c q->rx_tail = 0;
q 3681 drivers/net/ethernet/cadence/macb_main.c macb_writel(lp, RBQP, q->rx_ring_dma);
q 3734 drivers/net/ethernet/cadence/macb_main.c struct macb_queue *q = &lp->queues[0];
q 3755 drivers/net/ethernet/cadence/macb_main.c q->rx_ring, q->rx_ring_dma);
q 3756 drivers/net/ethernet/cadence/macb_main.c q->rx_ring = NULL;
q 3760 drivers/net/ethernet/cadence/macb_main.c q->rx_buffers, q->rx_buffers_dma);
q 3761 drivers/net/ethernet/cadence/macb_main.c q->rx_buffers = NULL;
q 3806 drivers/net/ethernet/cadence/macb_main.c struct macb_queue *q = &lp->queues[0];
q 3812 drivers/net/ethernet/cadence/macb_main.c desc = macb_rx_desc(q, q->rx_tail);
q 3814 drivers/net/ethernet/cadence/macb_main.c p_recv = q->rx_buffers + q->rx_tail * AT91ETHER_MAX_RBUFF_SZ;
q 3836 drivers/net/ethernet/cadence/macb_main.c if (q->rx_tail == AT91ETHER_MAX_RX_DESCR - 1)
q 3837 drivers/net/ethernet/cadence/macb_main.c q->rx_tail = 0;
q 3839 drivers/net/ethernet/cadence/macb_main.c q->rx_tail++;
q 3841 drivers/net/ethernet/cadence/macb_main.c desc = macb_rx_desc(q, q->rx_tail);
q 4424 drivers/net/ethernet/cadence/macb_main.c unsigned int q;
q 4437 drivers/net/ethernet/cadence/macb_main.c for (q = 0, queue = bp->queues; q < bp->num_queues;
q 4438 drivers/net/ethernet/cadence/macb_main.c ++q, ++queue)
q 4466 drivers/net/ethernet/cadence/macb_main.c unsigned int q;
q 4486 drivers/net/ethernet/cadence/macb_main.c for (q = 0, queue = bp->queues; q < bp->num_queues;
q 4487 drivers/net/ethernet/cadence/macb_main.c ++q, ++queue)
q 353 drivers/net/ethernet/cadence/macb_ptp.c unsigned int q;
q 373 drivers/net/ethernet/cadence/macb_ptp.c for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
q 335 drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h #define CN23XX_SLI_PKT_MBOX_INT(q) \
q 336 drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h (CN23XX_SLI_PKT_MBOX_INT_START + ((q) * CN23XX_SLI_MBOX_OFFSET))
q 338 drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h #define CN23XX_SLI_PKT_PF_VF_MBOX_SIG(q, idx) \
q 340 drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h ((q) * CN23XX_SLI_MBOX_OFFSET + \
q 214 drivers/net/ethernet/cavium/liquidio/cn23xx_vf_regs.h #define CN23XX_VF_SLI_PKT_MBOX_INT(q) \
q 215 drivers/net/ethernet/cavium/liquidio/cn23xx_vf_regs.h (CN23XX_VF_SLI_PKT_MBOX_INT_START + ((q) * CN23XX_SLI_MBOX_OFFSET))
q 217 drivers/net/ethernet/cavium/liquidio/cn23xx_vf_regs.h #define CN23XX_SLI_PKT_PF_VF_MBOX_SIG(q, idx) \
q 219 drivers/net/ethernet/cavium/liquidio/cn23xx_vf_regs.h ((q) * CN23XX_SLI_MBOX_OFFSET + \
q 226 drivers/net/ethernet/cavium/liquidio/cn23xx_vf_regs.h #define CN23XX_VF_SLI_INT_SUM(q) \
q 227 drivers/net/ethernet/cavium/liquidio/cn23xx_vf_regs.h (CN23XX_VF_SLI_INT_SUM_START + ((q) * CN23XX_VF_IQ_OFFSET))
q 459 drivers/net/ethernet/cavium/liquidio/lio_core.c int q, q_no;
q 461 drivers/net/ethernet/cavium/liquidio/lio_core.c for (q = 0; q < oct->num_oqs; q++) {
q 462 drivers/net/ethernet/cavium/liquidio/lio_core.c q_no = lio->linfo.rxpciq[q].s.q_no;
q 815 drivers/net/ethernet/cavium/liquidio/lio_core.c int q, q_no;
q 833 drivers/net/ethernet/cavium/liquidio/lio_core.c for (q = 0; q < num_oqs; q++) {
q 834 drivers/net/ethernet/cavium/liquidio/lio_core.c q_no = lio->linfo.rxpciq[q].s.q_no;
q 837 drivers/net/ethernet/cavium/liquidio/lio_core.c __func__, q, q_no);
q 877 drivers/net/ethernet/cavium/liquidio/lio_core.c for (q = 0; q < num_iqs; q++) {
q 880 drivers/net/ethernet/cavium/liquidio/lio_core.c retval = octeon_setup_iq(octeon_dev, ifidx, q,
q 881 drivers/net/ethernet/cavium/liquidio/lio_core.c lio->linfo.txpciq[q], num_tx_descs,
q 882 drivers/net/ethernet/cavium/liquidio/lio_core.c netdev_get_tx_queue(netdev, q));
q 895 drivers/net/ethernet/cavium/liquidio/lio_core.c ioq_vector = &octeon_dev->ioq_vector[q];
q 496 drivers/net/ethernet/cavium/liquidio/lio_main.c int q, iq;
q 499 drivers/net/ethernet/cavium/liquidio/lio_main.c for (q = 0; q < numqs; q++) {
q 500 drivers/net/ethernet/cavium/liquidio/lio_main.c iq = lio->linfo.txpciq[q %
q 504 drivers/net/ethernet/cavium/liquidio/lio_main.c if (__netif_subqueue_stopped(lio->netdev, q)) {
q 505 drivers/net/ethernet/cavium/liquidio/lio_main.c netif_wake_subqueue(lio->netdev, q);
q 370 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c int mbox, key, stat, q;
q 403 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c for (q = 0; q < MAX_CMP_QUEUES_PER_QS; q++) {
q 404 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_CFG, q);
q 405 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_CFG2, q);
q 406 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_THRESH, q);
q 407 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_BASE, q);
q 408 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, q);
q 409 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_TAIL, q);
q 410 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_DOOR, q);
q 411 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS, q);
q 412 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS2, q);
q 413 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_DEBUG, q);
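
The CN23XX_* macros and the nicvf_queue_reg_read() dump loops above both rely on a strided register file: every queue sees the same register map, displaced by q times a fixed per-queue offset. A sketch of that addressing under assumed base/stride constants:

    #include <stdint.h>

    #define MBOX_INT_START 0x10200ull   /* illustrative block base */
    #define MBOX_OFFSET    0x20000ull   /* illustrative per-queue stride */

    /* Address of queue q's copy of a register: same layout for every queue,
     * displaced by q * stride (exactly the CN23XX macro shape above). */
    static inline uint64_t queue_reg(uint64_t start, uint64_t stride, unsigned q)
    {
        return start + (uint64_t)q * stride;
    }

    /* e.g. queue_reg(MBOX_INT_START, MBOX_OFFSET, 3) addresses queue 3's copy. */
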
q 417 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c for (q = 0; q < MAX_RCV_QUEUES_PER_QS; q++) {
q 418 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RQ_0_7_CFG, q);
q 420 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c NIC_QSET_RQ_0_7_STAT_0_1, q);
q 422 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c p[i++] = nicvf_queue_reg_read(nic, reg_offset, q);
q 425 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c for (q = 0; q < MAX_SND_QUEUES_PER_QS; q++) {
q 426 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, q);
q 427 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_THRESH, q);
q 428 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_BASE, q);
q 429 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_HEAD, q);
q 430 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_TAIL, q);
q 431 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_DOOR, q);
q 432 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_STATUS, q);
q 433 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_DEBUG, q);
q 438 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_STAT_0_1, q);
q 440 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c p[i++] = nicvf_queue_reg_read(nic, reg_offset, q);
q 443 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c for (q = 0; q < MAX_RCV_BUF_DESC_RINGS_PER_QS; q++) {
q 444 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_CFG, q);
q 445 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_THRESH, q);
q 446 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_BASE, q);
q 447 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_HEAD, q);
q 448 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, q);
q 449 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_DOOR, q);
q 451 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c NIC_QSET_RBDR_0_1_STATUS0, q);
q 453 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c NIC_QSET_RBDR_0_1_STATUS1, q);
q 455 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c p[i++] = nicvf_queue_reg_read(nic, reg_offset, q);
q 480 drivers/net/ethernet/chelsio/cxgb/sge.c struct cmdQ *q = &sge->cmdQ[0];
q 481 drivers/net/ethernet/chelsio/cxgb/sge.c clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
q 482 drivers/net/ethernet/chelsio/cxgb/sge.c if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) {
q 483 drivers/net/ethernet/chelsio/cxgb/sge.c set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
q 505 drivers/net/ethernet/chelsio/cxgb/sge.c static void free_freelQ_buffers(struct pci_dev *pdev, struct freelQ *q)
q 507 drivers/net/ethernet/chelsio/cxgb/sge.c unsigned int cidx = q->cidx;
q 509 drivers/net/ethernet/chelsio/cxgb/sge.c while (q->credits--) {
q 510 drivers/net/ethernet/chelsio/cxgb/sge.c struct freelQ_ce *ce = &q->centries[cidx];
q 517 drivers/net/ethernet/chelsio/cxgb/sge.c if (++cidx == q->size)
q 537 drivers/net/ethernet/chelsio/cxgb/sge.c struct freelQ *q = &sge->freelQ[i];
q 539 drivers/net/ethernet/chelsio/cxgb/sge.c if (q->centries) {
q 540 drivers/net/ethernet/chelsio/cxgb/sge.c free_freelQ_buffers(pdev, q);
q 541 drivers/net/ethernet/chelsio/cxgb/sge.c kfree(q->centries);
q 543 drivers/net/ethernet/chelsio/cxgb/sge.c if (q->entries) {
q 544 drivers/net/ethernet/chelsio/cxgb/sge.c size = sizeof(struct freelQ_e) * q->size;
q 545 drivers/net/ethernet/chelsio/cxgb/sge.c pci_free_consistent(pdev, size, q->entries,
q 546 drivers/net/ethernet/chelsio/cxgb/sge.c q->dma_addr);
q 561 drivers/net/ethernet/chelsio/cxgb/sge.c struct freelQ *q = &sge->freelQ[i];
q 563 drivers/net/ethernet/chelsio/cxgb/sge.c q->genbit = 1;
q 564 drivers/net/ethernet/chelsio/cxgb/sge.c q->size = p->freelQ_size[i];
q 565 drivers/net/ethernet/chelsio/cxgb/sge.c q->dma_offset = sge->rx_pkt_pad ? 0 : NET_IP_ALIGN;
q 566 drivers/net/ethernet/chelsio/cxgb/sge.c size = sizeof(struct freelQ_e) * q->size;
q 567 drivers/net/ethernet/chelsio/cxgb/sge.c q->entries = pci_alloc_consistent(pdev, size, &q->dma_addr);
q 568 drivers/net/ethernet/chelsio/cxgb/sge.c if (!q->entries)
q 571 drivers/net/ethernet/chelsio/cxgb/sge.c size = sizeof(struct freelQ_ce) * q->size;
q 572 drivers/net/ethernet/chelsio/cxgb/sge.c q->centries = kzalloc(size, GFP_KERNEL);
q 573 drivers/net/ethernet/chelsio/cxgb/sge.c if (!q->centries)
q 617 drivers/net/ethernet/chelsio/cxgb/sge.c static void free_cmdQ_buffers(struct sge *sge, struct cmdQ *q, unsigned int n)
q 621 drivers/net/ethernet/chelsio/cxgb/sge.c unsigned int cidx = q->cidx;
q 623 drivers/net/ethernet/chelsio/cxgb/sge.c q->in_use -= n;
q 624 drivers/net/ethernet/chelsio/cxgb/sge.c ce = &q->centries[cidx];
q 630 drivers/net/ethernet/chelsio/cxgb/sge.c if (q->sop)
q 631 drivers/net/ethernet/chelsio/cxgb/sge.c q->sop = 0;
q 635 drivers/net/ethernet/chelsio/cxgb/sge.c q->sop = 1;
q 638 drivers/net/ethernet/chelsio/cxgb/sge.c if (++cidx == q->size) {
q 640 drivers/net/ethernet/chelsio/cxgb/sge.c ce = q->centries;
q 643 drivers/net/ethernet/chelsio/cxgb/sge.c q->cidx = cidx;
q 657 drivers/net/ethernet/chelsio/cxgb/sge.c struct cmdQ *q = &sge->cmdQ[i];
q 659 drivers/net/ethernet/chelsio/cxgb/sge.c if (q->centries) {
q 660 drivers/net/ethernet/chelsio/cxgb/sge.c if (q->in_use)
q 661 drivers/net/ethernet/chelsio/cxgb/sge.c free_cmdQ_buffers(sge, q, q->in_use);
q 662 drivers/net/ethernet/chelsio/cxgb/sge.c kfree(q->centries);
q 664 drivers/net/ethernet/chelsio/cxgb/sge.c if (q->entries) {
q 665 drivers/net/ethernet/chelsio/cxgb/sge.c size = sizeof(struct cmdQ_e) * q->size;
q 666 drivers/net/ethernet/chelsio/cxgb/sge.c pci_free_consistent(pdev, size, q->entries,
q 667 drivers/net/ethernet/chelsio/cxgb/sge.c q->dma_addr);
q 681 drivers/net/ethernet/chelsio/cxgb/sge.c struct cmdQ *q = &sge->cmdQ[i];
q 683 drivers/net/ethernet/chelsio/cxgb/sge.c q->genbit = 1;
q 684 drivers/net/ethernet/chelsio/cxgb/sge.c q->sop = 1;
q 685 drivers/net/ethernet/chelsio/cxgb/sge.c q->size = p->cmdQ_size[i];
q 686 drivers/net/ethernet/chelsio/cxgb/sge.c q->in_use = 0;
q 687 drivers/net/ethernet/chelsio/cxgb/sge.c q->status = 0;
q 688 drivers/net/ethernet/chelsio/cxgb/sge.c q->processed = q->cleaned = 0;
q 689 drivers/net/ethernet/chelsio/cxgb/sge.c q->stop_thres = 0;
q 690 drivers/net/ethernet/chelsio/cxgb/sge.c spin_lock_init(&q->lock);
q 691 drivers/net/ethernet/chelsio/cxgb/sge.c size = sizeof(struct cmdQ_e) * q->size;
q 692 drivers/net/ethernet/chelsio/cxgb/sge.c q->entries = pci_alloc_consistent(pdev, size, &q->dma_addr);
q 693 drivers/net/ethernet/chelsio/cxgb/sge.c if (!q->entries)
q 696 drivers/net/ethernet/chelsio/cxgb/sge.c size = sizeof(struct cmdQ_ce) * q->size;
q 697 drivers/net/ethernet/chelsio/cxgb/sge.c q->centries = kzalloc(size, GFP_KERNEL);
q 698 drivers/net/ethernet/chelsio/cxgb/sge.c if (!q->centries)
q 824 drivers/net/ethernet/chelsio/cxgb/sge.c static void refill_free_list(struct sge *sge, struct freelQ *q)
q 827 drivers/net/ethernet/chelsio/cxgb/sge.c struct freelQ_ce *ce = &q->centries[q->pidx];
q 828 drivers/net/ethernet/chelsio/cxgb/sge.c struct freelQ_e *e = &q->entries[q->pidx];
q 829 drivers/net/ethernet/chelsio/cxgb/sge.c unsigned int dma_len = q->rx_buffer_size - q->dma_offset;
q 831 drivers/net/ethernet/chelsio/cxgb/sge.c while (q->credits < q->size) {
q 835 drivers/net/ethernet/chelsio/cxgb/sge.c skb = dev_alloc_skb(q->rx_buffer_size);
q 839 drivers/net/ethernet/chelsio/cxgb/sge.c skb_reserve(skb, q->dma_offset);
q 849 drivers/net/ethernet/chelsio/cxgb/sge.c e->len_gen = V_CMD_LEN(dma_len) | V_CMD_GEN1(q->genbit);
q 851 drivers/net/ethernet/chelsio/cxgb/sge.c e->gen2 = V_CMD_GEN2(q->genbit);
q 855 drivers/net/ethernet/chelsio/cxgb/sge.c if (++q->pidx == q->size) {
q 856 drivers/net/ethernet/chelsio/cxgb/sge.c q->pidx = 0;
q 857 drivers/net/ethernet/chelsio/cxgb/sge.c q->genbit ^= 1;
q 858 drivers/net/ethernet/chelsio/cxgb/sge.c ce = q->centries;
q 859 drivers/net/ethernet/chelsio/cxgb/sge.c e = q->entries;
q 861 drivers/net/ethernet/chelsio/cxgb/sge.c q->credits++;
q 1165 drivers/net/ethernet/chelsio/cxgb/sge.c struct cmdQ *q)
q 1181 drivers/net/ethernet/chelsio/cxgb/sge.c if (++pidx == q->size) {
q 1184 drivers/net/ethernet/chelsio/cxgb/sge.c ce1 = q->centries;
q 1185 drivers/net/ethernet/chelsio/cxgb/sge.c e1 = q->entries;
q 1201 drivers/net/ethernet/chelsio/cxgb/sge.c struct cmdQ *q)
q 1209 drivers/net/ethernet/chelsio/cxgb/sge.c e = e1 = &q->entries[pidx];
q 1210 drivers/net/ethernet/chelsio/cxgb/sge.c ce = &q->centries[pidx];
q 1235 drivers/net/ethernet/chelsio/cxgb/sge.c if (++pidx == q->size) {
q 1238 drivers/net/ethernet/chelsio/cxgb/sge.c e1 = q->entries;
q 1239 drivers/net/ethernet/chelsio/cxgb/sge.c ce = q->centries;
q 1243 drivers/net/ethernet/chelsio/cxgb/sge.c nfrags, q);
q 1258 drivers/net/ethernet/chelsio/cxgb/sge.c if (++pidx == q->size) {
q 1261 drivers/net/ethernet/chelsio/cxgb/sge.c e1 = q->entries;
q 1262 drivers/net/ethernet/chelsio/cxgb/sge.c ce = q->centries;
q 1272 drivers/net/ethernet/chelsio/cxgb/sge.c nfrags, q);
q 1288 drivers/net/ethernet/chelsio/cxgb/sge.c static inline void reclaim_completed_tx(struct sge *sge, struct cmdQ *q)
q 1290 drivers/net/ethernet/chelsio/cxgb/sge.c unsigned int reclaim = q->processed - q->cleaned;
q 1294 drivers/net/ethernet/chelsio/cxgb/sge.c q->processed, q->cleaned);
q 1295 drivers/net/ethernet/chelsio/cxgb/sge.c free_cmdQ_buffers(sge, q, reclaim);
q 1296 drivers/net/ethernet/chelsio/cxgb/sge.c q->cleaned += reclaim;
q 1308 drivers/net/ethernet/chelsio/cxgb/sge.c struct cmdQ *q = &sge->cmdQ[0];
q 1312 drivers/net/ethernet/chelsio/cxgb/sge.c spin_lock(&q->lock);
q 1313 drivers/net/ethernet/chelsio/cxgb/sge.c reclaim_completed_tx(sge, q);
q 1315 drivers/net/ethernet/chelsio/cxgb/sge.c credits = q->size - q->in_use;
q 1321 drivers/net/ethernet/chelsio/cxgb/sge.c q->in_use += count;
q 1322 drivers/net/ethernet/chelsio/cxgb/sge.c genbit = q->genbit;
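
refill_free_list() above stamps each descriptor with the queue's current generation bit and flips q->genbit whenever pidx wraps; a consumer (see the e->GenerationBit == q->genbit tests) treats a mismatched generation as "no more entries", so no separate count of valid entries is needed. The producer side, distilled into a self-contained sketch:

    #include <stdint.h>

    struct desc { uint32_t payload; uint32_t gen; };

    struct ring {
        struct desc *entries;
        unsigned int size, pidx;
        unsigned int genbit;   /* flips on every wrap; consumer compares */
    };

    /* Post one entry; the generation bit written with it marks it valid
     * for the current lap around the ring. */
    static void ring_post(struct ring *r, uint32_t payload)
    {
        r->entries[r->pidx].payload = payload;
        r->entries[r->pidx].gen = r->genbit;  /* hardware rings write gen last */

        if (++r->pidx == r->size) {
            r->pidx = 0;
            r->genbit ^= 1;  /* entries of the new lap carry the flipped bit */
        }
    }
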
q->genbit; q 1323 drivers/net/ethernet/chelsio/cxgb/sge.c pidx = q->pidx; q 1324 drivers/net/ethernet/chelsio/cxgb/sge.c q->pidx += count; q 1325 drivers/net/ethernet/chelsio/cxgb/sge.c if (q->pidx >= q->size) { q 1326 drivers/net/ethernet/chelsio/cxgb/sge.c q->pidx -= q->size; q 1327 drivers/net/ethernet/chelsio/cxgb/sge.c q->genbit ^= 1; q 1329 drivers/net/ethernet/chelsio/cxgb/sge.c write_tx_descs(adapter, skb, pidx, genbit, q); q 1330 drivers/net/ethernet/chelsio/cxgb/sge.c credits = q->size - q->in_use; q 1335 drivers/net/ethernet/chelsio/cxgb/sge.c clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status); q 1336 drivers/net/ethernet/chelsio/cxgb/sge.c if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) { q 1337 drivers/net/ethernet/chelsio/cxgb/sge.c set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status); q 1341 drivers/net/ethernet/chelsio/cxgb/sge.c spin_unlock(&q->lock); q 1396 drivers/net/ethernet/chelsio/cxgb/sge.c static inline int enough_free_Tx_descs(const struct cmdQ *q) q 1398 drivers/net/ethernet/chelsio/cxgb/sge.c unsigned int r = q->processed - q->cleaned; q 1400 drivers/net/ethernet/chelsio/cxgb/sge.c return q->in_use - r < (q->size >> 1); q 1469 drivers/net/ethernet/chelsio/cxgb/sge.c struct respQ *q = &sge->respQ; q 1470 drivers/net/ethernet/chelsio/cxgb/sge.c struct respQ_e *e = &q->entries[q->cidx]; q 1475 drivers/net/ethernet/chelsio/cxgb/sge.c while (done < budget && e->GenerationBit == q->genbit) { q 1521 drivers/net/ethernet/chelsio/cxgb/sge.c if (unlikely(++q->cidx == q->size)) { q 1522 drivers/net/ethernet/chelsio/cxgb/sge.c q->cidx = 0; q 1523 drivers/net/ethernet/chelsio/cxgb/sge.c q->genbit ^= 1; q 1524 drivers/net/ethernet/chelsio/cxgb/sge.c e = q->entries; q 1528 drivers/net/ethernet/chelsio/cxgb/sge.c if (++q->credits > SGE_RESPQ_REPLENISH_THRES) { q 1529 drivers/net/ethernet/chelsio/cxgb/sge.c writel(q->credits, adapter->regs + A_SG_RSPQUEUECREDIT); q 1530 drivers/net/ethernet/chelsio/cxgb/sge.c q->credits = 0; q 1559 drivers/net/ethernet/chelsio/cxgb/sge.c struct respQ *q = &sge->respQ; q 1560 drivers/net/ethernet/chelsio/cxgb/sge.c struct respQ_e *e = &q->entries[q->cidx]; q 1576 drivers/net/ethernet/chelsio/cxgb/sge.c if (unlikely(++q->cidx == q->size)) { q 1577 drivers/net/ethernet/chelsio/cxgb/sge.c q->cidx = 0; q 1578 drivers/net/ethernet/chelsio/cxgb/sge.c q->genbit ^= 1; q 1579 drivers/net/ethernet/chelsio/cxgb/sge.c e = q->entries; q 1583 drivers/net/ethernet/chelsio/cxgb/sge.c if (++q->credits > SGE_RESPQ_REPLENISH_THRES) { q 1584 drivers/net/ethernet/chelsio/cxgb/sge.c writel(q->credits, adapter->regs + A_SG_RSPQUEUECREDIT); q 1585 drivers/net/ethernet/chelsio/cxgb/sge.c q->credits = 0; q 1588 drivers/net/ethernet/chelsio/cxgb/sge.c } while (e->GenerationBit == q->genbit && !e->DataValid); q 1593 drivers/net/ethernet/chelsio/cxgb/sge.c return e->GenerationBit == q->genbit; q 1663 drivers/net/ethernet/chelsio/cxgb/sge.c struct cmdQ *q = &sge->cmdQ[qid]; q 1666 drivers/net/ethernet/chelsio/cxgb/sge.c spin_lock(&q->lock); q 1668 drivers/net/ethernet/chelsio/cxgb/sge.c reclaim_completed_tx(sge, q); q 1670 drivers/net/ethernet/chelsio/cxgb/sge.c pidx = q->pidx; q 1671 drivers/net/ethernet/chelsio/cxgb/sge.c credits = q->size - q->in_use; q 1684 drivers/net/ethernet/chelsio/cxgb/sge.c spin_unlock(&q->lock); q 1688 drivers/net/ethernet/chelsio/cxgb/sge.c if (unlikely(credits - count < q->stop_thres)) { q 1705 drivers/net/ethernet/chelsio/cxgb/sge.c spin_unlock(&q->lock); q 1708 drivers/net/ethernet/chelsio/cxgb/sge.c pidx = q->pidx; q 1713 
drivers/net/ethernet/chelsio/cxgb/sge.c q->in_use += count; q 1714 drivers/net/ethernet/chelsio/cxgb/sge.c genbit = q->genbit; q 1715 drivers/net/ethernet/chelsio/cxgb/sge.c pidx = q->pidx; q 1716 drivers/net/ethernet/chelsio/cxgb/sge.c q->pidx += count; q 1717 drivers/net/ethernet/chelsio/cxgb/sge.c if (q->pidx >= q->size) { q 1718 drivers/net/ethernet/chelsio/cxgb/sge.c q->pidx -= q->size; q 1719 drivers/net/ethernet/chelsio/cxgb/sge.c q->genbit ^= 1; q 1721 drivers/net/ethernet/chelsio/cxgb/sge.c spin_unlock(&q->lock); q 1723 drivers/net/ethernet/chelsio/cxgb/sge.c write_tx_descs(adapter, skb, pidx, genbit, q); q 1735 drivers/net/ethernet/chelsio/cxgb/sge.c clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status); q 1736 drivers/net/ethernet/chelsio/cxgb/sge.c if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) { q 1737 drivers/net/ethernet/chelsio/cxgb/sge.c set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status); q 1743 drivers/net/ethernet/chelsio/cxgb/sge.c if (spin_trylock(&q->lock)) { q 1744 drivers/net/ethernet/chelsio/cxgb/sge.c credits = q->size - q->in_use; q 1890 drivers/net/ethernet/chelsio/cxgb/sge.c struct cmdQ *q = &sge->cmdQ[i]; q 1892 drivers/net/ethernet/chelsio/cxgb/sge.c if (!spin_trylock(&q->lock)) q 1895 drivers/net/ethernet/chelsio/cxgb/sge.c reclaim_completed_tx(sge, q); q 1896 drivers/net/ethernet/chelsio/cxgb/sge.c if (i == 0 && q->in_use) { /* flush pending credits */ q 1899 drivers/net/ethernet/chelsio/cxgb/sge.c spin_unlock(&q->lock); q 1173 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c struct sge_rspq *q = &adap->sge.qs[i].rspq; q 1175 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c spin_lock_irq(&q->lock); q 1176 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c spin_unlock_irq(&q->lock); q 1957 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c const struct qset_params *q = &adapter->params.sge.qset[pi->first_qset]; q 1963 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c e->rx_pending = q->fl_size; q 1964 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c e->rx_mini_pending = q->rspq_size; q 1965 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c e->rx_jumbo_pending = q->jumbo_size; q 1966 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c e->tx_pending = q->txq_size[0]; q 1973 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c struct qset_params *q; q 1989 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c q = &adapter->params.sge.qset[pi->first_qset]; q 1990 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c for (i = 0; i < pi->nqsets; ++i, ++q) { q 1991 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c q->rspq_size = e->rx_mini_pending; q 1992 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c q->fl_size = e->rx_pending; q 1993 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c q->jumbo_size = e->rx_jumbo_pending; q 1994 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c q->txq_size[0] = e->tx_pending; q 1995 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c q->txq_size[1] = e->tx_pending; q 1996 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c q->txq_size[2] = e->tx_pending; q 2026 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c struct qset_params *q = adapter->params.sge.qset; q 2028 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c c->rx_coalesce_usecs = q->coalesce_usecs; q 2152 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c struct qset_params *q; q 2202 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c q = &adapter->params.sge.qset[t.qset_idx]; q 2205 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c q->rspq_size = t.rspq_size; q 2207 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c q->fl_size = 
t.fl_size[0]; q 2209 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c q->jumbo_size = t.fl_size[1]; q 2211 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c q->txq_size[0] = t.txq_size[0]; q 2213 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c q->txq_size[1] = t.txq_size[1]; q 2215 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c q->txq_size[2] = t.txq_size[2]; q 2217 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c q->cong_thres = t.cong_thres; q 2222 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c q->coalesce_usecs = t.intr_lat; q 2223 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c t3_update_qset_coalesce(qs, q); q 2227 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c q->polling = t.polling; q 2235 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c q = &adapter->params.sge. q 2237 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c q->polling = t.polling; q 2253 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c struct qset_params *q; q 2278 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c q = &adapter->params.sge.qset[q1 + t.qset_idx]; q 2279 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c t.rspq_size = q->rspq_size; q 2280 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c t.txq_size[0] = q->txq_size[0]; q 2281 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c t.txq_size[1] = q->txq_size[1]; q 2282 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c t.txq_size[2] = q->txq_size[2]; q 2283 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c t.fl_size[0] = q->fl_size; q 2284 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c t.fl_size[1] = q->jumbo_size; q 2285 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c t.polling = q->polling; q 2287 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c t.intr_lat = q->coalesce_usecs; q 2288 drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c t.cong_thres = q->cong_thres; q 167 drivers/net/ethernet/chelsio/cxgb3/sge.c static inline struct sge_qset *fl_to_qset(const struct sge_fl *q, int qidx) q 169 drivers/net/ethernet/chelsio/cxgb3/sge.c return container_of(q, struct sge_qset, fl[qidx]); q 172 drivers/net/ethernet/chelsio/cxgb3/sge.c static inline struct sge_qset *rspq_to_qset(const struct sge_rspq *q) q 174 drivers/net/ethernet/chelsio/cxgb3/sge.c return container_of(q, struct sge_qset, rspq); q 177 drivers/net/ethernet/chelsio/cxgb3/sge.c static inline struct sge_qset *txq_to_qset(const struct sge_txq *q, int qidx) q 179 drivers/net/ethernet/chelsio/cxgb3/sge.c return container_of(q, struct sge_qset, txq[qidx]); q 192 drivers/net/ethernet/chelsio/cxgb3/sge.c const struct sge_rspq *q, unsigned int credits) q 196 drivers/net/ethernet/chelsio/cxgb3/sge.c V_RSPQ(q->cntxt_id) | V_CREDITS(credits)); q 236 drivers/net/ethernet/chelsio/cxgb3/sge.c static inline void unmap_skb(struct sk_buff *skb, struct sge_txq *q, q 240 drivers/net/ethernet/chelsio/cxgb3/sge.c struct tx_sw_desc *d = &q->sdesc[cidx]; q 243 drivers/net/ethernet/chelsio/cxgb3/sge.c sgp = (struct sg_ent *)&q->desc[cidx].flit[d->sflit]; q 269 drivers/net/ethernet/chelsio/cxgb3/sge.c d = cidx + 1 == q->size ? 
q->sdesc : d + 1; q 285 drivers/net/ethernet/chelsio/cxgb3/sge.c static void free_tx_desc(struct adapter *adapter, struct sge_txq *q, q 290 drivers/net/ethernet/chelsio/cxgb3/sge.c unsigned int cidx = q->cidx; q 293 drivers/net/ethernet/chelsio/cxgb3/sge.c q->cntxt_id >= FW_TUNNEL_SGEEC_START; q 295 drivers/net/ethernet/chelsio/cxgb3/sge.c d = &q->sdesc[cidx]; q 299 drivers/net/ethernet/chelsio/cxgb3/sge.c unmap_skb(d->skb, q, cidx, pdev); q 306 drivers/net/ethernet/chelsio/cxgb3/sge.c if (++cidx == q->size) { q 308 drivers/net/ethernet/chelsio/cxgb3/sge.c d = q->sdesc; q 311 drivers/net/ethernet/chelsio/cxgb3/sge.c q->cidx = cidx; q 325 drivers/net/ethernet/chelsio/cxgb3/sge.c struct sge_txq *q, q 328 drivers/net/ethernet/chelsio/cxgb3/sge.c unsigned int reclaim = q->processed - q->cleaned; q 332 drivers/net/ethernet/chelsio/cxgb3/sge.c free_tx_desc(adapter, q, reclaim); q 333 drivers/net/ethernet/chelsio/cxgb3/sge.c q->cleaned += reclaim; q 334 drivers/net/ethernet/chelsio/cxgb3/sge.c q->in_use -= reclaim; q 336 drivers/net/ethernet/chelsio/cxgb3/sge.c return q->processed - q->cleaned; q 345 drivers/net/ethernet/chelsio/cxgb3/sge.c static inline int should_restart_tx(const struct sge_txq *q) q 347 drivers/net/ethernet/chelsio/cxgb3/sge.c unsigned int r = q->processed - q->cleaned; q 349 drivers/net/ethernet/chelsio/cxgb3/sge.c return q->in_use - r < (q->size >> 1); q 352 drivers/net/ethernet/chelsio/cxgb3/sge.c static void clear_rx_desc(struct pci_dev *pdev, const struct sge_fl *q, q 355 drivers/net/ethernet/chelsio/cxgb3/sge.c if (q->use_pages && d->pg_chunk.page) { q 360 drivers/net/ethernet/chelsio/cxgb3/sge.c q->alloc_size, PCI_DMA_FROMDEVICE); q 366 drivers/net/ethernet/chelsio/cxgb3/sge.c q->buf_size, PCI_DMA_FROMDEVICE); q 380 drivers/net/ethernet/chelsio/cxgb3/sge.c static void free_rx_bufs(struct pci_dev *pdev, struct sge_fl *q) q 382 drivers/net/ethernet/chelsio/cxgb3/sge.c unsigned int cidx = q->cidx; q 384 drivers/net/ethernet/chelsio/cxgb3/sge.c while (q->credits--) { q 385 drivers/net/ethernet/chelsio/cxgb3/sge.c struct rx_sw_desc *d = &q->sdesc[cidx]; q 388 drivers/net/ethernet/chelsio/cxgb3/sge.c clear_rx_desc(pdev, q, d); q 389 drivers/net/ethernet/chelsio/cxgb3/sge.c if (++cidx == q->size) q 393 drivers/net/ethernet/chelsio/cxgb3/sge.c if (q->pg_chunk.page) { q 394 drivers/net/ethernet/chelsio/cxgb3/sge.c __free_pages(q->pg_chunk.page, q->order); q 395 drivers/net/ethernet/chelsio/cxgb3/sge.c q->pg_chunk.page = NULL; q 442 drivers/net/ethernet/chelsio/cxgb3/sge.c static int alloc_pg_chunk(struct adapter *adapter, struct sge_fl *q, q 446 drivers/net/ethernet/chelsio/cxgb3/sge.c if (!q->pg_chunk.page) { q 449 drivers/net/ethernet/chelsio/cxgb3/sge.c q->pg_chunk.page = alloc_pages(gfp, order); q 450 drivers/net/ethernet/chelsio/cxgb3/sge.c if (unlikely(!q->pg_chunk.page)) q 452 drivers/net/ethernet/chelsio/cxgb3/sge.c q->pg_chunk.va = page_address(q->pg_chunk.page); q 453 drivers/net/ethernet/chelsio/cxgb3/sge.c q->pg_chunk.p_cnt = q->pg_chunk.va + (PAGE_SIZE << order) - q 455 drivers/net/ethernet/chelsio/cxgb3/sge.c q->pg_chunk.offset = 0; q 456 drivers/net/ethernet/chelsio/cxgb3/sge.c mapping = pci_map_page(adapter->pdev, q->pg_chunk.page, q 457 drivers/net/ethernet/chelsio/cxgb3/sge.c 0, q->alloc_size, PCI_DMA_FROMDEVICE); q 459 drivers/net/ethernet/chelsio/cxgb3/sge.c __free_pages(q->pg_chunk.page, order); q 460 drivers/net/ethernet/chelsio/cxgb3/sge.c q->pg_chunk.page = NULL; q 463 drivers/net/ethernet/chelsio/cxgb3/sge.c q->pg_chunk.mapping = mapping; q 465 
drivers/net/ethernet/chelsio/cxgb3/sge.c sd->pg_chunk = q->pg_chunk; q 469 drivers/net/ethernet/chelsio/cxgb3/sge.c q->pg_chunk.offset += q->buf_size; q 470 drivers/net/ethernet/chelsio/cxgb3/sge.c if (q->pg_chunk.offset == (PAGE_SIZE << order)) q 471 drivers/net/ethernet/chelsio/cxgb3/sge.c q->pg_chunk.page = NULL; q 473 drivers/net/ethernet/chelsio/cxgb3/sge.c q->pg_chunk.va += q->buf_size; q 474 drivers/net/ethernet/chelsio/cxgb3/sge.c get_page(q->pg_chunk.page); q 485 drivers/net/ethernet/chelsio/cxgb3/sge.c static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q) q 487 drivers/net/ethernet/chelsio/cxgb3/sge.c if (q->pend_cred >= q->credits / 4) { q 488 drivers/net/ethernet/chelsio/cxgb3/sge.c q->pend_cred = 0; q 490 drivers/net/ethernet/chelsio/cxgb3/sge.c t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id)); q 505 drivers/net/ethernet/chelsio/cxgb3/sge.c static int refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp) q 507 drivers/net/ethernet/chelsio/cxgb3/sge.c struct rx_sw_desc *sd = &q->sdesc[q->pidx]; q 508 drivers/net/ethernet/chelsio/cxgb3/sge.c struct rx_desc *d = &q->desc[q->pidx]; q 515 drivers/net/ethernet/chelsio/cxgb3/sge.c if (q->use_pages) { q 516 drivers/net/ethernet/chelsio/cxgb3/sge.c if (unlikely(alloc_pg_chunk(adap, q, sd, gfp, q 517 drivers/net/ethernet/chelsio/cxgb3/sge.c q->order))) { q 518 drivers/net/ethernet/chelsio/cxgb3/sge.c nomem: q->alloc_failed++; q 524 drivers/net/ethernet/chelsio/cxgb3/sge.c add_one_rx_chunk(mapping, d, q->gen); q 526 drivers/net/ethernet/chelsio/cxgb3/sge.c q->buf_size - SGE_PG_RSVD, q 531 drivers/net/ethernet/chelsio/cxgb3/sge.c struct sk_buff *skb = alloc_skb(q->buf_size, gfp); q 537 drivers/net/ethernet/chelsio/cxgb3/sge.c err = add_one_rx_buf(buf_start, q->buf_size, d, sd, q 538 drivers/net/ethernet/chelsio/cxgb3/sge.c q->gen, adap->pdev); q 540 drivers/net/ethernet/chelsio/cxgb3/sge.c clear_rx_desc(adap->pdev, q, sd); q 547 drivers/net/ethernet/chelsio/cxgb3/sge.c if (++q->pidx == q->size) { q 548 drivers/net/ethernet/chelsio/cxgb3/sge.c q->pidx = 0; q 549 drivers/net/ethernet/chelsio/cxgb3/sge.c q->gen ^= 1; q 550 drivers/net/ethernet/chelsio/cxgb3/sge.c sd = q->sdesc; q 551 drivers/net/ethernet/chelsio/cxgb3/sge.c d = q->desc; q 556 drivers/net/ethernet/chelsio/cxgb3/sge.c q->credits += count; q 557 drivers/net/ethernet/chelsio/cxgb3/sge.c q->pend_cred += count; q 558 drivers/net/ethernet/chelsio/cxgb3/sge.c ring_fl_db(adap, q); q 578 drivers/net/ethernet/chelsio/cxgb3/sge.c static void recycle_rx_buf(struct adapter *adap, struct sge_fl *q, q 581 drivers/net/ethernet/chelsio/cxgb3/sge.c struct rx_desc *from = &q->desc[idx]; q 582 drivers/net/ethernet/chelsio/cxgb3/sge.c struct rx_desc *to = &q->desc[q->pidx]; q 584 drivers/net/ethernet/chelsio/cxgb3/sge.c q->sdesc[q->pidx] = q->sdesc[idx]; q 588 drivers/net/ethernet/chelsio/cxgb3/sge.c to->len_gen = cpu_to_be32(V_FLD_GEN1(q->gen)); q 589 drivers/net/ethernet/chelsio/cxgb3/sge.c to->gen2 = cpu_to_be32(V_FLD_GEN2(q->gen)); q 591 drivers/net/ethernet/chelsio/cxgb3/sge.c if (++q->pidx == q->size) { q 592 drivers/net/ethernet/chelsio/cxgb3/sge.c q->pidx = 0; q 593 drivers/net/ethernet/chelsio/cxgb3/sge.c q->gen ^= 1; q 596 drivers/net/ethernet/chelsio/cxgb3/sge.c q->credits++; q 597 drivers/net/ethernet/chelsio/cxgb3/sge.c q->pend_cred++; q 598 drivers/net/ethernet/chelsio/cxgb3/sge.c ring_fl_db(adap, q); q 647 drivers/net/ethernet/chelsio/cxgb3/sge.c static void t3_reset_qset(struct sge_qset *q) q 649 drivers/net/ethernet/chelsio/cxgb3/sge.c if 
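ring_fl_db() above batches free-list credits and rings the hardware doorbell only once a quarter of the list's worth has accumulated, trading a little replenish latency for far fewer MMIO writes. A sketch with the register access stubbed out; write_doorbell() is a stand-in, not a driver function:

    struct freelist {
        unsigned int credits;    /* buffers currently posted to hardware */
        unsigned int pend_cred;  /* posted but not yet announced */
        unsigned int cntxt_id;
    };

    static void write_doorbell(unsigned int cntxt_id)
    {
        /* the driver writes V_EGRCNTX(cntxt_id) to A_SG_KDOORBELL here */
        (void)cntxt_id;
    }

    static void ring_fl_db(struct freelist *q)
    {
        if (q->pend_cred >= q->credits / 4) {
            q->pend_cred = 0;
            write_doorbell(q->cntxt_id);
        }
    }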
(q->adap && q 650 drivers/net/ethernet/chelsio/cxgb3/sge.c !(q->adap->flags & NAPI_INIT)) { q 651 drivers/net/ethernet/chelsio/cxgb3/sge.c memset(q, 0, sizeof(*q)); q 655 drivers/net/ethernet/chelsio/cxgb3/sge.c q->adap = NULL; q 656 drivers/net/ethernet/chelsio/cxgb3/sge.c memset(&q->rspq, 0, sizeof(q->rspq)); q 657 drivers/net/ethernet/chelsio/cxgb3/sge.c memset(q->fl, 0, sizeof(struct sge_fl) * SGE_RXQ_PER_SET); q 658 drivers/net/ethernet/chelsio/cxgb3/sge.c memset(q->txq, 0, sizeof(struct sge_txq) * SGE_TXQ_PER_SET); q 659 drivers/net/ethernet/chelsio/cxgb3/sge.c q->txq_stopped = 0; q 660 drivers/net/ethernet/chelsio/cxgb3/sge.c q->tx_reclaim_timer.function = NULL; /* for t3_stop_sge_timers() */ q 661 drivers/net/ethernet/chelsio/cxgb3/sge.c q->rx_reclaim_timer.function = NULL; q 662 drivers/net/ethernet/chelsio/cxgb3/sge.c q->nomem = 0; q 663 drivers/net/ethernet/chelsio/cxgb3/sge.c napi_free_frags(&q->napi); q 676 drivers/net/ethernet/chelsio/cxgb3/sge.c static void t3_free_qset(struct adapter *adapter, struct sge_qset *q) q 682 drivers/net/ethernet/chelsio/cxgb3/sge.c if (q->fl[i].desc) { q 684 drivers/net/ethernet/chelsio/cxgb3/sge.c t3_sge_disable_fl(adapter, q->fl[i].cntxt_id); q 686 drivers/net/ethernet/chelsio/cxgb3/sge.c free_rx_bufs(pdev, &q->fl[i]); q 687 drivers/net/ethernet/chelsio/cxgb3/sge.c kfree(q->fl[i].sdesc); q 689 drivers/net/ethernet/chelsio/cxgb3/sge.c q->fl[i].size * q 690 drivers/net/ethernet/chelsio/cxgb3/sge.c sizeof(struct rx_desc), q->fl[i].desc, q 691 drivers/net/ethernet/chelsio/cxgb3/sge.c q->fl[i].phys_addr); q 695 drivers/net/ethernet/chelsio/cxgb3/sge.c if (q->txq[i].desc) { q 697 drivers/net/ethernet/chelsio/cxgb3/sge.c t3_sge_enable_ecntxt(adapter, q->txq[i].cntxt_id, 0); q 699 drivers/net/ethernet/chelsio/cxgb3/sge.c if (q->txq[i].sdesc) { q 700 drivers/net/ethernet/chelsio/cxgb3/sge.c free_tx_desc(adapter, &q->txq[i], q 701 drivers/net/ethernet/chelsio/cxgb3/sge.c q->txq[i].in_use); q 702 drivers/net/ethernet/chelsio/cxgb3/sge.c kfree(q->txq[i].sdesc); q 705 drivers/net/ethernet/chelsio/cxgb3/sge.c q->txq[i].size * q 707 drivers/net/ethernet/chelsio/cxgb3/sge.c q->txq[i].desc, q->txq[i].phys_addr); q 708 drivers/net/ethernet/chelsio/cxgb3/sge.c __skb_queue_purge(&q->txq[i].sendq); q 711 drivers/net/ethernet/chelsio/cxgb3/sge.c if (q->rspq.desc) { q 713 drivers/net/ethernet/chelsio/cxgb3/sge.c t3_sge_disable_rspcntxt(adapter, q->rspq.cntxt_id); q 716 drivers/net/ethernet/chelsio/cxgb3/sge.c q->rspq.size * sizeof(struct rsp_desc), q 717 drivers/net/ethernet/chelsio/cxgb3/sge.c q->rspq.desc, q->rspq.phys_addr); q 720 drivers/net/ethernet/chelsio/cxgb3/sge.c t3_reset_qset(q); q 843 drivers/net/ethernet/chelsio/cxgb3/sge.c struct sge_rspq *q, unsigned int len, q 851 drivers/net/ethernet/chelsio/cxgb3/sge.c newskb = skb = q->pg_skb; q 867 drivers/net/ethernet/chelsio/cxgb3/sge.c q->rx_recycle_buf++; q 871 drivers/net/ethernet/chelsio/cxgb3/sge.c if (unlikely(q->rx_recycle_buf || (!skb && fl->credits <= drop_thres))) q 1049 drivers/net/ethernet/chelsio/cxgb3/sge.c static inline void check_ring_tx_db(struct adapter *adap, struct sge_txq *q) q 1052 drivers/net/ethernet/chelsio/cxgb3/sge.c clear_bit(TXQ_LAST_PKT_DB, &q->flags); q 1053 drivers/net/ethernet/chelsio/cxgb3/sge.c if (test_and_set_bit(TXQ_RUNNING, &q->flags) == 0) { q 1054 drivers/net/ethernet/chelsio/cxgb3/sge.c set_bit(TXQ_LAST_PKT_DB, &q->flags); q 1056 drivers/net/ethernet/chelsio/cxgb3/sge.c F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id)); q 1061 drivers/net/ethernet/chelsio/cxgb3/sge.c F_SELEGRCNTX 
| V_EGRCNTX(q->cntxt_id)); q 1093 drivers/net/ethernet/chelsio/cxgb3/sge.c const struct sge_txq *q, q 1100 drivers/net/ethernet/chelsio/cxgb3/sge.c struct tx_sw_desc *sd = &q->sdesc[pidx]; q 1140 drivers/net/ethernet/chelsio/cxgb3/sge.c if (++pidx == q->size) { q 1143 drivers/net/ethernet/chelsio/cxgb3/sge.c d = q->desc; q 1144 drivers/net/ethernet/chelsio/cxgb3/sge.c sd = q->sdesc; q 1182 drivers/net/ethernet/chelsio/cxgb3/sge.c struct sge_txq *q, unsigned int ndesc, q 1187 drivers/net/ethernet/chelsio/cxgb3/sge.c struct tx_desc *d = &q->desc[pidx]; q 1218 drivers/net/ethernet/chelsio/cxgb3/sge.c q->sdesc[pidx].skb = NULL; q 1231 drivers/net/ethernet/chelsio/cxgb3/sge.c V_WR_TID(q->token)); q 1243 drivers/net/ethernet/chelsio/cxgb3/sge.c write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, gen, q 1245 drivers/net/ethernet/chelsio/cxgb3/sge.c htonl(V_WR_TID(q->token))); q 1249 drivers/net/ethernet/chelsio/cxgb3/sge.c struct sge_qset *qs, struct sge_txq *q) q 1253 drivers/net/ethernet/chelsio/cxgb3/sge.c q->stops++; q 1271 drivers/net/ethernet/chelsio/cxgb3/sge.c struct sge_txq *q; q 1285 drivers/net/ethernet/chelsio/cxgb3/sge.c q = &qs->txq[TXQ_ETH]; q 1288 drivers/net/ethernet/chelsio/cxgb3/sge.c reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK); q 1290 drivers/net/ethernet/chelsio/cxgb3/sge.c credits = q->size - q->in_use; q 1294 drivers/net/ethernet/chelsio/cxgb3/sge.c t3_stop_tx_queue(txq, qs, q); q 1297 drivers/net/ethernet/chelsio/cxgb3/sge.c dev->name, q->cntxt_id & 7); q 1309 drivers/net/ethernet/chelsio/cxgb3/sge.c q->in_use += ndesc; q 1310 drivers/net/ethernet/chelsio/cxgb3/sge.c if (unlikely(credits - ndesc < q->stop_thres)) { q 1311 drivers/net/ethernet/chelsio/cxgb3/sge.c t3_stop_tx_queue(txq, qs, q); q 1313 drivers/net/ethernet/chelsio/cxgb3/sge.c if (should_restart_tx(q) && q 1315 drivers/net/ethernet/chelsio/cxgb3/sge.c q->restarts++; q 1320 drivers/net/ethernet/chelsio/cxgb3/sge.c gen = q->gen; q 1321 drivers/net/ethernet/chelsio/cxgb3/sge.c q->unacked += ndesc; q 1322 drivers/net/ethernet/chelsio/cxgb3/sge.c compl = (q->unacked & 8) << (S_WR_COMPL - 3); q 1323 drivers/net/ethernet/chelsio/cxgb3/sge.c q->unacked &= 7; q 1324 drivers/net/ethernet/chelsio/cxgb3/sge.c pidx = q->pidx; q 1325 drivers/net/ethernet/chelsio/cxgb3/sge.c q->pidx += ndesc; q 1326 drivers/net/ethernet/chelsio/cxgb3/sge.c if (q->pidx >= q->size) { q 1327 drivers/net/ethernet/chelsio/cxgb3/sge.c q->pidx -= q->size; q 1328 drivers/net/ethernet/chelsio/cxgb3/sge.c q->gen ^= 1; q 1366 drivers/net/ethernet/chelsio/cxgb3/sge.c write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl, addr); q 1367 drivers/net/ethernet/chelsio/cxgb3/sge.c check_ring_tx_db(adap, q); q 1421 drivers/net/ethernet/chelsio/cxgb3/sge.c static inline int check_desc_avail(struct adapter *adap, struct sge_txq *q, q 1425 drivers/net/ethernet/chelsio/cxgb3/sge.c if (unlikely(!skb_queue_empty(&q->sendq))) { q 1426 drivers/net/ethernet/chelsio/cxgb3/sge.c addq_exit:__skb_queue_tail(&q->sendq, skb); q 1429 drivers/net/ethernet/chelsio/cxgb3/sge.c if (unlikely(q->size - q->in_use < ndesc)) { q 1430 drivers/net/ethernet/chelsio/cxgb3/sge.c struct sge_qset *qs = txq_to_qset(q, qid); q 1435 drivers/net/ethernet/chelsio/cxgb3/sge.c if (should_restart_tx(q) && q 1439 drivers/net/ethernet/chelsio/cxgb3/sge.c q->stops++; q 1453 drivers/net/ethernet/chelsio/cxgb3/sge.c static inline void reclaim_completed_tx_imm(struct sge_txq *q) q 1455 drivers/net/ethernet/chelsio/cxgb3/sge.c unsigned int reclaim = q->processed - q->cleaned; q 1457 
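check_desc_avail() above preserves submission order under backpressure: if a software sendq already holds a backlog, new work joins it; if the ring itself lacks room, the packet is parked and the stall is flagged so a later completion can restart the queue. The shape of that decision, with the restart-race handling omitted:

    enum { XMIT_RING, XMIT_PARKED, XMIT_STOPPED };

    struct soft_txq {
        unsigned int size, in_use;
        unsigned int sendq_len;  /* stand-in for the driver's skb sendq */
    };

    static int check_desc_avail(struct soft_txq *q, unsigned int ndesc)
    {
        if (q->sendq_len) {          /* keep FIFO order behind the backlog */
            q->sendq_len++;
            return XMIT_PARKED;
        }
        if (q->size - q->in_use < ndesc) {
            q->sendq_len++;          /* park it and flag the stall */
            return XMIT_STOPPED;
        }
        return XMIT_RING;            /* caller writes descriptors directly */
    }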
drivers/net/ethernet/chelsio/cxgb3/sge.c q->in_use -= reclaim; q 1458 drivers/net/ethernet/chelsio/cxgb3/sge.c q->cleaned += reclaim; q 1476 drivers/net/ethernet/chelsio/cxgb3/sge.c static int ctrl_xmit(struct adapter *adap, struct sge_txq *q, q 1489 drivers/net/ethernet/chelsio/cxgb3/sge.c wrp->wr_lo = htonl(V_WR_TID(q->token)); q 1491 drivers/net/ethernet/chelsio/cxgb3/sge.c spin_lock(&q->lock); q 1492 drivers/net/ethernet/chelsio/cxgb3/sge.c again:reclaim_completed_tx_imm(q); q 1494 drivers/net/ethernet/chelsio/cxgb3/sge.c ret = check_desc_avail(adap, q, skb, 1, TXQ_CTRL); q 1497 drivers/net/ethernet/chelsio/cxgb3/sge.c spin_unlock(&q->lock); q 1503 drivers/net/ethernet/chelsio/cxgb3/sge.c write_imm(&q->desc[q->pidx], skb, skb->len, q->gen); q 1505 drivers/net/ethernet/chelsio/cxgb3/sge.c q->in_use++; q 1506 drivers/net/ethernet/chelsio/cxgb3/sge.c if (++q->pidx >= q->size) { q 1507 drivers/net/ethernet/chelsio/cxgb3/sge.c q->pidx = 0; q 1508 drivers/net/ethernet/chelsio/cxgb3/sge.c q->gen ^= 1; q 1510 drivers/net/ethernet/chelsio/cxgb3/sge.c spin_unlock(&q->lock); q 1513 drivers/net/ethernet/chelsio/cxgb3/sge.c F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id)); q 1527 drivers/net/ethernet/chelsio/cxgb3/sge.c struct sge_txq *q = &qs->txq[TXQ_CTRL]; q 1529 drivers/net/ethernet/chelsio/cxgb3/sge.c spin_lock(&q->lock); q 1530 drivers/net/ethernet/chelsio/cxgb3/sge.c again:reclaim_completed_tx_imm(q); q 1532 drivers/net/ethernet/chelsio/cxgb3/sge.c while (q->in_use < q->size && q 1533 drivers/net/ethernet/chelsio/cxgb3/sge.c (skb = __skb_dequeue(&q->sendq)) != NULL) { q 1535 drivers/net/ethernet/chelsio/cxgb3/sge.c write_imm(&q->desc[q->pidx], skb, skb->len, q->gen); q 1537 drivers/net/ethernet/chelsio/cxgb3/sge.c if (++q->pidx >= q->size) { q 1538 drivers/net/ethernet/chelsio/cxgb3/sge.c q->pidx = 0; q 1539 drivers/net/ethernet/chelsio/cxgb3/sge.c q->gen ^= 1; q 1541 drivers/net/ethernet/chelsio/cxgb3/sge.c q->in_use++; q 1544 drivers/net/ethernet/chelsio/cxgb3/sge.c if (!skb_queue_empty(&q->sendq)) { q 1548 drivers/net/ethernet/chelsio/cxgb3/sge.c if (should_restart_tx(q) && q 1551 drivers/net/ethernet/chelsio/cxgb3/sge.c q->stops++; q 1554 drivers/net/ethernet/chelsio/cxgb3/sge.c spin_unlock(&q->lock); q 1557 drivers/net/ethernet/chelsio/cxgb3/sge.c F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id)); q 1630 drivers/net/ethernet/chelsio/cxgb3/sge.c struct sge_txq *q, unsigned int pidx, q 1637 drivers/net/ethernet/chelsio/cxgb3/sge.c struct tx_desc *d = &q->desc[pidx]; q 1640 drivers/net/ethernet/chelsio/cxgb3/sge.c q->sdesc[pidx].skb = NULL; q 1661 drivers/net/ethernet/chelsio/cxgb3/sge.c write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, q 1694 drivers/net/ethernet/chelsio/cxgb3/sge.c static int ofld_xmit(struct adapter *adap, struct sge_txq *q, q 1700 drivers/net/ethernet/chelsio/cxgb3/sge.c spin_lock(&q->lock); q 1701 drivers/net/ethernet/chelsio/cxgb3/sge.c again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK); q 1703 drivers/net/ethernet/chelsio/cxgb3/sge.c ret = check_desc_avail(adap, q, skb, ndesc, TXQ_OFLD); q 1707 drivers/net/ethernet/chelsio/cxgb3/sge.c spin_unlock(&q->lock); q 1715 drivers/net/ethernet/chelsio/cxgb3/sge.c spin_unlock(&q->lock); q 1719 drivers/net/ethernet/chelsio/cxgb3/sge.c gen = q->gen; q 1720 drivers/net/ethernet/chelsio/cxgb3/sge.c q->in_use += ndesc; q 1721 drivers/net/ethernet/chelsio/cxgb3/sge.c pidx = q->pidx; q 1722 drivers/net/ethernet/chelsio/cxgb3/sge.c q->pidx += ndesc; q 1723 drivers/net/ethernet/chelsio/cxgb3/sge.c if (q->pidx >= q->size) { q 1724 
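ofld_xmit() above holds the queue spinlock only for the index bookkeeping: gen and pidx are snapshotted, in_use is bumped, the lock is dropped, and write_ofld_wr() fills the reserved descriptors afterwards. The same split with a pthread mutex standing in for the spinlock:

    #include <pthread.h>

    struct oq {
        pthread_mutex_t lock;
        unsigned int pidx, size, in_use, gen;
    };

    /* Reserve ndesc slots; returns the starting slot and the generation to
     * stamp into the descriptors.  The descriptor writes happen after the
     * unlock, exactly as in the driver. */
    static unsigned int reserve_desc(struct oq *q, unsigned int ndesc,
                                     unsigned int *gen)
    {
        unsigned int pidx;

        pthread_mutex_lock(&q->lock);
        *gen = q->gen;
        q->in_use += ndesc;
        pidx = q->pidx;
        q->pidx += ndesc;
        if (q->pidx >= q->size) {
            q->pidx -= q->size;
            q->gen ^= 1;
        }
        pthread_mutex_unlock(&q->lock);
        return pidx;
    }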
drivers/net/ethernet/chelsio/cxgb3/sge.c q->pidx -= q->size; q 1725 drivers/net/ethernet/chelsio/cxgb3/sge.c q->gen ^= 1; q 1727 drivers/net/ethernet/chelsio/cxgb3/sge.c spin_unlock(&q->lock); q 1729 drivers/net/ethernet/chelsio/cxgb3/sge.c write_ofld_wr(adap, skb, q, pidx, gen, ndesc, (dma_addr_t *)skb->head); q 1730 drivers/net/ethernet/chelsio/cxgb3/sge.c check_ring_tx_db(adap, q); q 1744 drivers/net/ethernet/chelsio/cxgb3/sge.c struct sge_txq *q = &qs->txq[TXQ_OFLD]; q 1749 drivers/net/ethernet/chelsio/cxgb3/sge.c spin_lock(&q->lock); q 1750 drivers/net/ethernet/chelsio/cxgb3/sge.c again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK); q 1752 drivers/net/ethernet/chelsio/cxgb3/sge.c while ((skb = skb_peek(&q->sendq)) != NULL) { q 1756 drivers/net/ethernet/chelsio/cxgb3/sge.c if (unlikely(q->size - q->in_use < ndesc)) { q 1760 drivers/net/ethernet/chelsio/cxgb3/sge.c if (should_restart_tx(q) && q 1763 drivers/net/ethernet/chelsio/cxgb3/sge.c q->stops++; q 1771 drivers/net/ethernet/chelsio/cxgb3/sge.c gen = q->gen; q 1772 drivers/net/ethernet/chelsio/cxgb3/sge.c q->in_use += ndesc; q 1773 drivers/net/ethernet/chelsio/cxgb3/sge.c pidx = q->pidx; q 1774 drivers/net/ethernet/chelsio/cxgb3/sge.c q->pidx += ndesc; q 1776 drivers/net/ethernet/chelsio/cxgb3/sge.c if (q->pidx >= q->size) { q 1777 drivers/net/ethernet/chelsio/cxgb3/sge.c q->pidx -= q->size; q 1778 drivers/net/ethernet/chelsio/cxgb3/sge.c q->gen ^= 1; q 1780 drivers/net/ethernet/chelsio/cxgb3/sge.c __skb_unlink(skb, &q->sendq); q 1781 drivers/net/ethernet/chelsio/cxgb3/sge.c spin_unlock(&q->lock); q 1783 drivers/net/ethernet/chelsio/cxgb3/sge.c write_ofld_wr(adap, skb, q, pidx, gen, ndesc, q 1785 drivers/net/ethernet/chelsio/cxgb3/sge.c spin_lock(&q->lock); q 1787 drivers/net/ethernet/chelsio/cxgb3/sge.c spin_unlock(&q->lock); q 1790 drivers/net/ethernet/chelsio/cxgb3/sge.c set_bit(TXQ_RUNNING, &q->flags); q 1791 drivers/net/ethernet/chelsio/cxgb3/sge.c set_bit(TXQ_LAST_PKT_DB, &q->flags); q 1796 drivers/net/ethernet/chelsio/cxgb3/sge.c F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id)); q 1852 drivers/net/ethernet/chelsio/cxgb3/sge.c static inline void offload_enqueue(struct sge_rspq *q, struct sk_buff *skb) q 1854 drivers/net/ethernet/chelsio/cxgb3/sge.c int was_empty = skb_queue_empty(&q->rx_queue); q 1856 drivers/net/ethernet/chelsio/cxgb3/sge.c __skb_queue_tail(&q->rx_queue, skb); q 1859 drivers/net/ethernet/chelsio/cxgb3/sge.c struct sge_qset *qs = rspq_to_qset(q); q 1875 drivers/net/ethernet/chelsio/cxgb3/sge.c struct sge_rspq *q, q 1879 drivers/net/ethernet/chelsio/cxgb3/sge.c q->offload_bundles++; q 1898 drivers/net/ethernet/chelsio/cxgb3/sge.c struct sge_rspq *q = &qs->rspq; q 1907 drivers/net/ethernet/chelsio/cxgb3/sge.c spin_lock_irq(&q->lock); q 1909 drivers/net/ethernet/chelsio/cxgb3/sge.c skb_queue_splice_init(&q->rx_queue, &queue); q 1912 drivers/net/ethernet/chelsio/cxgb3/sge.c spin_unlock_irq(&q->lock); q 1915 drivers/net/ethernet/chelsio/cxgb3/sge.c spin_unlock_irq(&q->lock); q 1927 drivers/net/ethernet/chelsio/cxgb3/sge.c q->offload_bundles++; q 1935 drivers/net/ethernet/chelsio/cxgb3/sge.c spin_lock_irq(&q->lock); q 1936 drivers/net/ethernet/chelsio/cxgb3/sge.c skb_queue_splice(&queue, &q->rx_queue); q 1937 drivers/net/ethernet/chelsio/cxgb3/sge.c spin_unlock_irq(&q->lock); q 1939 drivers/net/ethernet/chelsio/cxgb3/sge.c deliver_partial_bundle(&adapter->tdev, q, skbs, ngathered); q 2285 drivers/net/ethernet/chelsio/cxgb3/sge.c const struct sge_rspq *q) q 2287 drivers/net/ethernet/chelsio/cxgb3/sge.c return (r->intr_gen & 
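The process_responses() hits above show the response-ring consumer: an entry is "new" when its generation flag matches the queue's expected value, cidx wraps with a generation flip, and consumed entries are handed back to hardware in quarter-ring batches through refill_rspq(). Condensed, with the F_RSPD_GEN2 flag masking reduced to a plain field:

    struct rspq { unsigned int cidx, size, gen, credits; };
    struct rsp_desc { unsigned int gen; /* payload fields elided */ };

    static int is_new_response(const struct rsp_desc *r, const struct rspq *q)
    {
        return r->gen == q->gen;
    }

    static void refill_rspq(struct rspq *q) { (void)q; /* GTS write elided */ }

    static void consume_one(struct rspq *q)
    {
        if (++q->cidx == q->size) {
            q->cidx = 0;
            q->gen ^= 1;                 /* expected generation flips on wrap */
        }
        if (++q->credits >= q->size / 4) {
            refill_rspq(q);
            q->credits = 0;
        }
    }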
F_RSPD_GEN2) == q->gen; q 2290 drivers/net/ethernet/chelsio/cxgb3/sge.c static inline void clear_rspq_bufstate(struct sge_rspq * const q) q 2292 drivers/net/ethernet/chelsio/cxgb3/sge.c q->pg_skb = NULL; q 2293 drivers/net/ethernet/chelsio/cxgb3/sge.c q->rx_recycle_buf = 0; q 2323 drivers/net/ethernet/chelsio/cxgb3/sge.c struct sge_rspq *q = &qs->rspq; q 2324 drivers/net/ethernet/chelsio/cxgb3/sge.c struct rsp_desc *r = &q->desc[q->cidx]; q 2330 drivers/net/ethernet/chelsio/cxgb3/sge.c q->next_holdoff = q->holdoff_tmr; q 2332 drivers/net/ethernet/chelsio/cxgb3/sge.c while (likely(budget_left && is_new_response(r, q))) { q 2353 drivers/net/ethernet/chelsio/cxgb3/sge.c q->async_notif++; q 2358 drivers/net/ethernet/chelsio/cxgb3/sge.c q->next_holdoff = NOMEM_INTR_DELAY; q 2359 drivers/net/ethernet/chelsio/cxgb3/sge.c q->nomem++; q 2364 drivers/net/ethernet/chelsio/cxgb3/sge.c q->imm_data++; q 2387 drivers/net/ethernet/chelsio/cxgb3/sge.c skb = get_packet_pg(adap, fl, q, q 2391 drivers/net/ethernet/chelsio/cxgb3/sge.c q->pg_skb = skb; q 2398 drivers/net/ethernet/chelsio/cxgb3/sge.c q->rx_drops++; q 2405 drivers/net/ethernet/chelsio/cxgb3/sge.c q->pure_rsps++; q 2413 drivers/net/ethernet/chelsio/cxgb3/sge.c if (unlikely(++q->cidx == q->size)) { q 2414 drivers/net/ethernet/chelsio/cxgb3/sge.c q->cidx = 0; q 2415 drivers/net/ethernet/chelsio/cxgb3/sge.c q->gen ^= 1; q 2416 drivers/net/ethernet/chelsio/cxgb3/sge.c r = q->desc; q 2420 drivers/net/ethernet/chelsio/cxgb3/sge.c if (++q->credits >= (q->size / 4)) { q 2421 drivers/net/ethernet/chelsio/cxgb3/sge.c refill_rspq(adap, q, q->credits); q 2422 drivers/net/ethernet/chelsio/cxgb3/sge.c q->credits = 0; q 2431 drivers/net/ethernet/chelsio/cxgb3/sge.c rx_eth(adap, q, skb, ethpad, lro); q 2433 drivers/net/ethernet/chelsio/cxgb3/sge.c q->offload_pkts++; q 2437 drivers/net/ethernet/chelsio/cxgb3/sge.c ngathered = rx_offload(&adap->tdev, q, skb, q 2443 drivers/net/ethernet/chelsio/cxgb3/sge.c clear_rspq_bufstate(q); q 2448 drivers/net/ethernet/chelsio/cxgb3/sge.c deliver_partial_bundle(&adap->tdev, q, offload_skbs, ngathered); q 2530 drivers/net/ethernet/chelsio/cxgb3/sge.c struct sge_rspq *q = &qs->rspq; q 2537 drivers/net/ethernet/chelsio/cxgb3/sge.c if (unlikely(++q->cidx == q->size)) { q 2538 drivers/net/ethernet/chelsio/cxgb3/sge.c q->cidx = 0; q 2539 drivers/net/ethernet/chelsio/cxgb3/sge.c q->gen ^= 1; q 2540 drivers/net/ethernet/chelsio/cxgb3/sge.c r = q->desc; q 2549 drivers/net/ethernet/chelsio/cxgb3/sge.c q->pure_rsps++; q 2550 drivers/net/ethernet/chelsio/cxgb3/sge.c if (++q->credits >= (q->size / 4)) { q 2551 drivers/net/ethernet/chelsio/cxgb3/sge.c refill_rspq(adap, q, q->credits); q 2552 drivers/net/ethernet/chelsio/cxgb3/sge.c q->credits = 0; q 2554 drivers/net/ethernet/chelsio/cxgb3/sge.c if (!is_new_response(r, q)) q 2566 drivers/net/ethernet/chelsio/cxgb3/sge.c return is_new_response(r, q); q 2584 drivers/net/ethernet/chelsio/cxgb3/sge.c static inline int handle_responses(struct adapter *adap, struct sge_rspq *q) q 2586 drivers/net/ethernet/chelsio/cxgb3/sge.c struct sge_qset *qs = rspq_to_qset(q); q 2587 drivers/net/ethernet/chelsio/cxgb3/sge.c struct rsp_desc *r = &q->desc[q->cidx]; q 2589 drivers/net/ethernet/chelsio/cxgb3/sge.c if (!is_new_response(r, q)) q 2593 drivers/net/ethernet/chelsio/cxgb3/sge.c t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) | q 2594 drivers/net/ethernet/chelsio/cxgb3/sge.c V_NEWTIMER(q->holdoff_tmr) | V_NEWINDEX(q->cidx)); q 2609 drivers/net/ethernet/chelsio/cxgb3/sge.c struct sge_rspq *q = &qs->rspq; q 
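When an allocation fails mid-poll (the q->nomem / NOMEM_INTR_DELAY hits above), the loop does not spin: it records the failure and arms a longer interrupt holdoff so the retry happens after memory pressure has had a moment to ease. In outline; the real NOMEM_INTR_DELAY value lives in the driver, 2500 below is only a placeholder:

    #define NOMEM_INTR_DELAY 2500   /* placeholder, not the driver's value */

    struct rspq_hold {
        unsigned int holdoff_tmr;   /* configured holdoff for this queue */
        unsigned int next_holdoff;  /* value armed at the end of this poll */
        unsigned long nomem;
    };

    static void poll_begin(struct rspq_hold *q)
    {
        q->next_holdoff = q->holdoff_tmr;   /* default re-arm value */
    }

    static void note_nomem(struct rspq_hold *q)
    {
        q->next_holdoff = NOMEM_INTR_DELAY; /* back off instead of spinning */
        q->nomem++;
    }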
2611 drivers/net/ethernet/chelsio/cxgb3/sge.c spin_lock(&q->lock); q 2613 drivers/net/ethernet/chelsio/cxgb3/sge.c q->unhandled_irqs++; q 2614 drivers/net/ethernet/chelsio/cxgb3/sge.c t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) | q 2615 drivers/net/ethernet/chelsio/cxgb3/sge.c V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx)); q 2616 drivers/net/ethernet/chelsio/cxgb3/sge.c spin_unlock(&q->lock); q 2627 drivers/net/ethernet/chelsio/cxgb3/sge.c struct sge_rspq *q = &qs->rspq; q 2629 drivers/net/ethernet/chelsio/cxgb3/sge.c spin_lock(&q->lock); q 2631 drivers/net/ethernet/chelsio/cxgb3/sge.c if (handle_responses(qs->adap, q) < 0) q 2632 drivers/net/ethernet/chelsio/cxgb3/sge.c q->unhandled_irqs++; q 2633 drivers/net/ethernet/chelsio/cxgb3/sge.c spin_unlock(&q->lock); q 2647 drivers/net/ethernet/chelsio/cxgb3/sge.c struct sge_rspq *q = &adap->sge.qs[0].rspq; q 2649 drivers/net/ethernet/chelsio/cxgb3/sge.c spin_lock(&q->lock); q 2652 drivers/net/ethernet/chelsio/cxgb3/sge.c t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) | q 2653 drivers/net/ethernet/chelsio/cxgb3/sge.c V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx)); q 2668 drivers/net/ethernet/chelsio/cxgb3/sge.c q->unhandled_irqs++; q 2670 drivers/net/ethernet/chelsio/cxgb3/sge.c spin_unlock(&q->lock); q 2676 drivers/net/ethernet/chelsio/cxgb3/sge.c struct sge_rspq *q = &qs->rspq; q 2679 drivers/net/ethernet/chelsio/cxgb3/sge.c is_new_response(&q->desc[q->cidx], q)) { q 2697 drivers/net/ethernet/chelsio/cxgb3/sge.c struct sge_rspq *q = &adap->sge.qs[0].rspq; q 2699 drivers/net/ethernet/chelsio/cxgb3/sge.c spin_lock(&q->lock); q 2705 drivers/net/ethernet/chelsio/cxgb3/sge.c q->unhandled_irqs++; q 2707 drivers/net/ethernet/chelsio/cxgb3/sge.c spin_unlock(&q->lock); q 3041 drivers/net/ethernet/chelsio/cxgb3/sge.c struct sge_qset *q = &adapter->sge.qs[id]; q 3043 drivers/net/ethernet/chelsio/cxgb3/sge.c init_qset_cntxt(q, id); q 3044 drivers/net/ethernet/chelsio/cxgb3/sge.c timer_setup(&q->tx_reclaim_timer, sge_timer_tx, 0); q 3045 drivers/net/ethernet/chelsio/cxgb3/sge.c timer_setup(&q->rx_reclaim_timer, sge_timer_rx, 0); q 3047 drivers/net/ethernet/chelsio/cxgb3/sge.c q->fl[0].desc = alloc_ring(adapter->pdev, p->fl_size, q 3050 drivers/net/ethernet/chelsio/cxgb3/sge.c &q->fl[0].phys_addr, &q->fl[0].sdesc); q 3051 drivers/net/ethernet/chelsio/cxgb3/sge.c if (!q->fl[0].desc) q 3054 drivers/net/ethernet/chelsio/cxgb3/sge.c q->fl[1].desc = alloc_ring(adapter->pdev, p->jumbo_size, q 3057 drivers/net/ethernet/chelsio/cxgb3/sge.c &q->fl[1].phys_addr, &q->fl[1].sdesc); q 3058 drivers/net/ethernet/chelsio/cxgb3/sge.c if (!q->fl[1].desc) q 3061 drivers/net/ethernet/chelsio/cxgb3/sge.c q->rspq.desc = alloc_ring(adapter->pdev, p->rspq_size, q 3063 drivers/net/ethernet/chelsio/cxgb3/sge.c &q->rspq.phys_addr, NULL); q 3064 drivers/net/ethernet/chelsio/cxgb3/sge.c if (!q->rspq.desc) q 3074 drivers/net/ethernet/chelsio/cxgb3/sge.c q->txq[i].desc = alloc_ring(adapter->pdev, p->txq_size[i], q 3076 drivers/net/ethernet/chelsio/cxgb3/sge.c &q->txq[i].phys_addr, q 3077 drivers/net/ethernet/chelsio/cxgb3/sge.c &q->txq[i].sdesc); q 3078 drivers/net/ethernet/chelsio/cxgb3/sge.c if (!q->txq[i].desc) q 3081 drivers/net/ethernet/chelsio/cxgb3/sge.c q->txq[i].gen = 1; q 3082 drivers/net/ethernet/chelsio/cxgb3/sge.c q->txq[i].size = p->txq_size[i]; q 3083 drivers/net/ethernet/chelsio/cxgb3/sge.c spin_lock_init(&q->txq[i].lock); q 3084 drivers/net/ethernet/chelsio/cxgb3/sge.c skb_queue_head_init(&q->txq[i].sendq); q 3087 drivers/net/ethernet/chelsio/cxgb3/sge.c 
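t3_sge_alloc_qset() above builds every ring through alloc_ring(): one DMA-coherent block of hardware descriptors plus, optionally, a parallel array of software bookkeeping entries. A heap-backed model of that contract; the real function also hands back the bus address for programming into the hardware context:

    #include <stdlib.h>

    static void *alloc_ring(size_t nelem, size_t elem_size,
                            size_t sw_size, void **sw_ring)
    {
        void *hw = calloc(nelem, elem_size);   /* dma_alloc_coherent() there */

        if (!hw)
            return NULL;
        if (sw_size) {
            *sw_ring = calloc(nelem, sw_size); /* kcalloc() there */
            if (!*sw_ring) {
                free(hw);
                return NULL;
            }
        }
        return hw;
    }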
tasklet_init(&q->txq[TXQ_OFLD].qresume_tsk, restart_offloadq, q 3088 drivers/net/ethernet/chelsio/cxgb3/sge.c (unsigned long)q); q 3089 drivers/net/ethernet/chelsio/cxgb3/sge.c tasklet_init(&q->txq[TXQ_CTRL].qresume_tsk, restart_ctrlq, q 3090 drivers/net/ethernet/chelsio/cxgb3/sge.c (unsigned long)q); q 3092 drivers/net/ethernet/chelsio/cxgb3/sge.c q->fl[0].gen = q->fl[1].gen = 1; q 3093 drivers/net/ethernet/chelsio/cxgb3/sge.c q->fl[0].size = p->fl_size; q 3094 drivers/net/ethernet/chelsio/cxgb3/sge.c q->fl[1].size = p->jumbo_size; q 3096 drivers/net/ethernet/chelsio/cxgb3/sge.c q->rspq.gen = 1; q 3097 drivers/net/ethernet/chelsio/cxgb3/sge.c q->rspq.size = p->rspq_size; q 3098 drivers/net/ethernet/chelsio/cxgb3/sge.c spin_lock_init(&q->rspq.lock); q 3099 drivers/net/ethernet/chelsio/cxgb3/sge.c skb_queue_head_init(&q->rspq.rx_queue); q 3101 drivers/net/ethernet/chelsio/cxgb3/sge.c q->txq[TXQ_ETH].stop_thres = nports * q 3105 drivers/net/ethernet/chelsio/cxgb3/sge.c q->fl[0].buf_size = FL0_PG_CHUNK_SIZE; q 3107 drivers/net/ethernet/chelsio/cxgb3/sge.c q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE + sizeof(struct cpl_rx_data); q 3110 drivers/net/ethernet/chelsio/cxgb3/sge.c q->fl[1].buf_size = FL1_PG_CHUNK_SIZE; q 3112 drivers/net/ethernet/chelsio/cxgb3/sge.c q->fl[1].buf_size = is_offload(adapter) ? q 3117 drivers/net/ethernet/chelsio/cxgb3/sge.c q->fl[0].use_pages = FL0_PG_CHUNK_SIZE > 0; q 3118 drivers/net/ethernet/chelsio/cxgb3/sge.c q->fl[1].use_pages = FL1_PG_CHUNK_SIZE > 0; q 3119 drivers/net/ethernet/chelsio/cxgb3/sge.c q->fl[0].order = FL0_PG_ORDER; q 3120 drivers/net/ethernet/chelsio/cxgb3/sge.c q->fl[1].order = FL1_PG_ORDER; q 3121 drivers/net/ethernet/chelsio/cxgb3/sge.c q->fl[0].alloc_size = FL0_PG_ALLOC_SIZE; q 3122 drivers/net/ethernet/chelsio/cxgb3/sge.c q->fl[1].alloc_size = FL1_PG_ALLOC_SIZE; q 3127 drivers/net/ethernet/chelsio/cxgb3/sge.c ret = t3_sge_init_rspcntxt(adapter, q->rspq.cntxt_id, irq_vec_idx, q 3128 drivers/net/ethernet/chelsio/cxgb3/sge.c q->rspq.phys_addr, q->rspq.size, q 3129 drivers/net/ethernet/chelsio/cxgb3/sge.c q->fl[0].buf_size - SGE_PG_RSVD, 1, 0); q 3134 drivers/net/ethernet/chelsio/cxgb3/sge.c ret = t3_sge_init_flcntxt(adapter, q->fl[i].cntxt_id, 0, q 3135 drivers/net/ethernet/chelsio/cxgb3/sge.c q->fl[i].phys_addr, q->fl[i].size, q 3136 drivers/net/ethernet/chelsio/cxgb3/sge.c q->fl[i].buf_size - SGE_PG_RSVD, q 3142 drivers/net/ethernet/chelsio/cxgb3/sge.c ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_ETH].cntxt_id, USE_GTS, q 3143 drivers/net/ethernet/chelsio/cxgb3/sge.c SGE_CNTXT_ETH, id, q->txq[TXQ_ETH].phys_addr, q 3144 drivers/net/ethernet/chelsio/cxgb3/sge.c q->txq[TXQ_ETH].size, q->txq[TXQ_ETH].token, q 3150 drivers/net/ethernet/chelsio/cxgb3/sge.c ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_OFLD].cntxt_id, q 3152 drivers/net/ethernet/chelsio/cxgb3/sge.c q->txq[TXQ_OFLD].phys_addr, q 3153 drivers/net/ethernet/chelsio/cxgb3/sge.c q->txq[TXQ_OFLD].size, 0, 1, 0); q 3159 drivers/net/ethernet/chelsio/cxgb3/sge.c ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_CTRL].cntxt_id, 0, q 3161 drivers/net/ethernet/chelsio/cxgb3/sge.c q->txq[TXQ_CTRL].phys_addr, q 3162 drivers/net/ethernet/chelsio/cxgb3/sge.c q->txq[TXQ_CTRL].size, q 3163 drivers/net/ethernet/chelsio/cxgb3/sge.c q->txq[TXQ_CTRL].token, 1, 0); q 3170 drivers/net/ethernet/chelsio/cxgb3/sge.c q->adap = adapter; q 3171 drivers/net/ethernet/chelsio/cxgb3/sge.c q->netdev = dev; q 3172 drivers/net/ethernet/chelsio/cxgb3/sge.c q->tx_q = netdevq; q 3173 drivers/net/ethernet/chelsio/cxgb3/sge.c 
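The free-list bring-up above provisions FL0 with small buffers and FL1 with jumbo-capable ones, and marks each list page-mode whenever its compile-time chunk size is nonzero. Reduced to its shape; both sizes below are illustrative, not the driver's constants:

    #define FL0_PG_CHUNK_SIZE 2048U    /* illustrative */
    #define FL1_PG_CHUNK_SIZE 16384U   /* illustrative */

    struct fl_cfg { unsigned int buf_size; int use_pages; };

    static void config_free_lists(struct fl_cfg fl[2])
    {
        fl[0].buf_size  = FL0_PG_CHUNK_SIZE;
        fl[1].buf_size  = FL1_PG_CHUNK_SIZE;
        fl[0].use_pages = FL0_PG_CHUNK_SIZE > 0;
        fl[1].use_pages = FL1_PG_CHUNK_SIZE > 0;
    }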
t3_update_qset_coalesce(q, p); q 3175 drivers/net/ethernet/chelsio/cxgb3/sge.c avail = refill_fl(adapter, &q->fl[0], q->fl[0].size, q 3181 drivers/net/ethernet/chelsio/cxgb3/sge.c if (avail < q->fl[0].size) q 3185 drivers/net/ethernet/chelsio/cxgb3/sge.c avail = refill_fl(adapter, &q->fl[1], q->fl[1].size, q 3187 drivers/net/ethernet/chelsio/cxgb3/sge.c if (avail < q->fl[1].size) q 3190 drivers/net/ethernet/chelsio/cxgb3/sge.c refill_rspq(adapter, &q->rspq, q->rspq.size - 1); q 3192 drivers/net/ethernet/chelsio/cxgb3/sge.c t3_write_reg(adapter, A_SG_GTS, V_RSPQ(q->rspq.cntxt_id) | q 3193 drivers/net/ethernet/chelsio/cxgb3/sge.c V_NEWTIMER(q->rspq.holdoff_tmr)); q 3200 drivers/net/ethernet/chelsio/cxgb3/sge.c t3_free_qset(adapter, q); q 3215 drivers/net/ethernet/chelsio/cxgb3/sge.c struct sge_qset *q = &adap->sge.qs[i]; q 3217 drivers/net/ethernet/chelsio/cxgb3/sge.c if (q->tx_reclaim_timer.function) q 3218 drivers/net/ethernet/chelsio/cxgb3/sge.c mod_timer(&q->tx_reclaim_timer, q 3221 drivers/net/ethernet/chelsio/cxgb3/sge.c if (q->rx_reclaim_timer.function) q 3222 drivers/net/ethernet/chelsio/cxgb3/sge.c mod_timer(&q->rx_reclaim_timer, q 3238 drivers/net/ethernet/chelsio/cxgb3/sge.c struct sge_qset *q = &adap->sge.qs[i]; q 3240 drivers/net/ethernet/chelsio/cxgb3/sge.c if (q->tx_reclaim_timer.function) q 3241 drivers/net/ethernet/chelsio/cxgb3/sge.c del_timer_sync(&q->tx_reclaim_timer); q 3242 drivers/net/ethernet/chelsio/cxgb3/sge.c if (q->rx_reclaim_timer.function) q 3243 drivers/net/ethernet/chelsio/cxgb3/sge.c del_timer_sync(&q->rx_reclaim_timer); q 3358 drivers/net/ethernet/chelsio/cxgb3/sge.c struct qset_params *q = p->qset + i; q 3360 drivers/net/ethernet/chelsio/cxgb3/sge.c q->polling = adap->params.rev > 0; q 3361 drivers/net/ethernet/chelsio/cxgb3/sge.c q->coalesce_usecs = 5; q 3362 drivers/net/ethernet/chelsio/cxgb3/sge.c q->rspq_size = 1024; q 3363 drivers/net/ethernet/chelsio/cxgb3/sge.c q->fl_size = 1024; q 3364 drivers/net/ethernet/chelsio/cxgb3/sge.c q->jumbo_size = 512; q 3365 drivers/net/ethernet/chelsio/cxgb3/sge.c q->txq_size[TXQ_ETH] = 1024; q 3366 drivers/net/ethernet/chelsio/cxgb3/sge.c q->txq_size[TXQ_OFLD] = 1024; q 3367 drivers/net/ethernet/chelsio/cxgb3/sge.c q->txq_size[TXQ_CTRL] = 256; q 3368 drivers/net/ethernet/chelsio/cxgb3/sge.c q->cong_thres = 0; q 2990 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c #define QDESC_GET(q, desc, type, label) do { \ q 2995 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c cudbg_fill_qdesc_##q(q, type, qdesc_entry); \ q 3002 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c #define QDESC_GET_TXQ(q, type, label) do { \ q 3003 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c struct sge_txq *txq = (struct sge_txq *)q; \ q 3007 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c #define QDESC_GET_RXQ(q, type, label) do { \ q 3008 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c struct sge_rspq *rxq = (struct sge_rspq *)q; \ q 3012 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c #define QDESC_GET_FLQ(q, type, label) do { \ q 3013 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c struct sge_fl *flq = (struct sge_fl *)q; \ q 3019 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c QDESC_GET_TXQ(&s->ethtxq[i].q, CUDBG_QTYPE_NIC_TXQ, out); q 3031 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c QDESC_GET_TXQ(&s->ctrlq[i].q, CUDBG_QTYPE_CTRLQ, out); q 3040 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c QDESC_GET_TXQ(&s->ptptxq.q, CUDBG_QTYPE_PTP_TXQ, out); q 3055 drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c QDESC_GET_TXQ(&utxq->uldtxq[i].q, q 662 
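The t3_sge_prep() hits above fix the default per-qset geometry. Collected into one designated initializer, values exactly as listed (struct name is mine; the polling default, which depends on the chip revision, is left out):

    struct qset_defaults {
        unsigned int rspq_size, fl_size, jumbo_size;
        unsigned int txq_size[3];        /* TXQ_ETH, TXQ_OFLD, TXQ_CTRL */
        unsigned int coalesce_usecs, cong_thres;
    };

    static const struct qset_defaults t3_qset_defaults = {
        .rspq_size      = 1024,
        .fl_size        = 1024,
        .jumbo_size     = 512,
        .txq_size       = { 1024, 1024, 256 },
        .coalesce_usecs = 5,
        .cong_thres     = 0,
    };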
drivers/net/ethernet/chelsio/cxgb4/cxgb4.h typedef int (*rspq_handler_t)(struct sge_rspq *q, const __be64 *rsp, q 664 drivers/net/ethernet/chelsio/cxgb4/cxgb4.h typedef void (*rspq_flush_handler_t)(struct sge_rspq *q); q 758 drivers/net/ethernet/chelsio/cxgb4/cxgb4.h struct sge_txq q; q 772 drivers/net/ethernet/chelsio/cxgb4/cxgb4.h struct sge_txq q; q 782 drivers/net/ethernet/chelsio/cxgb4/cxgb4.h struct sge_txq q; q 1393 drivers/net/ethernet/chelsio/cxgb4/cxgb4.h const struct sge_rspq *q) q 1395 drivers/net/ethernet/chelsio/cxgb4/cxgb4.h unsigned int idx = q->intr_params >> 1; q 1408 drivers/net/ethernet/chelsio/cxgb4/cxgb4.h void t4_free_ofld_rxqs(struct adapter *adap, int n, struct sge_ofld_rxq *q); q 1411 drivers/net/ethernet/chelsio/cxgb4/cxgb4.h int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp, q 1434 drivers/net/ethernet/chelsio/cxgb4/cxgb4.h int t4_sge_eth_txq_egress_update(struct adapter *adap, struct sge_eth_txq *q, q 1545 drivers/net/ethernet/chelsio/cxgb4/cxgb4.h int cxgb4_set_rspq_intr_params(struct sge_rspq *q, unsigned int us, q 1547 drivers/net/ethernet/chelsio/cxgb4/cxgb4.h static inline void init_rspq(struct adapter *adap, struct sge_rspq *q, q 1551 drivers/net/ethernet/chelsio/cxgb4/cxgb4.h q->adap = adap; q 1552 drivers/net/ethernet/chelsio/cxgb4/cxgb4.h cxgb4_set_rspq_intr_params(q, us, cnt); q 1553 drivers/net/ethernet/chelsio/cxgb4/cxgb4.h q->iqe_len = iqe_size; q 1554 drivers/net/ethernet/chelsio/cxgb4/cxgb4.h q->size = size; q 1675 drivers/net/ethernet/chelsio/cxgb4/cxgb4.h const struct sge_rspq *q); q 1892 drivers/net/ethernet/chelsio/cxgb4/cxgb4.h void free_tx_desc(struct adapter *adap, struct sge_txq *q, q 1894 drivers/net/ethernet/chelsio/cxgb4/cxgb4.h void free_txq(struct adapter *adap, struct sge_txq *q); q 1896 drivers/net/ethernet/chelsio/cxgb4/cxgb4.h struct sge_txq *q, bool unmap); q 1899 drivers/net/ethernet/chelsio/cxgb4/cxgb4.h void cxgb4_inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *q, q 1901 drivers/net/ethernet/chelsio/cxgb4/cxgb4.h void cxgb4_write_sgl(const struct sk_buff *skb, struct sge_txq *q, q 1904 drivers/net/ethernet/chelsio/cxgb4/cxgb4.h void cxgb4_ring_tx_db(struct adapter *adap, struct sge_txq *q, int n); q 2713 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c T("TxQ ID:", q.cntxt_id); q 2714 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c T("TxQ size:", q.size); q 2715 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c T("TxQ inuse:", q.in_use); q 2716 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c T("TxQ CIDX:", q.cidx); q 2717 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c T("TxQ PIDX:", q.pidx); q 2750 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c TL("TxQFull:", q.stops); q 2751 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c TL("TxQRestarts:", q.restarts); q 2771 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c T("TxQ ID:", q.cntxt_id); q 2772 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c T("TxQ size:", q.size); q 2773 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c T("TxQ inuse:", q.in_use); q 2774 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c T("TxQ CIDX:", q.cidx); q 2775 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c T("TxQ PIDX:", q.pidx); q 2922 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c T("TxQ ID:", q.cntxt_id); q 2923 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c T("TxQ size:", q.size); q 2924 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c T("TxQ inuse:", q.in_use); q 2925 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c T("TxQ CIDX:", 
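In cxgb4 (the cxgb4.h hits above) every specialised transmit queue embeds the generic ring as a member literally named q, which is why the debugfs tables print q.cntxt_id, q.size and so on, and why fwevtq_handler() can container_of() its way from a generic sge_txq back to the wrapper. Shape only; the real structures carry many more fields:

    struct sge_txq {
        unsigned int cntxt_id, size, in_use, cidx, pidx;
    };

    struct sge_eth_txq  { struct sge_txq q; unsigned long tso, tx_cso; };
    struct sge_uld_txq  { struct sge_txq q; /* offload-specific state */ };
    struct sge_ctrl_txq { struct sge_txq q; unsigned char full; };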
q.cidx); q 2926 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c T("TxQ PIDX:", q.pidx); q 2951 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c T("TxQ ID:", q.cntxt_id); q 2952 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c T("TxQ size:", q.size); q 2953 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c T("TxQ inuse:", q.in_use); q 2954 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c T("TxQ CIDX:", q.cidx); q 2955 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c T("TxQ PIDX:", q.pidx); q 2956 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c TL("TxQFull:", q.stops); q 2957 drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c TL("TxQRestarts:", q.restarts); q 836 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c e->tx_pending = s->ethtxq[pi->first_qset].q.size; q 857 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c s->ethtxq[pi->first_qset + i].q.size = e->tx_pending; q 878 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset]; q 880 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c for (i = 0; i < pi->nqsets; i++, q++) { q 881 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c err = cxgb4_set_rspq_intr_params(&q->rspq, us, cnt); q 893 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset]; q 895 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c for (i = 0; i < pi->nqsets; i++, q++) q 896 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c q->rspq.adaptive_rx = adaptive_rx; q 905 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset]; q 907 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c return q->rspq.adaptive_rx; q 1024 drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c FW_PARAMS_PARAM_YZ_V(txq->q.cntxt_id)); q 249 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c FW_PARAMS_PARAM_YZ_V(txq->q.cntxt_id)); q 552 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp, q 567 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c dev_err(q->adap->pdev_dev, "unexpected FW4/CPL %#x on FW event queue\n" q 578 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start]; q 583 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c eq = container_of(txq, struct sge_eth_txq, q); q 584 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c t4_sge_eth_txq_egress_update(q->adap, eq, -1); q 588 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c oq = container_of(txq, struct sge_uld_txq, q); q 608 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c dev = q->adap->port[q->adap->chan_map[port]]; q 622 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c dcb_rpl(q->adap, pcmd); q 626 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c t4_handle_fw_rpl(q->adap, p->data); q 630 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c do_l2t_write_rpl(q->adap, p); q 634 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c do_smt_write_rpl(q->adap, p); q 638 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c filter_rpl(q->adap, p); q 642 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c hash_filter_rpl(q->adap, p); q 646 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c hash_del_filter_rpl(q->adap, p); q 650 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c do_srq_table_rpl(q->adap, p); q 652 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c dev_err(q->adap->pdev_dev, q 910 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c struct sge_rspq *q = adap->sge.ingr_map[i]; q 912 
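fwevtq_handler() above is one large switch on the CPL opcode. The same dispatch can be read as a table; the handler names are the driver's, but the opcode values below are invented for the sketch:

    typedef void (*cpl_handler_t)(void *adap, const void *rpl);

    enum { CPL_L2T_WRITE_RPL, CPL_SMT_WRITE_RPL, CPL_SET_TCB_RPL, CPL_NR };

    static void do_l2t_write_rpl(void *adap, const void *p) { (void)adap; (void)p; }
    static void do_smt_write_rpl(void *adap, const void *p) { (void)adap; (void)p; }
    static void filter_rpl(void *adap, const void *p)       { (void)adap; (void)p; }

    static const cpl_handler_t cpl_handlers[CPL_NR] = {
        [CPL_L2T_WRITE_RPL] = do_l2t_write_rpl,
        [CPL_SMT_WRITE_RPL] = do_smt_write_rpl,
        [CPL_SET_TCB_RPL]   = filter_rpl,
    };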
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c if (q && q->handler) q 913 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c napi_disable(&q->napi); q 940 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c struct sge_rspq *q = adap->sge.ingr_map[i]; q 942 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c if (!q) q 944 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c if (q->handler) q 945 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c napi_enable(&q->napi); q 949 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c SEINTARM_V(q->intr_params) | q 950 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c INGRESSQID_V(q->cntxt_id)); q 999 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset]; q 1002 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c for (j = 0; j < pi->nqsets; j++, q++) { q 1005 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, q 1006 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c adap->msi_idx, &q->fl, q 1013 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c q->rspq.idx = j; q 1014 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c memset(&q->stats, 0, sizeof(q->stats)); q 1017 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c q = &s->ethrxq[pi->first_qset]; q 1018 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c for (j = 0; j < pi->nqsets; j++, t++, q++) { q 1021 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c q->rspq.cntxt_id, q 1148 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c int cxgb4_set_rspq_intr_params(struct sge_rspq *q, q 1151 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c struct adapter *adap = q->adap; q 1161 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c if (q->desc && q->pktcnt_idx != new_idx) { q 1166 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c FW_PARAMS_PARAM_YZ_V(q->cntxt_id); q 1172 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c q->pktcnt_idx = new_idx; q 1176 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c q->intr_params = QINTR_TIMER_IDX_V(us) | QINTR_CNT_EN_V(cnt > 0); q 2027 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c static void disable_txq_db(struct sge_txq *q) q 2031 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c spin_lock_irqsave(&q->db_lock, flags); q 2032 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c q->db_disabled = 1; q 2033 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c spin_unlock_irqrestore(&q->db_lock, flags); q 2036 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c static void enable_txq_db(struct adapter *adap, struct sge_txq *q) q 2038 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c spin_lock_irq(&q->db_lock); q 2039 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c if (q->db_pidx_inc) { q 2045 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c QID_V(q->cntxt_id) | PIDX_V(q->db_pidx_inc)); q 2046 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c q->db_pidx_inc = 0; q 2048 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c q->db_disabled = 0; q 2049 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c spin_unlock_irq(&q->db_lock); q 2057 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c disable_txq_db(&adap->sge.ethtxq[i].q); q 2066 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c disable_txq_db(&txq->q); q 2071 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c disable_txq_db(&adap->sge.ctrlq[i].q); q 2079 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c enable_txq_db(adap, &adap->sge.ethtxq[i].q); q 2088 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c enable_txq_db(adap, &txq->q); q 2093 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c enable_txq_db(adap, &adap->sge.ctrlq[i].q); q 
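disable_txq_db()/enable_txq_db() above implement doorbell-drop recovery: while doorbells are disabled, producer-index increments accumulate in db_pidx_inc, and re-enabling flushes them to hardware with a single kernel-doorbell write. Modeled without the db_lock that the driver takes around both steps:

    struct txdb {
        int db_disabled;
        unsigned int db_pidx_inc;  /* increments deferred while disabled */
        unsigned int cntxt_id;
    };

    static void kdoorbell(unsigned int qid, unsigned int pidx_inc)
    {
        (void)qid; (void)pidx_inc; /* QID_V(qid) | PIDX_V(inc) register write */
    }

    static void enable_txq_db(struct txdb *q)
    {
        if (q->db_pidx_inc) {
            kdoorbell(q->cntxt_id, q->db_pidx_inc);
            q->db_pidx_inc = 0;
        }
        q->db_disabled = 0;
    }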
2122 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q) q 2127 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c spin_lock_irq(&q->db_lock); q 2128 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c ret = read_eq_indices(adap, (u16)q->cntxt_id, &hw_pidx, &hw_cidx); q 2131 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c if (q->db_pidx != hw_pidx) { q 2135 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c if (q->db_pidx >= hw_pidx) q 2136 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c delta = q->db_pidx - hw_pidx; q 2138 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c delta = q->size - hw_pidx + q->db_pidx; q 2146 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c QID_V(q->cntxt_id) | val); q 2149 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c q->db_disabled = 0; q 2150 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c q->db_pidx_inc = 0; q 2151 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c spin_unlock_irq(&q->db_lock); q 2161 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c sync_txq_pidx(adap, &adap->sge.ethtxq[i].q); q 2169 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c sync_txq_pidx(adap, &txq->q); q 2174 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c sync_txq_pidx(adap, &adap->sge.ctrlq[i].q); q 5190 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c s->ethtxq[i].q.size = 1024; q 5193 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c s->ctrlq[i].q.size = 512; q 5196 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c s->ptptxq.q.size = 8; q 86 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c static void uldrx_flush_handler(struct sge_rspq *q) q 88 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c struct adapter *adap = q->adap; q 90 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c if (adap->uld[q->uld].lro_flush) q 91 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c adap->uld[q->uld].lro_flush(&q->lro_mgr); q 103 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp, q 106 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c struct adapter *adap = q->adap; q 107 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq); q 115 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c if (q->flush_handler) q 116 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c ret = adap->uld[q->uld].lro_rx_handler(adap->uld[q->uld].handle, q 117 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c rsp, gl, &q->lro_mgr, q 118 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c &q->napi); q 120 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c ret = adap->uld[q->uld].rx_handler(adap->uld[q->uld].handle, q 142 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c struct sge_ofld_rxq *q = rxq_info->uldrxq; q 154 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c for (i = 0; i < nq; i++, q++) { q 169 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c err = t4_sge_alloc_rxq(adap, &q->rspq, false, q 172 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c q->fl.size ? &q->fl : NULL, q 180 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c memset(&q->stats, 0, sizeof(q->stats)); q 182 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c ids[i] = q->rspq.abs_id; q 186 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c q = rxq_info->uldrxq; q 187 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c for ( ; i; i--, q++) { q 188 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c if (q->rspq.desc) q 189 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c free_rspq_fl(adap, &q->rspq, q 190 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c q->fl.size ? 
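sync_txq_pidx() above recomputes how far the software producer index has run ahead of the copy hardware last saw, modulo the ring size; the delta is then replayed through the doorbell register. The arithmetic in isolation:

    static unsigned int pidx_delta(unsigned int db_pidx, unsigned int hw_pidx,
                                   unsigned int size)
    {
        return db_pidx >= hw_pidx ? db_pidx - hw_pidx
                                  : size - hw_pidx + db_pidx;
    }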
&q->fl : NULL); q 223 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c FW_PARAMS_PARAM_YZ_V(s->ctrlq[i].q.cntxt_id)); q 232 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c struct sge_ofld_rxq *q) q 234 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c for ( ; n; n--, q++) { q 235 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c if (q->rspq.desc) q 236 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c free_rspq_fl(adap, &q->rspq, q 237 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c q->fl.size ? &q->fl : NULL); q 254 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c FW_PARAMS_PARAM_YZ_V(s->ctrlq[i].q.cntxt_id)); q 419 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c static void enable_rx(struct adapter *adap, struct sge_rspq *q) q 421 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c if (!q) q 424 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c if (q->handler) q 425 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c napi_enable(&q->napi); q 429 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c SEINTARM_V(q->intr_params) | q 430 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c INGRESSQID_V(q->cntxt_id)); q 433 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c static void quiesce_rx(struct adapter *adap, struct sge_rspq *q) q 435 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c if (q && q->handler) q 436 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c napi_disable(&q->napi); q 466 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c if (txq && txq->q.desc) { q 469 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c txq->q.cntxt_id); q 470 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c free_tx_desc(adap, &txq->q, txq->q.in_use, false); q 471 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c kfree(txq->q.sdesc); q 473 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c free_txq(adap, &txq->q); q 490 drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c txq->q.size = 1024; q 157 drivers/net/ethernet/chelsio/cxgb4/sched.c qid = txq->q.cntxt_id; q 202 drivers/net/ethernet/chelsio/cxgb4/sched.c qid = txq->q.cntxt_id; q 206 drivers/net/ethernet/chelsio/cxgb4/sge.c static inline unsigned int txq_avail(const struct sge_txq *q) q 208 drivers/net/ethernet/chelsio/cxgb4/sge.c return q->size - 1 - q->in_use; q 302 drivers/net/ethernet/chelsio/cxgb4/sge.c const struct ulptx_sgl *sgl, const struct sge_txq *q) q 321 drivers/net/ethernet/chelsio/cxgb4/sge.c if (likely((u8 *)(p + 1) <= (u8 *)q->stat)) { q 327 drivers/net/ethernet/chelsio/cxgb4/sge.c } else if ((u8 *)p == (u8 *)q->stat) { q 328 drivers/net/ethernet/chelsio/cxgb4/sge.c p = (const struct ulptx_sge_pair *)q->desc; q 330 drivers/net/ethernet/chelsio/cxgb4/sge.c } else if ((u8 *)p + 8 == (u8 *)q->stat) { q 331 drivers/net/ethernet/chelsio/cxgb4/sge.c const __be64 *addr = (const __be64 *)q->desc; q 339 drivers/net/ethernet/chelsio/cxgb4/sge.c const __be64 *addr = (const __be64 *)q->desc; q 351 drivers/net/ethernet/chelsio/cxgb4/sge.c if ((u8 *)p == (u8 *)q->stat) q 352 drivers/net/ethernet/chelsio/cxgb4/sge.c p = (const struct ulptx_sge_pair *)q->desc; q 353 drivers/net/ethernet/chelsio/cxgb4/sge.c addr = (u8 *)p + 16 <= (u8 *)q->stat ? 
p->addr[0] : q 354 drivers/net/ethernet/chelsio/cxgb4/sge.c *(const __be64 *)q->desc; q 370 drivers/net/ethernet/chelsio/cxgb4/sge.c void free_tx_desc(struct adapter *adap, struct sge_txq *q, q 374 drivers/net/ethernet/chelsio/cxgb4/sge.c unsigned int cidx = q->cidx; q 377 drivers/net/ethernet/chelsio/cxgb4/sge.c d = &q->sdesc[cidx]; q 381 drivers/net/ethernet/chelsio/cxgb4/sge.c unmap_sgl(dev, d->skb, d->sgl, q); q 386 drivers/net/ethernet/chelsio/cxgb4/sge.c if (++cidx == q->size) { q 388 drivers/net/ethernet/chelsio/cxgb4/sge.c d = q->sdesc; q 391 drivers/net/ethernet/chelsio/cxgb4/sge.c q->cidx = cidx; q 397 drivers/net/ethernet/chelsio/cxgb4/sge.c static inline int reclaimable(const struct sge_txq *q) q 399 drivers/net/ethernet/chelsio/cxgb4/sge.c int hw_cidx = ntohs(READ_ONCE(q->stat->cidx)); q 400 drivers/net/ethernet/chelsio/cxgb4/sge.c hw_cidx -= q->cidx; q 401 drivers/net/ethernet/chelsio/cxgb4/sge.c return hw_cidx < 0 ? hw_cidx + q->size : hw_cidx; q 415 drivers/net/ethernet/chelsio/cxgb4/sge.c static inline int reclaim_completed_tx(struct adapter *adap, struct sge_txq *q, q 418 drivers/net/ethernet/chelsio/cxgb4/sge.c int reclaim = reclaimable(q); q 430 drivers/net/ethernet/chelsio/cxgb4/sge.c free_tx_desc(adap, q, reclaim, unmap); q 431 drivers/net/ethernet/chelsio/cxgb4/sge.c q->in_use -= reclaim; q 447 drivers/net/ethernet/chelsio/cxgb4/sge.c void cxgb4_reclaim_completed_tx(struct adapter *adap, struct sge_txq *q, q 450 drivers/net/ethernet/chelsio/cxgb4/sge.c (void)reclaim_completed_tx(adap, q, -1, unmap); q 494 drivers/net/ethernet/chelsio/cxgb4/sge.c static void free_rx_bufs(struct adapter *adap, struct sge_fl *q, int n) q 497 drivers/net/ethernet/chelsio/cxgb4/sge.c struct rx_sw_desc *d = &q->sdesc[q->cidx]; q 505 drivers/net/ethernet/chelsio/cxgb4/sge.c if (++q->cidx == q->size) q 506 drivers/net/ethernet/chelsio/cxgb4/sge.c q->cidx = 0; q 507 drivers/net/ethernet/chelsio/cxgb4/sge.c q->avail--; q 522 drivers/net/ethernet/chelsio/cxgb4/sge.c static void unmap_rx_buf(struct adapter *adap, struct sge_fl *q) q 524 drivers/net/ethernet/chelsio/cxgb4/sge.c struct rx_sw_desc *d = &q->sdesc[q->cidx]; q 530 drivers/net/ethernet/chelsio/cxgb4/sge.c if (++q->cidx == q->size) q 531 drivers/net/ethernet/chelsio/cxgb4/sge.c q->cidx = 0; q 532 drivers/net/ethernet/chelsio/cxgb4/sge.c q->avail--; q 535 drivers/net/ethernet/chelsio/cxgb4/sge.c static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q) q 537 drivers/net/ethernet/chelsio/cxgb4/sge.c if (q->pend_cred >= 8) { q 541 drivers/net/ethernet/chelsio/cxgb4/sge.c val |= PIDX_V(q->pend_cred / 8); q 543 drivers/net/ethernet/chelsio/cxgb4/sge.c val |= PIDX_T5_V(q->pend_cred / 8); q 554 drivers/net/ethernet/chelsio/cxgb4/sge.c if (unlikely(q->bar2_addr == NULL)) { q 556 drivers/net/ethernet/chelsio/cxgb4/sge.c val | QID_V(q->cntxt_id)); q 558 drivers/net/ethernet/chelsio/cxgb4/sge.c writel(val | QID_V(q->bar2_qid), q 559 drivers/net/ethernet/chelsio/cxgb4/sge.c q->bar2_addr + SGE_UDB_KDOORBELL); q 566 drivers/net/ethernet/chelsio/cxgb4/sge.c q->pend_cred &= 7; q 591 drivers/net/ethernet/chelsio/cxgb4/sge.c static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n, q 597 drivers/net/ethernet/chelsio/cxgb4/sge.c unsigned int cred = q->avail; q 598 drivers/net/ethernet/chelsio/cxgb4/sge.c __be64 *d = &q->desc[q->pidx]; q 599 drivers/net/ethernet/chelsio/cxgb4/sge.c struct rx_sw_desc *sd = &q->sdesc[q->pidx]; q 603 drivers/net/ethernet/chelsio/cxgb4/sge.c if (test_bit(q->cntxt_id - adap->sge.egr_start, 
adap->sge.blocked_fl)) q 619 drivers/net/ethernet/chelsio/cxgb4/sge.c q->large_alloc_failed++; q 628 drivers/net/ethernet/chelsio/cxgb4/sge.c q->mapping_err++; q 637 drivers/net/ethernet/chelsio/cxgb4/sge.c q->avail++; q 638 drivers/net/ethernet/chelsio/cxgb4/sge.c if (++q->pidx == q->size) { q 639 drivers/net/ethernet/chelsio/cxgb4/sge.c q->pidx = 0; q 640 drivers/net/ethernet/chelsio/cxgb4/sge.c sd = q->sdesc; q 641 drivers/net/ethernet/chelsio/cxgb4/sge.c d = q->desc; q 650 drivers/net/ethernet/chelsio/cxgb4/sge.c q->alloc_failed++; q 658 drivers/net/ethernet/chelsio/cxgb4/sge.c q->mapping_err++; q 666 drivers/net/ethernet/chelsio/cxgb4/sge.c q->avail++; q 667 drivers/net/ethernet/chelsio/cxgb4/sge.c if (++q->pidx == q->size) { q 668 drivers/net/ethernet/chelsio/cxgb4/sge.c q->pidx = 0; q 669 drivers/net/ethernet/chelsio/cxgb4/sge.c sd = q->sdesc; q 670 drivers/net/ethernet/chelsio/cxgb4/sge.c d = q->desc; q 674 drivers/net/ethernet/chelsio/cxgb4/sge.c out: cred = q->avail - cred; q 675 drivers/net/ethernet/chelsio/cxgb4/sge.c q->pend_cred += cred; q 676 drivers/net/ethernet/chelsio/cxgb4/sge.c ring_fl_db(adap, q); q 678 drivers/net/ethernet/chelsio/cxgb4/sge.c if (unlikely(fl_starving(adap, q))) { q 680 drivers/net/ethernet/chelsio/cxgb4/sge.c q->low++; q 681 drivers/net/ethernet/chelsio/cxgb4/sge.c set_bit(q->cntxt_id - adap->sge.egr_start, q 880 drivers/net/ethernet/chelsio/cxgb4/sge.c void cxgb4_write_sgl(const struct sk_buff *skb, struct sge_txq *q, q 909 drivers/net/ethernet/chelsio/cxgb4/sge.c to = (u8 *)end > (u8 *)q->stat ? buf : sgl->sge; q 922 drivers/net/ethernet/chelsio/cxgb4/sge.c if (unlikely((u8 *)end > (u8 *)q->stat)) { q 923 drivers/net/ethernet/chelsio/cxgb4/sge.c unsigned int part0 = (u8 *)q->stat - (u8 *)sgl->sge, part1; q 927 drivers/net/ethernet/chelsio/cxgb4/sge.c part1 = (u8 *)end - (u8 *)q->stat; q 928 drivers/net/ethernet/chelsio/cxgb4/sge.c memcpy(q->desc, (u8 *)buf + part0, part1); q 929 drivers/net/ethernet/chelsio/cxgb4/sge.c end = (void *)q->desc + part1; q 960 drivers/net/ethernet/chelsio/cxgb4/sge.c inline void cxgb4_ring_tx_db(struct adapter *adap, struct sge_txq *q, int n) q 970 drivers/net/ethernet/chelsio/cxgb4/sge.c if (unlikely(q->bar2_addr == NULL)) { q 977 drivers/net/ethernet/chelsio/cxgb4/sge.c spin_lock_irqsave(&q->db_lock, flags); q 978 drivers/net/ethernet/chelsio/cxgb4/sge.c if (!q->db_disabled) q 980 drivers/net/ethernet/chelsio/cxgb4/sge.c QID_V(q->cntxt_id) | val); q 982 drivers/net/ethernet/chelsio/cxgb4/sge.c q->db_pidx_inc += n; q 983 drivers/net/ethernet/chelsio/cxgb4/sge.c q->db_pidx = q->pidx; q 984 drivers/net/ethernet/chelsio/cxgb4/sge.c spin_unlock_irqrestore(&q->db_lock, flags); q 1000 drivers/net/ethernet/chelsio/cxgb4/sge.c if (n == 1 && q->bar2_qid == 0) { q 1001 drivers/net/ethernet/chelsio/cxgb4/sge.c int index = (q->pidx q 1002 drivers/net/ethernet/chelsio/cxgb4/sge.c ? 
q 1003 drivers/net/ethernet/chelsio/cxgb4/sge.c : (q->size - 1));
q 1004 drivers/net/ethernet/chelsio/cxgb4/sge.c u64 *wr = (u64 *)&q->desc[index];
q 1007 drivers/net/ethernet/chelsio/cxgb4/sge.c (q->bar2_addr + SGE_UDB_WCDOORBELL),
q 1010 drivers/net/ethernet/chelsio/cxgb4/sge.c writel(val | QID_V(q->bar2_qid),
q 1011 drivers/net/ethernet/chelsio/cxgb4/sge.c q->bar2_addr + SGE_UDB_KDOORBELL);
q 1041 drivers/net/ethernet/chelsio/cxgb4/sge.c const struct sge_txq *q, void *pos)
q 1043 drivers/net/ethernet/chelsio/cxgb4/sge.c int left = (void *)q->stat - pos;
q 1054 drivers/net/ethernet/chelsio/cxgb4/sge.c skb_copy_bits(skb, left, q->desc, skb->len - left);
q 1055 drivers/net/ethernet/chelsio/cxgb4/sge.c pos = (void *)q->desc + (skb->len - left);
q 1066 drivers/net/ethernet/chelsio/cxgb4/sge.c const struct sge_txq *q, void *pos,
q 1070 drivers/net/ethernet/chelsio/cxgb4/sge.c int left = (void *)q->stat - pos;
q 1077 drivers/net/ethernet/chelsio/cxgb4/sge.c memcpy(q->desc, skb->data + left, length - left);
q 1078 drivers/net/ethernet/chelsio/cxgb4/sge.c pos = (void *)q->desc + (length - left);
q 1167 drivers/net/ethernet/chelsio/cxgb4/sge.c static void eth_txq_stop(struct sge_eth_txq *q)
q 1169 drivers/net/ethernet/chelsio/cxgb4/sge.c netif_tx_stop_queue(q->txq);
q 1170 drivers/net/ethernet/chelsio/cxgb4/sge.c q->q.stops++;
q 1173 drivers/net/ethernet/chelsio/cxgb4/sge.c static inline void txq_advance(struct sge_txq *q, unsigned int n)
q 1175 drivers/net/ethernet/chelsio/cxgb4/sge.c q->in_use += n;
q 1176 drivers/net/ethernet/chelsio/cxgb4/sge.c q->pidx += n;
q 1177 drivers/net/ethernet/chelsio/cxgb4/sge.c if (q->pidx >= q->size)
q 1178 drivers/net/ethernet/chelsio/cxgb4/sge.c q->pidx -= q->size;
q 1328 drivers/net/ethernet/chelsio/cxgb4/sge.c struct sge_txq *q = &eq->q;
q 1331 drivers/net/ethernet/chelsio/cxgb4/sge.c if (!q->in_use || !__netif_tx_trylock(eq->txq))
q 1335 drivers/net/ethernet/chelsio/cxgb4/sge.c reclaimed = reclaim_completed_tx(adap, &eq->q, maxreclaim, true);
q 1337 drivers/net/ethernet/chelsio/cxgb4/sge.c hw_cidx = ntohs(READ_ONCE(q->stat->cidx));
q 1338 drivers/net/ethernet/chelsio/cxgb4/sge.c hw_in_use = q->pidx - hw_cidx;
q 1340 drivers/net/ethernet/chelsio/cxgb4/sge.c hw_in_use += q->size;
q 1347 drivers/net/ethernet/chelsio/cxgb4/sge.c if (netif_tx_queue_stopped(eq->txq) && hw_in_use < (q->size / 2)) {
q 1349 drivers/net/ethernet/chelsio/cxgb4/sge.c eq->q.restarts++;
q 1370 drivers/net/ethernet/chelsio/cxgb4/sge.c struct sge_eth_txq *q;
q 1420 drivers/net/ethernet/chelsio/cxgb4/sge.c q = &adap->sge.ptptxq;
q 1422 drivers/net/ethernet/chelsio/cxgb4/sge.c q = &adap->sge.ethtxq[qidx + pi->first_qset];
q 1426 drivers/net/ethernet/chelsio/cxgb4/sge.c reclaim_completed_tx(adap, &q->q, -1, true);
q 1441 drivers/net/ethernet/chelsio/cxgb4/sge.c credits = txq_avail(&q->q) - ndesc;
q 1444 drivers/net/ethernet/chelsio/cxgb4/sge.c eth_txq_stop(q);
q 1461 drivers/net/ethernet/chelsio/cxgb4/sge.c q->mapping_err++;
q 1477 drivers/net/ethernet/chelsio/cxgb4/sge.c eth_txq_stop(q);
q 1481 drivers/net/ethernet/chelsio/cxgb4/sge.c wr = (void *)&q->q.desc[q->q.pidx];
q 1545 drivers/net/ethernet/chelsio/cxgb4/sge.c if (unlikely((u8 *)sgl >= (u8 *)q->q.stat)) {
q 1550 drivers/net/ethernet/chelsio/cxgb4/sge.c if (sgl == (u64 *)q->q.stat) {
q 1551 drivers/net/ethernet/chelsio/cxgb4/sge.c int left = (u8 *)end - (u8 *)q->q.stat;
q 1553 drivers/net/ethernet/chelsio/cxgb4/sge.c end = (void *)q->q.desc + left;
q 1554 drivers/net/ethernet/chelsio/cxgb4/sge.c sgl = (void *)q->q.desc;
q 1557 drivers/net/ethernet/chelsio/cxgb4/sge.c q->tso++;
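 
Several hits above (cxgb4_write_sgl() and the inline-copy helpers around q->stat) copy a work request that may run past the end of the usable ring and continue at q->desc. A minimal sketch of such a two-part wrapped copy; the types, buffer sizes, and the copy_wrapped() name are assumptions for illustration:

    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>

    struct ring {
        unsigned char *desc; /* start of descriptor memory */
        unsigned char *stat; /* first byte past the usable descriptors */
    };

    /* Copy len bytes at pos, wrapping to the start of the ring if the
     * copy would run past q->stat; returns the position after the copy. */
    static unsigned char *copy_wrapped(struct ring *q, unsigned char *pos,
                                       const void *src, size_t len)
    {
        size_t left = (size_t)(q->stat - pos);

        if (len <= left) {
            memcpy(pos, src, len);
            return pos + len;
        }
        memcpy(pos, src, left);                             /* up to the end */
        memcpy(q->desc, (const unsigned char *)src + left, len - left);
        return q->desc + (len - left);                      /* wrapped part */
    }

    int main(void)
    {
        unsigned char ring[16] = { 0 };
        struct ring q = { ring, ring + sizeof(ring) };
        unsigned char *end = copy_wrapped(&q, ring + 12, "abcdefgh", 8);

        printf("%.4s%.4s, next at %td\n", ring + 12, ring, end - ring);
        return 0;                       /* prints "abcdefgh, next at 4" */
    }
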
q 1558 drivers/net/ethernet/chelsio/cxgb4/sge.c q->tx_cso += ssi->gso_segs;
q 1571 drivers/net/ethernet/chelsio/cxgb4/sge.c q->tx_cso++;
q 1576 drivers/net/ethernet/chelsio/cxgb4/sge.c q->vlan_ins++;
q 1591 drivers/net/ethernet/chelsio/cxgb4/sge.c ctrl0 |= TXPKT_OVLAN_IDX_V(q->dcb_prio);
q 1593 drivers/net/ethernet/chelsio/cxgb4/sge.c ctrl0 |= TXPKT_T5_OVLAN_IDX_V(q->dcb_prio);
q 1601 drivers/net/ethernet/chelsio/cxgb4/sge.c cxgb4_inline_tx_skb(skb, &q->q, sgl);
q 1606 drivers/net/ethernet/chelsio/cxgb4/sge.c cxgb4_write_sgl(skb, &q->q, (void *)sgl, end, 0, addr);
q 1609 drivers/net/ethernet/chelsio/cxgb4/sge.c last_desc = q->q.pidx + ndesc - 1;
q 1610 drivers/net/ethernet/chelsio/cxgb4/sge.c if (last_desc >= q->q.size)
q 1611 drivers/net/ethernet/chelsio/cxgb4/sge.c last_desc -= q->q.size;
q 1612 drivers/net/ethernet/chelsio/cxgb4/sge.c q->q.sdesc[last_desc].skb = skb;
q 1613 drivers/net/ethernet/chelsio/cxgb4/sge.c q->q.sdesc[last_desc].sgl = (struct ulptx_sgl *)sgl;
q 1616 drivers/net/ethernet/chelsio/cxgb4/sge.c txq_advance(&q->q, ndesc);
q 1618 drivers/net/ethernet/chelsio/cxgb4/sge.c cxgb4_ring_tx_db(adap, &q->q, ndesc);
q 1748 drivers/net/ethernet/chelsio/cxgb4/sge.c reclaim_completed_tx(adapter, &txq->q, -1, true);
q 1756 drivers/net/ethernet/chelsio/cxgb4/sge.c credits = txq_avail(&txq->q) - ndesc;
q 1801 drivers/net/ethernet/chelsio/cxgb4/sge.c wr = (void *)&txq->q.desc[txq->q.pidx];
q 1901 drivers/net/ethernet/chelsio/cxgb4/sge.c cxgb4_inline_tx_skb(skb, &txq->q, cpl + 1);
q 1941 drivers/net/ethernet/chelsio/cxgb4/sge.c struct sge_txq *tq = &txq->q;
q 1969 drivers/net/ethernet/chelsio/cxgb4/sge.c txq_advance(&txq->q, ndesc);
q 1971 drivers/net/ethernet/chelsio/cxgb4/sge.c cxgb4_ring_tx_db(adapter, &txq->q, ndesc);
q 2000 drivers/net/ethernet/chelsio/cxgb4/sge.c static inline void reclaim_completed_tx_imm(struct sge_txq *q)
q 2002 drivers/net/ethernet/chelsio/cxgb4/sge.c int hw_cidx = ntohs(READ_ONCE(q->stat->cidx));
q 2003 drivers/net/ethernet/chelsio/cxgb4/sge.c int reclaim = hw_cidx - q->cidx;
q 2006 drivers/net/ethernet/chelsio/cxgb4/sge.c reclaim += q->size;
q 2008 drivers/net/ethernet/chelsio/cxgb4/sge.c q->in_use -= reclaim;
q 2009 drivers/net/ethernet/chelsio/cxgb4/sge.c q->cidx = hw_cidx;
q 2033 drivers/net/ethernet/chelsio/cxgb4/sge.c static void ctrlq_check_stop(struct sge_ctrl_txq *q, struct fw_wr_hdr *wr)
q 2035 drivers/net/ethernet/chelsio/cxgb4/sge.c reclaim_completed_tx_imm(&q->q);
q 2036 drivers/net/ethernet/chelsio/cxgb4/sge.c if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) {
q 2038 drivers/net/ethernet/chelsio/cxgb4/sge.c q->q.stops++;
q 2039 drivers/net/ethernet/chelsio/cxgb4/sge.c q->full = 1;
q 2051 drivers/net/ethernet/chelsio/cxgb4/sge.c static int ctrl_xmit(struct sge_ctrl_txq *q, struct sk_buff *skb)
q 2063 drivers/net/ethernet/chelsio/cxgb4/sge.c spin_lock(&q->sendq.lock);
q 2065 drivers/net/ethernet/chelsio/cxgb4/sge.c if (unlikely(q->full)) {
q 2067 drivers/net/ethernet/chelsio/cxgb4/sge.c __skb_queue_tail(&q->sendq, skb);
q 2068 drivers/net/ethernet/chelsio/cxgb4/sge.c spin_unlock(&q->sendq.lock);
q 2072 drivers/net/ethernet/chelsio/cxgb4/sge.c wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx];
q 2073 drivers/net/ethernet/chelsio/cxgb4/sge.c cxgb4_inline_tx_skb(skb, &q->q, wr);
q 2075 drivers/net/ethernet/chelsio/cxgb4/sge.c txq_advance(&q->q, ndesc);
q 2076 drivers/net/ethernet/chelsio/cxgb4/sge.c if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES))
q 2077 drivers/net/ethernet/chelsio/cxgb4/sge.c ctrlq_check_stop(q, wr);
q 2079 drivers/net/ethernet/chelsio/cxgb4/sge.c cxgb4_ring_tx_db(q->adap, &q->q, ndesc);
q 2080 drivers/net/ethernet/chelsio/cxgb4/sge.c spin_unlock(&q->sendq.lock);
q 2096 drivers/net/ethernet/chelsio/cxgb4/sge.c struct sge_ctrl_txq *q = (struct sge_ctrl_txq *)data;
q 2098 drivers/net/ethernet/chelsio/cxgb4/sge.c spin_lock(&q->sendq.lock);
q 2099 drivers/net/ethernet/chelsio/cxgb4/sge.c reclaim_completed_tx_imm(&q->q);
q 2100 drivers/net/ethernet/chelsio/cxgb4/sge.c BUG_ON(txq_avail(&q->q) < TXQ_STOP_THRES); /* q should be empty */
q 2102 drivers/net/ethernet/chelsio/cxgb4/sge.c while ((skb = __skb_dequeue(&q->sendq)) != NULL) {
q 2110 drivers/net/ethernet/chelsio/cxgb4/sge.c wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx];
q 2111 drivers/net/ethernet/chelsio/cxgb4/sge.c txq_advance(&q->q, ndesc);
q 2112 drivers/net/ethernet/chelsio/cxgb4/sge.c spin_unlock(&q->sendq.lock);
q 2114 drivers/net/ethernet/chelsio/cxgb4/sge.c cxgb4_inline_tx_skb(skb, &q->q, wr);
q 2117 drivers/net/ethernet/chelsio/cxgb4/sge.c if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) {
q 2118 drivers/net/ethernet/chelsio/cxgb4/sge.c unsigned long old = q->q.stops;
q 2120 drivers/net/ethernet/chelsio/cxgb4/sge.c ctrlq_check_stop(q, wr);
q 2121 drivers/net/ethernet/chelsio/cxgb4/sge.c if (q->q.stops != old) { /* suspended anew */
q 2122 drivers/net/ethernet/chelsio/cxgb4/sge.c spin_lock(&q->sendq.lock);
q 2127 drivers/net/ethernet/chelsio/cxgb4/sge.c cxgb4_ring_tx_db(q->adap, &q->q, written);
q 2130 drivers/net/ethernet/chelsio/cxgb4/sge.c spin_lock(&q->sendq.lock);
q 2132 drivers/net/ethernet/chelsio/cxgb4/sge.c q->full = 0;
q 2135 drivers/net/ethernet/chelsio/cxgb4/sge.c cxgb4_ring_tx_db(q->adap, &q->q, written);
q 2136 drivers/net/ethernet/chelsio/cxgb4/sge.c spin_unlock(&q->sendq.lock);
q 2205 drivers/net/ethernet/chelsio/cxgb4/sge.c static void txq_stop_maperr(struct sge_uld_txq *q)
q 2207 drivers/net/ethernet/chelsio/cxgb4/sge.c q->mapping_err++;
q 2208 drivers/net/ethernet/chelsio/cxgb4/sge.c q->q.stops++;
q 2209 drivers/net/ethernet/chelsio/cxgb4/sge.c set_bit(q->q.cntxt_id - q->adap->sge.egr_start,
q 2210 drivers/net/ethernet/chelsio/cxgb4/sge.c q->adap->sge.txq_maperr);
q 2221 drivers/net/ethernet/chelsio/cxgb4/sge.c static void ofldtxq_stop(struct sge_uld_txq *q, struct fw_wr_hdr *wr)
q 2224 drivers/net/ethernet/chelsio/cxgb4/sge.c q->q.stops++;
q 2225 drivers/net/ethernet/chelsio/cxgb4/sge.c q->full = 1;
q 2246 drivers/net/ethernet/chelsio/cxgb4/sge.c static void service_ofldq(struct sge_uld_txq *q)
q 2262 drivers/net/ethernet/chelsio/cxgb4/sge.c if (q->service_ofldq_running)
q 2264 drivers/net/ethernet/chelsio/cxgb4/sge.c q->service_ofldq_running = true;
q 2266 drivers/net/ethernet/chelsio/cxgb4/sge.c while ((skb = skb_peek(&q->sendq)) != NULL && !q->full) {
q 2274 drivers/net/ethernet/chelsio/cxgb4/sge.c spin_unlock(&q->sendq.lock);
q 2276 drivers/net/ethernet/chelsio/cxgb4/sge.c cxgb4_reclaim_completed_tx(q->adap, &q->q, false);
q 2280 drivers/net/ethernet/chelsio/cxgb4/sge.c credits = txq_avail(&q->q) - ndesc;
q 2283 drivers/net/ethernet/chelsio/cxgb4/sge.c ofldtxq_stop(q, (struct fw_wr_hdr *)skb->data);
q 2285 drivers/net/ethernet/chelsio/cxgb4/sge.c pos = (u64 *)&q->q.desc[q->q.pidx];
q 2287 drivers/net/ethernet/chelsio/cxgb4/sge.c cxgb4_inline_tx_skb(skb, &q->q, pos);
q 2288 drivers/net/ethernet/chelsio/cxgb4/sge.c else if (cxgb4_map_skb(q->adap->pdev_dev, skb,
q 2290 drivers/net/ethernet/chelsio/cxgb4/sge.c txq_stop_maperr(q);
q 2291 drivers/net/ethernet/chelsio/cxgb4/sge.c spin_lock(&q->sendq.lock);
q 2301 drivers/net/ethernet/chelsio/cxgb4/sge.c txq = &q->q;
q 2302 drivers/net/ethernet/chelsio/cxgb4/sge.c pos = (void *)inline_tx_skb_header(skb, &q->q,
q 2320 drivers/net/ethernet/chelsio/cxgb4/sge.c cxgb4_write_sgl(skb, &q->q, (void *)pos,
q 2324 drivers/net/ethernet/chelsio/cxgb4/sge.c skb->dev = q->adap->port[0];
q 2327 drivers/net/ethernet/chelsio/cxgb4/sge.c last_desc = q->q.pidx + ndesc - 1;
q 2328 drivers/net/ethernet/chelsio/cxgb4/sge.c if (last_desc >= q->q.size)
q 2329 drivers/net/ethernet/chelsio/cxgb4/sge.c last_desc -= q->q.size;
q 2330 drivers/net/ethernet/chelsio/cxgb4/sge.c q->q.sdesc[last_desc].skb = skb;
q 2333 drivers/net/ethernet/chelsio/cxgb4/sge.c txq_advance(&q->q, ndesc);
q 2336 drivers/net/ethernet/chelsio/cxgb4/sge.c cxgb4_ring_tx_db(q->adap, &q->q, written);
q 2345 drivers/net/ethernet/chelsio/cxgb4/sge.c spin_lock(&q->sendq.lock);
q 2346 drivers/net/ethernet/chelsio/cxgb4/sge.c __skb_unlink(skb, &q->sendq);
q 2351 drivers/net/ethernet/chelsio/cxgb4/sge.c cxgb4_ring_tx_db(q->adap, &q->q, written);
q 2356 drivers/net/ethernet/chelsio/cxgb4/sge.c q->service_ofldq_running = false;
q 2366 drivers/net/ethernet/chelsio/cxgb4/sge.c static int ofld_xmit(struct sge_uld_txq *q, struct sk_buff *skb)
q 2369 drivers/net/ethernet/chelsio/cxgb4/sge.c spin_lock(&q->sendq.lock);
q 2379 drivers/net/ethernet/chelsio/cxgb4/sge.c __skb_queue_tail(&q->sendq, skb);
q 2380 drivers/net/ethernet/chelsio/cxgb4/sge.c if (q->sendq.qlen == 1)
q 2381 drivers/net/ethernet/chelsio/cxgb4/sge.c service_ofldq(q);
q 2383 drivers/net/ethernet/chelsio/cxgb4/sge.c spin_unlock(&q->sendq.lock);
q 2395 drivers/net/ethernet/chelsio/cxgb4/sge.c struct sge_uld_txq *q = (struct sge_uld_txq *)data;
q 2397 drivers/net/ethernet/chelsio/cxgb4/sge.c spin_lock(&q->sendq.lock);
q 2398 drivers/net/ethernet/chelsio/cxgb4/sge.c q->full = 0; /* the queue actually is completely empty now */
q 2399 drivers/net/ethernet/chelsio/cxgb4/sge.c service_ofldq(q);
q 2400 drivers/net/ethernet/chelsio/cxgb4/sge.c spin_unlock(&q->sendq.lock);
q 2485 drivers/net/ethernet/chelsio/cxgb4/sge.c const struct sge_txq *q,
q 2488 drivers/net/ethernet/chelsio/cxgb4/sge.c int left = (void *)q->stat - pos;
q 2496 drivers/net/ethernet/chelsio/cxgb4/sge.c memcpy(q->desc, src + left, length - left);
q 2497 drivers/net/ethernet/chelsio/cxgb4/sge.c pos = (void *)q->desc + (length - left);
q 2516 drivers/net/ethernet/chelsio/cxgb4/sge.c static int ofld_xmit_direct(struct sge_uld_txq *q, const void *src,
q 2533 drivers/net/ethernet/chelsio/cxgb4/sge.c if (!spin_trylock(&q->sendq.lock))
q 2536 drivers/net/ethernet/chelsio/cxgb4/sge.c if (q->full || !skb_queue_empty(&q->sendq) ||
q 2537 drivers/net/ethernet/chelsio/cxgb4/sge.c q->service_ofldq_running) {
q 2538 drivers/net/ethernet/chelsio/cxgb4/sge.c spin_unlock(&q->sendq.lock);
q 2542 drivers/net/ethernet/chelsio/cxgb4/sge.c credits = txq_avail(&q->q) - ndesc;
q 2543 drivers/net/ethernet/chelsio/cxgb4/sge.c pos = (u64 *)&q->q.desc[q->q.pidx];
q 2546 drivers/net/ethernet/chelsio/cxgb4/sge.c inline_tx_header(src, &q->q, pos, len);
q 2548 drivers/net/ethernet/chelsio/cxgb4/sge.c ofldtxq_stop(q, (struct fw_wr_hdr *)pos);
q 2549 drivers/net/ethernet/chelsio/cxgb4/sge.c txq_advance(&q->q, ndesc);
q 2550 drivers/net/ethernet/chelsio/cxgb4/sge.c cxgb4_ring_tx_db(q->adap, &q->q, ndesc);
q 2552 drivers/net/ethernet/chelsio/cxgb4/sge.c spin_unlock(&q->sendq.lock);
q 2926 drivers/net/ethernet/chelsio/cxgb4/sge.c int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
q 2932 drivers/net/ethernet/chelsio/cxgb4/sge.c struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
q 2933 drivers/net/ethernet/chelsio/cxgb4/sge.c struct adapter *adapter = q->adap;
q 2934 drivers/net/ethernet/chelsio/cxgb4/sge.c struct sge *s = &q->adap->sge;
q 2935 drivers/net/ethernet/chelsio/cxgb4/sge.c int cpl_trace_pkt = is_t4(q->adap->params.chip) ?
q 2946 drivers/net/ethernet/chelsio/cxgb4/sge.c t4_tx_completion_handler(q, rsp, si);
q 2951 drivers/net/ethernet/chelsio/cxgb4/sge.c return handle_trace_pkt(q->adap, si);
q 2955 drivers/net/ethernet/chelsio/cxgb4/sge.c if (q->adap->params.tp.rx_pkt_encap) {
q 2963 drivers/net/ethernet/chelsio/cxgb4/sge.c (q->netdev->features & NETIF_F_RXCSUM);
q 2970 drivers/net/ethernet/chelsio/cxgb4/sge.c (q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) {
q 2981 drivers/net/ethernet/chelsio/cxgb4/sge.c pi = netdev_priv(q->netdev);
q 2996 drivers/net/ethernet/chelsio/cxgb4/sge.c if (!t4_tx_hststamp(adapter, skb, q->netdev))
q 3000 drivers/net/ethernet/chelsio/cxgb4/sge.c skb->protocol = eth_type_trans(skb, q->netdev);
q 3001 drivers/net/ethernet/chelsio/cxgb4/sge.c skb_record_rx_queue(skb, q->idx);
q 3009 drivers/net/ethernet/chelsio/cxgb4/sge.c cxgb4_sgetim_to_hwtstamp(q->adap, skb_hwtstamps(skb),
q 3036 drivers/net/ethernet/chelsio/cxgb4/sge.c if (q->adap->params.tp.rx_pkt_encap)
q 3054 drivers/net/ethernet/chelsio/cxgb4/sge.c skb_mark_napi_id(skb, &q->napi);
q 3074 drivers/net/ethernet/chelsio/cxgb4/sge.c static void restore_rx_bufs(const struct pkt_gl *si, struct sge_fl *q,
q 3080 drivers/net/ethernet/chelsio/cxgb4/sge.c if (q->cidx == 0)
q 3081 drivers/net/ethernet/chelsio/cxgb4/sge.c q->cidx = q->size - 1;
q 3083 drivers/net/ethernet/chelsio/cxgb4/sge.c q->cidx--;
q 3084 drivers/net/ethernet/chelsio/cxgb4/sge.c d = &q->sdesc[q->cidx];
q 3087 drivers/net/ethernet/chelsio/cxgb4/sge.c q->avail++;
q 3100 drivers/net/ethernet/chelsio/cxgb4/sge.c const struct sge_rspq *q)
q 3102 drivers/net/ethernet/chelsio/cxgb4/sge.c return (r->type_gen >> RSPD_GEN_S) == q->gen;
q 3111 drivers/net/ethernet/chelsio/cxgb4/sge.c static inline void rspq_next(struct sge_rspq *q)
q 3113 drivers/net/ethernet/chelsio/cxgb4/sge.c q->cur_desc = (void *)q->cur_desc + q->iqe_len;
q 3114 drivers/net/ethernet/chelsio/cxgb4/sge.c if (unlikely(++q->cidx == q->size)) {
q 3115 drivers/net/ethernet/chelsio/cxgb4/sge.c q->cidx = 0;
q 3116 drivers/net/ethernet/chelsio/cxgb4/sge.c q->gen ^= 1;
q 3117 drivers/net/ethernet/chelsio/cxgb4/sge.c q->cur_desc = q->desc;
q 3134 drivers/net/ethernet/chelsio/cxgb4/sge.c static int process_responses(struct sge_rspq *q, int budget)
q 3139 drivers/net/ethernet/chelsio/cxgb4/sge.c struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
q 3140 drivers/net/ethernet/chelsio/cxgb4/sge.c struct adapter *adapter = q->adap;
q 3144 drivers/net/ethernet/chelsio/cxgb4/sge.c rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc));
q 3145 drivers/net/ethernet/chelsio/cxgb4/sge.c if (!is_new_response(rc, q)) {
q 3146 drivers/net/ethernet/chelsio/cxgb4/sge.c if (q->flush_handler)
q 3147 drivers/net/ethernet/chelsio/cxgb4/sge.c q->flush_handler(q);
q 3160 drivers/net/ethernet/chelsio/cxgb4/sge.c if (likely(q->offset > 0)) {
q 3161 drivers/net/ethernet/chelsio/cxgb4/sge.c free_rx_bufs(q->adap, &rxq->fl, 1);
q 3162 drivers/net/ethernet/chelsio/cxgb4/sge.c q->offset = 0;
q 3173 drivers/net/ethernet/chelsio/cxgb4/sge.c fp->offset = q->offset;
q 3178 drivers/net/ethernet/chelsio/cxgb4/sge.c unmap_rx_buf(q->adap, &rxq->fl);
q 3187 drivers/net/ethernet/chelsio/cxgb4/sge.c dma_sync_single_for_cpu(q->adap->pdev_dev,
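
The is_new_response()/rspq_next() hits above use a generation bit: the device stamps each response with the current generation, and the consumer flips its expected value every time its index wraps, so stale entries from the previous lap are never mistaken for new ones. A self-contained sketch of the idea (simplified types, not the driver's):

    #include <stdio.h>

    struct rsp { unsigned int gen; int data; };

    struct rspq {
        struct rsp *desc;   /* response ring shared with the device */
        unsigned int size;  /* entries in the ring */
        unsigned int cidx;  /* next entry to examine */
        unsigned int gen;   /* generation value that marks a fresh entry */
    };

    /* A response is new only if its generation matches ours. */
    static int is_new_response(const struct rspq *q)
    {
        return q->desc[q->cidx].gen == q->gen;
    }

    /* After a full lap the device writes the opposite generation,
     * so the consumer flips its expected value on wrap. */
    static void rspq_next(struct rspq *q)
    {
        if (++q->cidx == q->size) {
            q->cidx = 0;
            q->gen ^= 1;
        }
    }

    int main(void)
    {
        struct rsp ring[4] = { {1, 10}, {1, 11}, {0, 0}, {0, 0} };
        struct rspq q = { ring, 4, 0, 1 };

        while (is_new_response(&q)) {   /* consumes 10 and 11, then stops */
            printf("%d\n", q.desc[q.cidx].data);
            rspq_next(&q);
        }
        return 0;
    }
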
q 3196 drivers/net/ethernet/chelsio/cxgb4/sge.c ret = q->handler(q, q->cur_desc, &si);
q 3198 drivers/net/ethernet/chelsio/cxgb4/sge.c q->offset += ALIGN(fp->size, s->fl_align);
q 3202 drivers/net/ethernet/chelsio/cxgb4/sge.c ret = q->handler(q, q->cur_desc, NULL);
q 3204 drivers/net/ethernet/chelsio/cxgb4/sge.c ret = q->handler(q, (const __be64 *)rc, CXGB4_MSG_AN);
q 3209 drivers/net/ethernet/chelsio/cxgb4/sge.c q->next_intr_params = QINTR_TIMER_IDX_V(NOMEM_TMR_IDX);
q 3213 drivers/net/ethernet/chelsio/cxgb4/sge.c rspq_next(q);
q 3217 drivers/net/ethernet/chelsio/cxgb4/sge.c if (q->offset >= 0 && fl_cap(&rxq->fl) - rxq->fl.avail >= 16)
q 3218 drivers/net/ethernet/chelsio/cxgb4/sge.c __refill_fl(q->adap, &rxq->fl);
q 3236 drivers/net/ethernet/chelsio/cxgb4/sge.c struct sge_rspq *q = container_of(napi, struct sge_rspq, napi);
q 3240 drivers/net/ethernet/chelsio/cxgb4/sge.c work_done = process_responses(q, budget);
q 3245 drivers/net/ethernet/chelsio/cxgb4/sge.c timer_index = QINTR_TIMER_IDX_G(q->next_intr_params);
q 3247 drivers/net/ethernet/chelsio/cxgb4/sge.c if (q->adaptive_rx) {
q 3255 drivers/net/ethernet/chelsio/cxgb4/sge.c q->next_intr_params =
q 3258 drivers/net/ethernet/chelsio/cxgb4/sge.c params = q->next_intr_params;
q 3260 drivers/net/ethernet/chelsio/cxgb4/sge.c params = q->next_intr_params;
q 3261 drivers/net/ethernet/chelsio/cxgb4/sge.c q->next_intr_params = q->intr_params;
q 3271 drivers/net/ethernet/chelsio/cxgb4/sge.c if (unlikely(q->bar2_addr == NULL)) {
q 3272 drivers/net/ethernet/chelsio/cxgb4/sge.c t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS_A),
q 3273 drivers/net/ethernet/chelsio/cxgb4/sge.c val | INGRESSQID_V((u32)q->cntxt_id));
q 3275 drivers/net/ethernet/chelsio/cxgb4/sge.c writel(val | INGRESSQID_V(q->bar2_qid),
q 3276 drivers/net/ethernet/chelsio/cxgb4/sge.c q->bar2_addr + SGE_UDB_GTS);
q 3287 drivers/net/ethernet/chelsio/cxgb4/sge.c struct sge_rspq *q = cookie;
q 3289 drivers/net/ethernet/chelsio/cxgb4/sge.c napi_schedule(&q->napi);
q 3301 drivers/net/ethernet/chelsio/cxgb4/sge.c struct sge_rspq *q = &adap->sge.intrq;
q 3306 drivers/net/ethernet/chelsio/cxgb4/sge.c rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc));
q 3307 drivers/net/ethernet/chelsio/cxgb4/sge.c if (!is_new_response(rc, q))
q 3318 drivers/net/ethernet/chelsio/cxgb4/sge.c rspq_next(q);
q 3321 drivers/net/ethernet/chelsio/cxgb4/sge.c val = CIDXINC_V(credits) | SEINTARM_V(q->intr_params);
q 3326 drivers/net/ethernet/chelsio/cxgb4/sge.c if (unlikely(q->bar2_addr == NULL)) {
q 3328 drivers/net/ethernet/chelsio/cxgb4/sge.c val | INGRESSQID_V(q->cntxt_id));
q 3330 drivers/net/ethernet/chelsio/cxgb4/sge.c writel(val | INGRESSQID_V(q->bar2_qid),
q 3331 drivers/net/ethernet/chelsio/cxgb4/sge.c q->bar2_addr + SGE_UDB_GTS);
q 3438 drivers/net/ethernet/chelsio/cxgb4/sge.c struct sge_eth_txq *q = &s->ptptxq;
q 3442 drivers/net/ethernet/chelsio/cxgb4/sge.c avail = reclaimable(&q->q);
q 3445 drivers/net/ethernet/chelsio/cxgb4/sge.c free_tx_desc(adap, &q->q, avail, false);
q 3446 drivers/net/ethernet/chelsio/cxgb4/sge.c q->q.in_use -= avail;
q 3704 drivers/net/ethernet/chelsio/cxgb4/sge.c static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id)
q 3706 drivers/net/ethernet/chelsio/cxgb4/sge.c q->cntxt_id = id;
q 3707 drivers/net/ethernet/chelsio/cxgb4/sge.c q->bar2_addr = bar2_address(adap,
q 3708 drivers/net/ethernet/chelsio/cxgb4/sge.c q->cntxt_id,
q 3710 drivers/net/ethernet/chelsio/cxgb4/sge.c &q->bar2_qid);
q 3711 drivers/net/ethernet/chelsio/cxgb4/sge.c q->in_use = 0;
q 3712 drivers/net/ethernet/chelsio/cxgb4/sge.c q->cidx = q->pidx = 0;
q 3713 drivers/net/ethernet/chelsio/cxgb4/sge.c q->stops = q->restarts = 0;
q 3714 drivers/net/ethernet/chelsio/cxgb4/sge.c q->stat = (void *)&q->desc[q->size];
q 3715 drivers/net/ethernet/chelsio/cxgb4/sge.c spin_lock_init(&q->db_lock);
q 3716 drivers/net/ethernet/chelsio/cxgb4/sge.c adap->sge.egr_map[id - adap->sge.egr_start] = q;
q 3739 drivers/net/ethernet/chelsio/cxgb4/sge.c nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
q 3741 drivers/net/ethernet/chelsio/cxgb4/sge.c txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size,
q 3743 drivers/net/ethernet/chelsio/cxgb4/sge.c &txq->q.phys_addr, &txq->q.sdesc, s->stat_len,
q 3745 drivers/net/ethernet/chelsio/cxgb4/sge.c if (!txq->q.desc)
q 3779 drivers/net/ethernet/chelsio/cxgb4/sge.c c.eqaddr = cpu_to_be64(txq->q.phys_addr);
q 3794 drivers/net/ethernet/chelsio/cxgb4/sge.c kfree(txq->q.sdesc);
q 3795 drivers/net/ethernet/chelsio/cxgb4/sge.c txq->q.sdesc = NULL;
q 3798 drivers/net/ethernet/chelsio/cxgb4/sge.c txq->q.desc, txq->q.phys_addr);
q 3799 drivers/net/ethernet/chelsio/cxgb4/sge.c txq->q.desc = NULL;
q 3803 drivers/net/ethernet/chelsio/cxgb4/sge.c txq->q.q_type = CXGB4_TXQ_ETH;
q 3804 drivers/net/ethernet/chelsio/cxgb4/sge.c init_txq(adap, &txq->q, FW_EQ_ETH_CMD_EQID_G(ntohl(c.eqid_pkd)));
q 3824 drivers/net/ethernet/chelsio/cxgb4/sge.c nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
q 3826 drivers/net/ethernet/chelsio/cxgb4/sge.c txq->q.desc = alloc_ring(adap->pdev_dev, nentries,
q 3827 drivers/net/ethernet/chelsio/cxgb4/sge.c sizeof(struct tx_desc), 0, &txq->q.phys_addr,
q 3829 drivers/net/ethernet/chelsio/cxgb4/sge.c if (!txq->q.desc)
q 3851 drivers/net/ethernet/chelsio/cxgb4/sge.c c.eqaddr = cpu_to_be64(txq->q.phys_addr);
q 3857 drivers/net/ethernet/chelsio/cxgb4/sge.c txq->q.desc, txq->q.phys_addr);
q 3858 drivers/net/ethernet/chelsio/cxgb4/sge.c txq->q.desc = NULL;
q 3862 drivers/net/ethernet/chelsio/cxgb4/sge.c txq->q.q_type = CXGB4_TXQ_CTRL;
q 3863 drivers/net/ethernet/chelsio/cxgb4/sge.c init_txq(adap, &txq->q, FW_EQ_CTRL_CMD_EQID_G(ntohl(c.cmpliqid_eqid)));
q 3895 drivers/net/ethernet/chelsio/cxgb4/sge.c nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
q 3897 drivers/net/ethernet/chelsio/cxgb4/sge.c txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size,
q 3899 drivers/net/ethernet/chelsio/cxgb4/sge.c &txq->q.phys_addr, &txq->q.sdesc, s->stat_len,
q 3901 drivers/net/ethernet/chelsio/cxgb4/sge.c if (!txq->q.desc)
q 3924 drivers/net/ethernet/chelsio/cxgb4/sge.c c.eqaddr = cpu_to_be64(txq->q.phys_addr);
q 3928 drivers/net/ethernet/chelsio/cxgb4/sge.c kfree(txq->q.sdesc);
q 3929 drivers/net/ethernet/chelsio/cxgb4/sge.c txq->q.sdesc = NULL;
q 3932 drivers/net/ethernet/chelsio/cxgb4/sge.c txq->q.desc, txq->q.phys_addr);
q 3933 drivers/net/ethernet/chelsio/cxgb4/sge.c txq->q.desc = NULL;
q 3937 drivers/net/ethernet/chelsio/cxgb4/sge.c txq->q.q_type = CXGB4_TXQ_ULD;
q 3938 drivers/net/ethernet/chelsio/cxgb4/sge.c init_txq(adap, &txq->q, FW_EQ_OFLD_CMD_EQID_G(ntohl(c.eqid_pkd)));
q 3947 drivers/net/ethernet/chelsio/cxgb4/sge.c void free_txq(struct adapter *adap, struct sge_txq *q)
q 3952 drivers/net/ethernet/chelsio/cxgb4/sge.c q->size * sizeof(struct tx_desc) + s->stat_len,
q 3953 drivers/net/ethernet/chelsio/cxgb4/sge.c q->desc, q->phys_addr);
q 3954 drivers/net/ethernet/chelsio/cxgb4/sge.c q->cntxt_id = 0;
q 3955 drivers/net/ethernet/chelsio/cxgb4/sge.c q->sdesc = NULL;
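
init_txq() above points q->stat just past the last descriptor (q->stat = (void *)&q->desc[q->size]): the ring and its hardware-written status area live in one contiguous allocation, which is also why the alloc_ring()/free_txq() hits size the buffer as ring plus stat_len. A user-space sketch of that layout, with calloc() standing in for the DMA-coherent allocation and the struct names chosen for illustration:

    #include <stdio.h>
    #include <stdlib.h>

    struct tx_desc { unsigned long long flit[8]; };  /* a 64-byte descriptor */

    struct txq {
        struct tx_desc *desc; /* descriptor ring */
        void *stat;           /* hardware status area placed after the ring */
        unsigned int size;    /* number of descriptors */
    };

    /* One contiguous allocation: the descriptors, then stat_len bytes
     * for the hardware-written status (where the cidx snapshot lives). */
    static int txq_alloc(struct txq *q, unsigned int ndesc, size_t stat_len)
    {
        q->desc = calloc(1, ndesc * sizeof(struct tx_desc) + stat_len);
        if (!q->desc)
            return -1;
        q->size = ndesc;
        q->stat = &q->desc[q->size];  /* status follows the last entry */
        return 0;
    }

    int main(void)
    {
        struct txq q;
        if (txq_alloc(&q, 1024, 64) == 0) {
            printf("stat at byte offset %zu\n",
                   (size_t)((char *)q.stat - (char *)q.desc));
            free(q.desc);
        }
        return 0;
    }
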
q 3956 drivers/net/ethernet/chelsio/cxgb4/sge.c q->desc = NULL;
q 3994 drivers/net/ethernet/chelsio/cxgb4/sge.c void t4_free_ofld_rxqs(struct adapter *adap, int n, struct sge_ofld_rxq *q)
q 3996 drivers/net/ethernet/chelsio/cxgb4/sge.c for ( ; n; n--, q++)
q 3997 drivers/net/ethernet/chelsio/cxgb4/sge.c if (q->rspq.desc)
q 3998 drivers/net/ethernet/chelsio/cxgb4/sge.c free_rspq_fl(adap, &q->rspq,
q 3999 drivers/net/ethernet/chelsio/cxgb4/sge.c q->fl.size ? &q->fl : NULL);
q 4033 drivers/net/ethernet/chelsio/cxgb4/sge.c if (etq->q.desc) {
q 4035 drivers/net/ethernet/chelsio/cxgb4/sge.c etq->q.cntxt_id);
q 4037 drivers/net/ethernet/chelsio/cxgb4/sge.c free_tx_desc(adap, &etq->q, etq->q.in_use, true);
q 4039 drivers/net/ethernet/chelsio/cxgb4/sge.c kfree(etq->q.sdesc);
q 4040 drivers/net/ethernet/chelsio/cxgb4/sge.c free_txq(adap, &etq->q);
q 4048 drivers/net/ethernet/chelsio/cxgb4/sge.c if (cq->q.desc) {
q 4051 drivers/net/ethernet/chelsio/cxgb4/sge.c cq->q.cntxt_id);
q 4053 drivers/net/ethernet/chelsio/cxgb4/sge.c free_txq(adap, &cq->q);
q 4065 drivers/net/ethernet/chelsio/cxgb4/sge.c if (etq->q.desc) {
q 4067 drivers/net/ethernet/chelsio/cxgb4/sge.c etq->q.cntxt_id);
q 4069 drivers/net/ethernet/chelsio/cxgb4/sge.c free_tx_desc(adap, &etq->q, etq->q.in_use, true);
q 4071 drivers/net/ethernet/chelsio/cxgb4/sge.c kfree(etq->q.sdesc);
q 4072 drivers/net/ethernet/chelsio/cxgb4/sge.c free_txq(adap, &etq->q);
q 4117 drivers/net/ethernet/chelsio/cxgb4/sge.c if (txq->q.desc)
q 4131 drivers/net/ethernet/chelsio/cxgb4/sge.c if (txq->q.desc)
q 4140 drivers/net/ethernet/chelsio/cxgb4/sge.c if (cq->q.desc)
q 259 drivers/net/ethernet/chelsio/cxgb4vf/adapter.h struct sge_txq q; /* SGE TX Queue */
q 560 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c txq = container_of(tq, struct sge_eth_txq, q);
q 572 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c txq->q.restarts++;
q 659 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c s->egr_base = s->ethtxq[0].q.abs_id - s->ethtxq[0].q.cntxt_id;
q 671 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c EQ_MAP(s, txq->q.abs_id) = &txq->q;
q 1610 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c rp->tx_pending = s->ethtxq[pi->first_qset].q.size;
q 1642 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c s->ethtxq[qs].q.size = rp->tx_pending;
q 2088 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c T("TxQ ID:", q.abs_id);
q 2089 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c T("TxQ size:", q.size);
q 2090 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c T("TxQ inuse:", q.in_use);
q 2091 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c T("TxQ PIdx:", q.pidx);
q 2092 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c T("TxQ CIdx:", q.cidx);
q 2250 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c T("TxQFull:", q.stops);
q 2251 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c T("TxQRestarts:", q.restarts);
q 2797 drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c txq->q.size = 1024;
q 1136 drivers/net/ethernet/chelsio/cxgb4vf/sge.c txq->q.stops++;
q 1208 drivers/net/ethernet/chelsio/cxgb4vf/sge.c reclaim_completed_tx(adapter, &txq->q, true);
q 1217 drivers/net/ethernet/chelsio/cxgb4vf/sge.c credits = txq_avail(&txq->q) - ndesc;
q 1266 drivers/net/ethernet/chelsio/cxgb4vf/sge.c wr = (void *)&txq->q.desc[txq->q.pidx];
q 1365 drivers/net/ethernet/chelsio/cxgb4vf/sge.c T4_TRACE5(adapter->tb[txq->q.cntxt_id & 7],
q 1367 drivers/net/ethernet/chelsio/cxgb4vf/sge.c ndesc, credits, txq->q.pidx, skb->len, ssi->nr_frags);
q 1379 drivers/net/ethernet/chelsio/cxgb4vf/sge.c inline_tx_skb(skb, &txq->q, cpl + 1);
q 1420 drivers/net/ethernet/chelsio/cxgb4vf/sge.c struct sge_txq *tq = &txq->q;
q 1449 drivers/net/ethernet/chelsio/cxgb4vf/sge.c txq_advance(&txq->q, ndesc);
q 1451 drivers/net/ethernet/chelsio/cxgb4vf/sge.c ring_tx_db(adapter, &txq->q, ndesc);
q 2136 drivers/net/ethernet/chelsio/cxgb4vf/sge.c if (reclaimable(&txq->q) && __netif_tx_trylock(txq->txq)) {
q 2137 drivers/net/ethernet/chelsio/cxgb4vf/sge.c int avail = reclaimable(&txq->q);
q 2142 drivers/net/ethernet/chelsio/cxgb4vf/sge.c free_tx_desc(adapter, &txq->q, avail, true);
q 2143 drivers/net/ethernet/chelsio/cxgb4vf/sge.c txq->q.in_use -= avail;
q 2426 drivers/net/ethernet/chelsio/cxgb4vf/sge.c nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
q 2432 drivers/net/ethernet/chelsio/cxgb4vf/sge.c txq->q.desc = alloc_ring(adapter->pdev_dev, txq->q.size,
q 2435 drivers/net/ethernet/chelsio/cxgb4vf/sge.c &txq->q.phys_addr, &txq->q.sdesc, s->stat_len);
q 2436 drivers/net/ethernet/chelsio/cxgb4vf/sge.c if (!txq->q.desc)
q 2468 drivers/net/ethernet/chelsio/cxgb4vf/sge.c cmd.eqaddr = cpu_to_be64(txq->q.phys_addr);
q 2480 drivers/net/ethernet/chelsio/cxgb4vf/sge.c kfree(txq->q.sdesc);
q 2481 drivers/net/ethernet/chelsio/cxgb4vf/sge.c txq->q.sdesc = NULL;
q 2484 drivers/net/ethernet/chelsio/cxgb4vf/sge.c txq->q.desc, txq->q.phys_addr);
q 2485 drivers/net/ethernet/chelsio/cxgb4vf/sge.c txq->q.desc = NULL;
q 2489 drivers/net/ethernet/chelsio/cxgb4vf/sge.c txq->q.in_use = 0;
q 2490 drivers/net/ethernet/chelsio/cxgb4vf/sge.c txq->q.cidx = 0;
q 2491 drivers/net/ethernet/chelsio/cxgb4vf/sge.c txq->q.pidx = 0;
q 2492 drivers/net/ethernet/chelsio/cxgb4vf/sge.c txq->q.stat = (void *)&txq->q.desc[txq->q.size];
q 2493 drivers/net/ethernet/chelsio/cxgb4vf/sge.c txq->q.cntxt_id = FW_EQ_ETH_CMD_EQID_G(be32_to_cpu(rpl.eqid_pkd));
q 2494 drivers/net/ethernet/chelsio/cxgb4vf/sge.c txq->q.bar2_addr = bar2_address(adapter,
q 2495 drivers/net/ethernet/chelsio/cxgb4vf/sge.c txq->q.cntxt_id,
q 2497 drivers/net/ethernet/chelsio/cxgb4vf/sge.c &txq->q.bar2_qid);
q 2498 drivers/net/ethernet/chelsio/cxgb4vf/sge.c txq->q.abs_id =
q 2504 drivers/net/ethernet/chelsio/cxgb4vf/sge.c txq->q.stops = 0;
q 2505 drivers/net/ethernet/chelsio/cxgb4vf/sge.c txq->q.restarts = 0;
q 2575 drivers/net/ethernet/chelsio/cxgb4vf/sge.c if (txq->q.desc) {
q 2576 drivers/net/ethernet/chelsio/cxgb4vf/sge.c t4vf_eth_eq_free(adapter, txq->q.cntxt_id);
q 2577 drivers/net/ethernet/chelsio/cxgb4vf/sge.c free_tx_desc(adapter, &txq->q, txq->q.in_use, true);
q 2578 drivers/net/ethernet/chelsio/cxgb4vf/sge.c kfree(txq->q.sdesc);
q 2579 drivers/net/ethernet/chelsio/cxgb4vf/sge.c free_txq(adapter, &txq->q);
q 5183 drivers/net/ethernet/dec/tulip/de4x5.c char *p, *q, t;
q 5191 drivers/net/ethernet/dec/tulip/de4x5.c if (!(q = strstr(p+strlen(dev->name), "eth"))) q = p + strlen(p);
q 5192 drivers/net/ethernet/dec/tulip/de4x5.c t = *q;
q 5193 drivers/net/ethernet/dec/tulip/de4x5.c *q = '\0';
q 5216 drivers/net/ethernet/dec/tulip/de4x5.c *q = t;
q 152 drivers/net/ethernet/emulex/benet/be.h static inline void *queue_head_node(struct be_queue_info *q)
q 154 drivers/net/ethernet/emulex/benet/be.h return q->dma_mem.va + q->head * q->entry_size;
q 157 drivers/net/ethernet/emulex/benet/be.h static inline void *queue_tail_node(struct be_queue_info *q)
q 159 drivers/net/ethernet/emulex/benet/be.h return q->dma_mem.va + q->tail * q->entry_size;
q 162 drivers/net/ethernet/emulex/benet/be.h static inline void *queue_index_node(struct be_queue_info *q, u16 index)
q 164 drivers/net/ethernet/emulex/benet/be.h return q->dma_mem.va + index * q->entry_size;
q 167 drivers/net/ethernet/emulex/benet/be.h static inline void queue_head_inc(struct be_queue_info *q)
q 169 drivers/net/ethernet/emulex/benet/be.h index_inc(&q->head, q->len);
q 177 drivers/net/ethernet/emulex/benet/be.h static inline void queue_tail_inc(struct be_queue_info *q)
q 179 drivers/net/ethernet/emulex/benet/be.h index_inc(&q->tail, q->len);
q 183 drivers/net/ethernet/emulex/benet/be.h struct be_queue_info q;
q 205 drivers/net/ethernet/emulex/benet/be.h struct be_queue_info q;
q 239 drivers/net/ethernet/emulex/benet/be.h struct be_queue_info q;
q 293 drivers/net/ethernet/emulex/benet/be.h struct be_queue_info q;
q 121 drivers/net/ethernet/emulex/benet/be_cmds.c struct be_queue_info *mccq = &adapter->mcc_obj.q;
q 560 drivers/net/ethernet/emulex/benet/be_cmds.c atomic_dec(&mcc_obj->q.used);
q 586 drivers/net/ethernet/emulex/benet/be_cmds.c if (atomic_read(&mcc_obj->q.used) == 0)
q 604 drivers/net/ethernet/emulex/benet/be_cmds.c u32 index = mcc_obj->q.head;
q 607 drivers/net/ethernet/emulex/benet/be_cmds.c index_dec(&index, mcc_obj->q.len);
q 608 drivers/net/ethernet/emulex/benet/be_cmds.c wrb = queue_index_node(&mcc_obj->q, index);
q 841 drivers/net/ethernet/emulex/benet/be_cmds.c struct be_queue_info *mccq = &adapter->mcc_obj.q;
q 859 drivers/net/ethernet/emulex/benet/be_cmds.c return adapter->mcc_obj.q.created;
q 996 drivers/net/ethernet/emulex/benet/be_cmds.c struct be_dma_mem *q_mem = &eqo->q.dma_mem;
q 1020 drivers/net/ethernet/emulex/benet/be_cmds.c __ilog2_u32(eqo->q.len / 256));
q 1029 drivers/net/ethernet/emulex/benet/be_cmds.c eqo->q.id = le16_to_cpu(resp->eq_id);
q 1032 drivers/net/ethernet/emulex/benet/be_cmds.c eqo->q.created = true;
q 1361 drivers/net/ethernet/emulex/benet/be_cmds.c struct be_queue_info *txq = &txo->q;
q 1451 drivers/net/ethernet/emulex/benet/be_cmds.c int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
q 1492 drivers/net/ethernet/emulex/benet/be_cmds.c req->id = cpu_to_le16(q->id);
q 1495 drivers/net/ethernet/emulex/benet/be_cmds.c q->created = false;
q 1502 drivers/net/ethernet/emulex/benet/be_cmds.c int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q)
q 1519 drivers/net/ethernet/emulex/benet/be_cmds.c req->id = cpu_to_le16(q->id);
q 1522 drivers/net/ethernet/emulex/benet/be_cmds.c q->created = false;
q 2404 drivers/net/ethernet/emulex/benet/be_cmds.h int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
q 2406 drivers/net/ethernet/emulex/benet/be_cmds.h int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q);
q 687 drivers/net/ethernet/emulex/benet/be_ethtool.c ring->rx_max_pending = adapter->rx_obj[0].q.len;
q 688 drivers/net/ethernet/emulex/benet/be_ethtool.c ring->rx_pending = adapter->rx_obj[0].q.len;
q 689 drivers/net/ethernet/emulex/benet/be_ethtool.c ring->tx_max_pending = adapter->tx_obj[0].q.len;
q 690 drivers/net/ethernet/emulex/benet/be_ethtool.c ring->tx_pending = adapter->tx_obj[0].q.len;
q 146 drivers/net/ethernet/emulex/benet/be_main.c static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
q 148 drivers/net/ethernet/emulex/benet/be_main.c struct be_dma_mem *mem = &q->dma_mem;
q 157 drivers/net/ethernet/emulex/benet/be_main.c static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
q 160 drivers/net/ethernet/emulex/benet/be_main.c struct be_dma_mem *mem = &q->dma_mem;
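
The be.h helpers above address ring entries as base + index * entry_size and advance head/tail modulo the ring length via index_inc(). A small sketch of the same arithmetic; the field names mirror the hits, everything else is an illustrative assumption:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct be_queue {
        void *va;            /* CPU address of the ring memory */
        uint16_t head;
        uint16_t tail;
        uint16_t len;        /* number of entries */
        uint16_t entry_size; /* bytes per entry */
    };

    /* Advance an index by one, wrapping modulo the ring length. */
    static void index_inc(uint16_t *index, uint16_t limit)
    {
        *index = (uint16_t)((*index + 1) % limit);
    }

    /* Address of the entry the producer will fill next. */
    static void *queue_head_node(struct be_queue *q)
    {
        return (uint8_t *)q->va + (size_t)q->head * q->entry_size;
    }

    int main(void)
    {
        struct be_queue q = { calloc(8, 16), 7, 0, 8, 16 };

        printf("head node offset: %td\n",
               (uint8_t *)queue_head_node(&q) - (uint8_t *)q.va);
        index_inc(&q.head, q.len);   /* head wraps from 7 back to 0 */
        printf("head after inc: %u\n", q.head);
        free(q.va);
        return 0;
    }
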
q 162 drivers/net/ethernet/emulex/benet/be_main.c memset(q, 0, sizeof(*q));
q 163 drivers/net/ethernet/emulex/benet/be_main.c q->len = len;
q 164 drivers/net/ethernet/emulex/benet/be_main.c q->entry_size = entry_size;
q 230 drivers/net/ethernet/emulex/benet/be_main.c val |= txo->q.id & DB_TXULP_RING_ID_MASK;
q 648 drivers/net/ethernet/emulex/benet/be_main.c erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
q 819 drivers/net/ethernet/emulex/benet/be_main.c return atomic_read(&txo->q.used) + BE_MAX_TX_FRAG_COUNT >= txo->q.len;
q 824 drivers/net/ethernet/emulex/benet/be_main.c return atomic_read(&txo->q.used) < txo->q.len / 2;
q 829 drivers/net/ethernet/emulex/benet/be_main.c return atomic_read(&txo->q.used) > txo->pend_wrb_cnt;
q 921 drivers/net/ethernet/emulex/benet/be_main.c u32 head = txo->q.head;
q 923 drivers/net/ethernet/emulex/benet/be_main.c queue_head_inc(&txo->q);
q 934 drivers/net/ethernet/emulex/benet/be_main.c struct be_queue_info *txq = &txo->q;
q 953 drivers/net/ethernet/emulex/benet/be_main.c struct be_queue_info *txq = &txo->q;
q 970 drivers/net/ethernet/emulex/benet/be_main.c struct be_queue_info *txq = &txo->q;
q 1227 drivers/net/ethernet/emulex/benet/be_main.c struct be_queue_info *txq = &txo->q;
q 1434 drivers/net/ethernet/emulex/benet/be_main.c i, txo->q.head, txo->q.tail,
q 1435 drivers/net/ethernet/emulex/benet/be_main.c atomic_read(&txo->q.used), txo->q.id);
q 1437 drivers/net/ethernet/emulex/benet/be_main.c entry = txo->q.dma_mem.va;
q 2241 drivers/net/ethernet/emulex/benet/be_main.c set_eqd[num].eq_id = eqo->q.id;
q 2282 drivers/net/ethernet/emulex/benet/be_main.c struct be_queue_info *rxq = &rxo->q;
q 2593 drivers/net/ethernet/emulex/benet/be_main.c struct be_queue_info *rxq = &rxo->q;
q 2752 drivers/net/ethernet/emulex/benet/be_main.c struct be_queue_info *txq = &txo->q;
q 2790 drivers/net/ethernet/emulex/benet/be_main.c eqe = queue_tail_node(&eqo->q);
q 2797 drivers/net/ethernet/emulex/benet/be_main.c queue_tail_inc(&eqo->q);
q 2808 drivers/net/ethernet/emulex/benet/be_main.c be_eq_notify(eqo->adapter, eqo->q.id, false, true, num, 0);
q 2814 drivers/net/ethernet/emulex/benet/be_main.c struct be_queue_info *rxq = &rxo->q;
q 2884 drivers/net/ethernet/emulex/benet/be_main.c txq = &txo->q;
q 2909 drivers/net/ethernet/emulex/benet/be_main.c txq = &txo->q;
q 2940 drivers/net/ethernet/emulex/benet/be_main.c if (eqo->q.created) {
q 2942 drivers/net/ethernet/emulex/benet/be_main.c be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
q 2946 drivers/net/ethernet/emulex/benet/be_main.c be_queue_free(adapter, &eqo->q);
q 2972 drivers/net/ethernet/emulex/benet/be_main.c eq = &eqo->q;
q 2994 drivers/net/ethernet/emulex/benet/be_main.c struct be_queue_info *q;
q 2996 drivers/net/ethernet/emulex/benet/be_main.c q = &adapter->mcc_obj.q;
q 2997 drivers/net/ethernet/emulex/benet/be_main.c if (q->created)
q 2998 drivers/net/ethernet/emulex/benet/be_main.c be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
q 2999 drivers/net/ethernet/emulex/benet/be_main.c be_queue_free(adapter, q);
q 3001 drivers/net/ethernet/emulex/benet/be_main.c q = &adapter->mcc_obj.cq;
q 3002 drivers/net/ethernet/emulex/benet/be_main.c if (q->created)
q 3003 drivers/net/ethernet/emulex/benet/be_main.c be_cmd_q_destroy(adapter, q, QTYPE_CQ);
q 3004 drivers/net/ethernet/emulex/benet/be_main.c be_queue_free(adapter, q);
q 3010 drivers/net/ethernet/emulex/benet/be_main.c struct be_queue_info *q, *cq;
q 3018 drivers/net/ethernet/emulex/benet/be_main.c if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
q 3021 drivers/net/ethernet/emulex/benet/be_main.c q = &adapter->mcc_obj.q;
q 3022 drivers/net/ethernet/emulex/benet/be_main.c if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
q 3025 drivers/net/ethernet/emulex/benet/be_main.c if (be_cmd_mccq_create(adapter, q, cq))
q 3031 drivers/net/ethernet/emulex/benet/be_main.c be_queue_free(adapter, q);
q 3042 drivers/net/ethernet/emulex/benet/be_main.c struct be_queue_info *q;
q 3047 drivers/net/ethernet/emulex/benet/be_main.c q = &txo->q;
q 3048 drivers/net/ethernet/emulex/benet/be_main.c if (q->created)
q 3049 drivers/net/ethernet/emulex/benet/be_main.c be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
q 3050 drivers/net/ethernet/emulex/benet/be_main.c be_queue_free(adapter, q);
q 3052 drivers/net/ethernet/emulex/benet/be_main.c q = &txo->cq;
q 3053 drivers/net/ethernet/emulex/benet/be_main.c if (q->created)
q 3054 drivers/net/ethernet/emulex/benet/be_main.c be_cmd_q_destroy(adapter, q, QTYPE_CQ);
q 3055 drivers/net/ethernet/emulex/benet/be_main.c be_queue_free(adapter, q);
q 3082 drivers/net/ethernet/emulex/benet/be_main.c status = be_cmd_cq_create(adapter, cq, &eqo->q, false, 3);
q 3086 drivers/net/ethernet/emulex/benet/be_main.c status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
q 3106 drivers/net/ethernet/emulex/benet/be_main.c struct be_queue_info *q;
q 3111 drivers/net/ethernet/emulex/benet/be_main.c q = &rxo->cq;
q 3112 drivers/net/ethernet/emulex/benet/be_main.c if (q->created)
q 3113 drivers/net/ethernet/emulex/benet/be_main.c be_cmd_q_destroy(adapter, q, QTYPE_CQ);
q 3114 drivers/net/ethernet/emulex/benet/be_main.c be_queue_free(adapter, q);
q 3149 drivers/net/ethernet/emulex/benet/be_main.c eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
q 3180 drivers/net/ethernet/emulex/benet/be_main.c be_eq_notify(adapter, eqo->q.id, false, true, num_evts, 0);
q 3196 drivers/net/ethernet/emulex/benet/be_main.c be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
q 3255 drivers/net/ethernet/emulex/benet/be_main.c if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
q 3279 drivers/net/ethernet/emulex/benet/be_main.c atomic_sub(num_wrbs, &txo->q.used);
q 3329 drivers/net/ethernet/emulex/benet/be_main.c be_eq_notify(adapter, eqo->q.id, true, false, num_evts,
q 3333 drivers/net/ethernet/emulex/benet/be_main.c be_eq_notify(adapter, eqo->q.id, false, false, num_evts, 0);
q 3567 drivers/net/ethernet/emulex/benet/be_main.c struct be_queue_info *q;
q 3572 drivers/net/ethernet/emulex/benet/be_main.c q = &rxo->q;
q 3573 drivers/net/ethernet/emulex/benet/be_main.c if (q->created) {
q 3582 drivers/net/ethernet/emulex/benet/be_main.c if (atomic_read(&q->used) == 0)
q 3587 drivers/net/ethernet/emulex/benet/be_main.c be_cmd_rxq_destroy(adapter, q);
q 3591 drivers/net/ethernet/emulex/benet/be_main.c be_queue_free(adapter, q);
q 3690 drivers/net/ethernet/emulex/benet/be_main.c rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
q 3698 drivers/net/ethernet/emulex/benet/be_main.c rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
q 3706 drivers/net/ethernet/emulex/benet/be_main.c rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
q 3824 drivers/net/ethernet/emulex/benet/be_main.c be_eq_notify(adapter, eqo->q.id, true, true, 0, 0);
q 4923 drivers/net/ethernet/emulex/benet/be_main.c be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
q 834 drivers/net/ethernet/freescale/fec_main.c unsigned int q;
q 836 drivers/net/ethernet/freescale/fec_main.c for (q = 0; q < fep->num_rx_queues; q++) {
q 838 drivers/net/ethernet/freescale/fec_main.c rxq = fep->rx_queue[q];
q 858 drivers/net/ethernet/freescale/fec_main.c for (q = 0; q < fep->num_tx_queues; q++) {
q 860 drivers/net/ethernet/freescale/fec_main.c txq = fep->tx_queue[q];
q 2750 drivers/net/ethernet/freescale/fec_main.c unsigned int q;
q 2752 drivers/net/ethernet/freescale/fec_main.c for (q = 0; q < fep->num_rx_queues; q++) {
q 2753 drivers/net/ethernet/freescale/fec_main.c rxq = fep->rx_queue[q];
q 2769 drivers/net/ethernet/freescale/fec_main.c for (q = 0; q < fep->num_tx_queues; q++) {
q 2770 drivers/net/ethernet/freescale/fec_main.c txq = fep->tx_queue[q];
q 193 drivers/net/ethernet/hisilicon/hns/hnae.c hnae_init_ring(struct hnae_queue *q, struct hnae_ring *ring, int flags)
q 200 drivers/net/ethernet/hisilicon/hns/hnae.c ring->q = q;
q 202 drivers/net/ethernet/hisilicon/hns/hnae.c ring->coal_param = q->handle->coal_param;
q 237 drivers/net/ethernet/hisilicon/hns/hnae.c static int hnae_init_queue(struct hnae_handle *h, struct hnae_queue *q,
q 242 drivers/net/ethernet/hisilicon/hns/hnae.c q->dev = dev;
q 243 drivers/net/ethernet/hisilicon/hns/hnae.c q->handle = h;
q 245 drivers/net/ethernet/hisilicon/hns/hnae.c ret = hnae_init_ring(q, &q->tx_ring, q->tx_ring.flags | RINGF_DIR);
q 249 drivers/net/ethernet/hisilicon/hns/hnae.c ret = hnae_init_ring(q, &q->rx_ring, q->rx_ring.flags & ~RINGF_DIR);
q 254 drivers/net/ethernet/hisilicon/hns/hnae.c dev->ops->init_queue(q);
q 259 drivers/net/ethernet/hisilicon/hns/hnae.c hnae_fini_ring(&q->tx_ring);
q 264 drivers/net/ethernet/hisilicon/hns/hnae.c static void hnae_fini_queue(struct hnae_queue *q)
q 266 drivers/net/ethernet/hisilicon/hns/hnae.c if (q->dev->ops->fini_queue)
q 267 drivers/net/ethernet/hisilicon/hns/hnae.c q->dev->ops->fini_queue(q);
q 269 drivers/net/ethernet/hisilicon/hns/hnae.c hnae_fini_ring(&q->tx_ring);
q 270 drivers/net/ethernet/hisilicon/hns/hnae.c hnae_fini_ring(&q->rx_ring);
q 270 drivers/net/ethernet/hisilicon/hns/hnae.h struct hnae_queue *q;
q 470 drivers/net/ethernet/hisilicon/hns/hnae.h void (*init_queue)(struct hnae_queue *q);
q 471 drivers/net/ethernet/hisilicon/hns/hnae.h void (*fini_queue)(struct hnae_queue *q);
q 570 drivers/net/ethernet/hisilicon/hns/hnae.h #define ring_to_dev(ring) ((ring)->q->dev->dev)
q 585 drivers/net/ethernet/hisilicon/hns/hnae.h #define hnae_queue_xmit(q, buf_num) writel_relaxed(buf_num, \
q 586 drivers/net/ethernet/hisilicon/hns/hnae.h (q)->tx_ring.io_base + RCB_REG_TAIL)
q 595 drivers/net/ethernet/hisilicon/hns/hnae.h struct hnae_buf_ops *bops = ring->q->handle->bops;
q 628 drivers/net/ethernet/hisilicon/hns/hnae.h ring->q->handle->bops->unmap_buffer(ring, &ring->desc_cb[i]);
q 634 drivers/net/ethernet/hisilicon/hns/hnae.h struct hnae_buf_ops *bops = ring->q->handle->bops;
q 648 drivers/net/ethernet/hisilicon/hns/hnae.h struct hnae_buf_ops *bops = ring->q->handle->bops;
q 64 drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c static struct ring_pair_cb *hns_ae_get_ring_pair(struct hnae_queue *q)
q 66 drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c return container_of(q, struct ring_pair_cb, q);
q 115 drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c ae_handle->qs[i] = &ring_pair_cb->q;
q 116 drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c ae_handle->qs[i]->rx_ring.q = ae_handle->qs[i];
q 117 drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c ae_handle->qs[i]->tx_ring.q = ae_handle->qs[i];
q 196 drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c static void hns_ae_init_queue(struct hnae_queue *q)
q 199 drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c container_of(q, struct ring_pair_cb, q);
q 204 drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c static void hns_ae_fini_queue(struct hnae_queue *q)
q 206 drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(q->handle);
q 209 drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c hns_rcb_reset_ring_hw(q);
q 300 drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c struct hnae_queue *q;
q 319 drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c q = handle->qs[i];
q 320 drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c q->rx_ring.buf_size = rx_buf_size;
q 321 drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c hns_rcb_set_rx_ring_bs(q, rx_buf_size);
q 401 drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c hns_rcb_int_ctrl_hw(ring->q, flag, mask);
q 413 drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c hns_rcbv2_int_ctrl_hw(ring->q, flag, mask);
q 544 drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c container_of(handle->qs[0], struct ring_pair_cb, q);
q 556 drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c container_of(handle->qs[0], struct ring_pair_cb, q);
q 574 drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c container_of(handle->qs[0], struct ring_pair_cb, q);
q 585 drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c container_of(handle->qs[0], struct ring_pair_cb, q);
q 92 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c void hns_rcb_reset_ring_hw(struct hnae_queue *q)
q 102 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c tx_fbd_num = dsaf_read_dev(q, RCB_RING_TX_RING_FBDNUM_REG);
q 106 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c dsaf_write_dev(q, RCB_RING_PREFETCH_EN_REG, 0);
q 108 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c dsaf_write_dev(q, RCB_RING_T0_BE_RST, 1);
q 111 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c could_ret = dsaf_read_dev(q, RCB_RING_COULD_BE_RST);
q 115 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c dsaf_write_dev(q, RCB_RING_T0_BE_RST, 0);
q 117 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c dsaf_write_dev(q, RCB_RING_T0_BE_RST, 1);
q 120 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c could_ret = dsaf_read_dev(q, RCB_RING_COULD_BE_RST);
q 125 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c dsaf_write_dev(q, RCB_RING_T0_BE_RST, 0);
q 132 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c dev_err(q->dev->dev, "port%d reset ring fail\n",
q 133 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c hns_ae_get_vf_cb(q->handle)->port_index);
q 142 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c void hns_rcb_int_ctrl_hw(struct hnae_queue *q, u32 flag, u32 mask)
q 147 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c dsaf_write_dev(q, RCB_RING_INTMSK_TXWL_REG, int_mask_en);
q 148 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c dsaf_write_dev(q, RCB_RING_INTMSK_TX_OVERTIME_REG,
q 153 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c dsaf_write_dev(q, RCB_RING_INTMSK_RXWL_REG, int_mask_en);
q 154 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c dsaf_write_dev(q, RCB_RING_INTMSK_RX_OVERTIME_REG,
q 159 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c void hns_rcb_int_clr_hw(struct hnae_queue *q, u32 flag)
q 162 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c dsaf_write_dev(q, RCB_RING_INTSTS_TX_RING_REG, 1);
q 163 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c dsaf_write_dev(q, RCB_RING_INTSTS_TX_OVERTIME_REG, 1);
q 167 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c dsaf_write_dev(q, RCB_RING_INTSTS_RX_RING_REG, 1);
q 168 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c dsaf_write_dev(q, RCB_RING_INTSTS_RX_OVERTIME_REG, 1);
q 172 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c void hns_rcbv2_int_ctrl_hw(struct hnae_queue *q, u32 flag, u32 mask)
q 177 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c dsaf_write_dev(q, RCB_RING_INTMSK_TXWL_REG, int_mask_en);
q 180 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c dsaf_write_dev(q, RCB_RING_INTMSK_RXWL_REG, int_mask_en);
q 183 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c void hns_rcbv2_int_clr_hw(struct hnae_queue *q, u32 flag)
q 186 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c dsaf_write_dev(q, RCBV2_TX_RING_INT_STS_REG, 1);
q 189 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c dsaf_write_dev(q, RCBV2_RX_RING_INT_STS_REG, 1);
q 196 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c void hns_rcb_ring_enable_hw(struct hnae_queue *q, u32 val)
q 198 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c dsaf_write_dev(q, RCB_RING_PREFETCH_EN_REG, !!val);
q 201 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c void hns_rcb_start(struct hnae_queue *q, u32 val)
q 203 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c hns_rcb_ring_enable_hw(q, val);
q 221 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c void hns_rcb_set_tx_ring_bs(struct hnae_queue *q, u32 buf_size)
q 225 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c dsaf_write_dev(q, RCB_RING_TX_RING_BD_LEN_REG,
q 233 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c void hns_rcb_set_rx_ring_bs(struct hnae_queue *q, u32 buf_size)
q 237 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c dsaf_write_dev(q, RCB_RING_RX_RING_BD_LEN_REG,
q 248 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c struct hnae_queue *q = &ring_pair->q;
q 250 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c (ring_type == RX_RING) ? &q->rx_ring : &q->tx_ring;
q 254 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c dsaf_write_dev(q, RCB_RING_RX_RING_BASEADDR_L_REG,
q 256 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c dsaf_write_dev(q, RCB_RING_RX_RING_BASEADDR_H_REG,
q 259 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c hns_rcb_set_rx_ring_bs(q, ring->buf_size);
q 261 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c dsaf_write_dev(q, RCB_RING_RX_RING_BD_NUM_REG,
q 263 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c dsaf_write_dev(q, RCB_RING_RX_RING_PKTLINE_REG,
q 266 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c dsaf_write_dev(q, RCB_RING_TX_RING_BASEADDR_L_REG,
q 268 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c dsaf_write_dev(q, RCB_RING_TX_RING_BASEADDR_H_REG,
q 271 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c hns_rcb_set_tx_ring_bs(q, ring->buf_size);
q 273 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c dsaf_write_dev(q, RCB_RING_TX_RING_BD_NUM_REG,
q 275 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c dsaf_write_dev(q, RCB_RING_TX_RING_PKTLINE_REG,
q 440 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c static void hns_rcb_ring_get_cfg(struct hnae_queue *q, int ring_type)
q 448 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c ring_pair_cb = container_of(q, struct ring_pair_cb, q);
q 451 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c ring = &q->rx_ring;
q 452 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c ring->io_base = ring_pair_cb->q.io_base;
q 456 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c ring = &q->tx_ring;
q 457 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c ring->io_base = ring_pair_cb->q.io_base +
q 484 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c ring_pair_cb->q.handle = NULL;
q 486 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c hns_rcb_ring_get_cfg(&ring_pair_cb->q, RX_RING);
q 487 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c hns_rcb_ring_get_cfg(&ring_pair_cb->q, TX_RING);
q 529 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c ring_pair_cb->q.io_base =
q 543 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c ring_pair_cb->q.phy_base =
q 820 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c container_of(queue, struct ring_pair_cb, q);
q 854 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c container_of(queue, struct ring_pair_cb, q);
q 1073 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c = container_of(queue, struct ring_pair_cb, q);
q 87 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h struct hnae_queue q;
q 119 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h void hns_rcb_start(struct hnae_queue *q, u32 val);
q 126 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h void hns_rcb_ring_enable_hw(struct hnae_queue *q, u32 val);
q 127 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h void hns_rcb_int_clr_hw(struct hnae_queue *q, u32 flag);
q 128 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h void hns_rcb_int_ctrl_hw(struct hnae_queue *q, u32 flag, u32 enable);
q 129 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h void hns_rcbv2_int_ctrl_hw(struct hnae_queue *q, u32 flag, u32 mask);
q 130 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h void hns_rcbv2_int_clr_hw(struct hnae_queue *q, u32 flag);
q 133 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h void hns_rcb_reset_ring_hw(struct hnae_queue *q);
q 161 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h void hns_rcb_set_rx_ring_bs(struct hnae_queue *q, u32 buf_size);
q 162 drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h void hns_rcb_set_tx_ring_bs(struct hnae_queue *q, u32 buf_size);
q 583 drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c #define hns_xgmac_cpy_q(p, q) \
q 585 drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c *(p) = (u32)(q);\
q 586 drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c *((p) + 1) = (u32)((q) >> 32);\
q 68 drivers/net/ethernet/hisilicon/hns/hns_enet.c HNSV2_TXD_PORTID_S, ring->q->handle->dport_id);
q 720 drivers/net/ethernet/hisilicon/hns/hns_enet.c bool coal_enable = ring->q->handle->coal_adapt_en;
q 731 drivers/net/ethernet/hisilicon/hns/hns_enet.c bool coal_enable = ring->q->handle->coal_adapt_en;
q 779 drivers/net/ethernet/hisilicon/hns/hns_enet.c struct hnae_handle *handle = ring->q->handle;
q 881 drivers/net/ethernet/hisilicon/hns/hns_enet.c ring_data->ring->q->handle->dev->ops->toggle_ring_irq(ring, 0);
q 885 drivers/net/ethernet/hisilicon/hns/hns_enet.c if (ring->q->handle->coal_adapt_en)
q 890 drivers/net/ethernet/hisilicon/hns/hns_enet.c ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
q 908 drivers/net/ethernet/hisilicon/hns/hns_enet.c if (ring->q->handle->coal_adapt_en)
q 1008 drivers/net/ethernet/hisilicon/hns/hns_enet.c ring_data->ring->q->handle->dev->ops->toggle_ring_irq(ring, 0);
q 1013 drivers/net/ethernet/hisilicon/hns/hns_enet.c ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
q 1065 drivers/net/ethernet/hisilicon/hns/hns_enet.c ring->q->handle->dev->ops->toggle_ring_irq(ring, 0);
q 1078 drivers/net/ethernet/hisilicon/hns/hns_enet.c ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
q 1692 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c struct netdev_queue *q;
q 1695 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c q = netdev_get_tx_queue(ndev, i);
q 1696 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c trans_start = q->trans_start;
q 1697 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c if (netif_xmit_stopped(q) &&
q 1702 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c q->state,
q 3486 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c static int hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
q 3501 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c ring_data[q->tqp_index].ring = ring;
q 3502 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c ring_data[q->tqp_index].queue_index = q->tqp_index;
q 3503 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c ring->io_base = (u8 __iomem *)q->io_base + HNS3_TX_REG_OFFSET;
q 3506 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c ring_data[q->tqp_index + queue_num].ring = ring;
q 3507 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c ring_data[q->tqp_index + queue_num].queue_index = q->tqp_index;
q 3508 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c ring->io_base = q->io_base;
q 3513 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c ring->tqp = q;
q 3518 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c ring->buf_size = q->buf_size;
q 3670 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c struct hnae3_queue *q = ring->tqp;
q 3673 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_L_REG, (u32)dma);
q 3674 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_H_REG,
q 3677 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c hns3_write_dev(q, HNS3_RING_RX_RING_BD_LEN_REG,
q 3679 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c hns3_write_dev(q, HNS3_RING_RX_RING_BD_NUM_REG,
q 3683 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_L_REG,
q 3685 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_H_REG,
q 3688 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c hns3_write_dev(q, HNS3_RING_TX_RING_BD_NUM_REG,
q 3706 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c struct hnae3_queue *q;
q 3708 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c q = priv->ring_data[tc_info->tqp_offset + j].ring->tqp;
q 3709 drivers/net/ethernet/hisilicon/hns3/hns3_enet.c hns3_write_dev(q, HNS3_RING_TX_RING_TC_REG,
q 548 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c tqp = container_of(queue, struct hclge_tqp, q);
q 567 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c tqp = container_of(queue, struct hclge_tqp, q);
q 596 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
q 601 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
q 624 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c struct hclge_tqp, q);
q 632 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c struct hclge_tqp, q);
q 1463 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c tqp->q.ae_algo = &ae_algo;
q 1464 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c tqp->q.buf_size = hdev->rx_buf_len;
q 1465 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c tqp->q.tx_desc_num = hdev->num_tx_desc;
q 1466 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c tqp->q.rx_desc_num = hdev->num_rx_desc;
q 1467 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
q 1509 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c hdev->htqp[i].q.handle = &vport->nic;
q 1510 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c hdev->htqp[i].q.tqp_index = alloced;
q 1511 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
q 1512 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
q 1513 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c kinfo->tqp[alloced] = &hdev->htqp[i].q;
q 1564 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c struct hclge_tqp *q =
q 1565 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c container_of(kinfo->tqp[i], struct hclge_tqp, q);
q 1570 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
q 6634 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c tqp = container_of(queue, struct hclge_tqp, q);
q 8510 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c tqp = container_of(queue, struct hclge_tqp, q);
q 294 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h struct hnae3_queue q;
q 945 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h struct hclge_tqp *tqp = container_of(queue, struct hclge_tqp, q);
q 762 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c struct hnae3_queue *q = tqp[v_tc_info->tqp_offset + j];
q 765 drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c hclge_get_queue_id(q),
q 108 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
q 150 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
q 154 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
q 176 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c struct hclgevf_tqp, q);
q 184 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c struct hclgevf_tqp, q);
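
The hclge/hclgevf entries above keep walking from a generic struct hnae3_queue pointer back to the wrapping struct hclge_tqp with container_of(). A self-contained userspace model of that recovery, built only on offsetof() (struct names here are illustrative stand-ins):

#include <stddef.h>
#include <stdio.h>

/* Userspace model of the kernel's container_of(): recover the wrapper
 * from a pointer to one of its embedded members. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct queue { int index; };
struct tqp { int stats; struct queue q; }; /* stand-in for struct hclge_tqp */

int main(void)
{
	struct tqp t = { .stats = 7, .q = { .index = 3 } };
	struct queue *qp = &t.q; /* only the embedded member is handed around */
	struct tqp *back = container_of(qp, struct tqp, q);

	printf("%d %d\n", back->stats, back->q.index); /* 7 3 */
	return 0;
}
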
q 369 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c tqp->q.ae_algo = &ae_algovf;
q 370 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c tqp->q.buf_size = hdev->rx_buf_len;
q 371 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c tqp->q.tx_desc_num = hdev->num_tx_desc;
q 372 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c tqp->q.rx_desc_num = hdev->num_rx_desc;
q 373 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c tqp->q.io_base = hdev->hw.io_base + HCLGEVF_TQP_REG_OFFSET +
q 409 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c hdev->htqp[i].q.handle = &hdev->nic;
q 410 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c hdev->htqp[i].q.tqp_index = i;
q 411 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c kinfo->tqp[i] = &hdev->htqp[i].q;
q 1172 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
q 184 drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h struct hnae3_queue q;
q 329 drivers/net/ethernet/intel/fm10k/fm10k_common.c struct fm10k_hw_stats_q *q,
q 341 drivers/net/ethernet/intel/fm10k/fm10k_common.c &q->tx_packets);
q 346 drivers/net/ethernet/intel/fm10k/fm10k_common.c &q->tx_bytes);
q 358 drivers/net/ethernet/intel/fm10k/fm10k_common.c if (q->tx_stats_idx == id_tx) {
q 359 drivers/net/ethernet/intel/fm10k/fm10k_common.c q->tx_packets.count += tx_packets;
q 360 drivers/net/ethernet/intel/fm10k/fm10k_common.c q->tx_bytes.count += tx_bytes;
q 364 drivers/net/ethernet/intel/fm10k/fm10k_common.c fm10k_update_hw_base_32b(&q->tx_packets, tx_packets);
q 365 drivers/net/ethernet/intel/fm10k/fm10k_common.c fm10k_update_hw_base_48b(&q->tx_bytes, tx_bytes);
q 367 drivers/net/ethernet/intel/fm10k/fm10k_common.c q->tx_stats_idx = id_tx;
q 380 drivers/net/ethernet/intel/fm10k/fm10k_common.c struct fm10k_hw_stats_q *q,
q 392 drivers/net/ethernet/intel/fm10k/fm10k_common.c &q->rx_drops);
q 395 drivers/net/ethernet/intel/fm10k/fm10k_common.c &q->rx_packets);
q 400 drivers/net/ethernet/intel/fm10k/fm10k_common.c &q->rx_bytes);
q 412 drivers/net/ethernet/intel/fm10k/fm10k_common.c if (q->rx_stats_idx == id_rx) {
q 413 drivers/net/ethernet/intel/fm10k/fm10k_common.c q->rx_drops.count += rx_drops;
q 414 drivers/net/ethernet/intel/fm10k/fm10k_common.c q->rx_packets.count += rx_packets;
q 415 drivers/net/ethernet/intel/fm10k/fm10k_common.c q->rx_bytes.count += rx_bytes;
q 419 drivers/net/ethernet/intel/fm10k/fm10k_common.c fm10k_update_hw_base_32b(&q->rx_drops, rx_drops);
q 420 drivers/net/ethernet/intel/fm10k/fm10k_common.c fm10k_update_hw_base_32b(&q->rx_packets, rx_packets);
q 421 drivers/net/ethernet/intel/fm10k/fm10k_common.c fm10k_update_hw_base_48b(&q->rx_bytes, rx_bytes);
q 423 drivers/net/ethernet/intel/fm10k/fm10k_common.c q->rx_stats_idx = id_rx;
q 436 drivers/net/ethernet/intel/fm10k/fm10k_common.c void fm10k_update_hw_stats_q(struct fm10k_hw *hw, struct fm10k_hw_stats_q *q,
q 441 drivers/net/ethernet/intel/fm10k/fm10k_common.c for (i = 0; i < count; i++, idx++, q++) {
q 442 drivers/net/ethernet/intel/fm10k/fm10k_common.c fm10k_update_hw_stats_tx_q(hw, q, idx);
q 443 drivers/net/ethernet/intel/fm10k/fm10k_common.c fm10k_update_hw_stats_rx_q(hw, q, idx);
q 456 drivers/net/ethernet/intel/fm10k/fm10k_common.c void fm10k_unbind_hw_stats_q(struct fm10k_hw_stats_q *q, u32 idx, u32 count)
q 460 drivers/net/ethernet/intel/fm10k/fm10k_common.c for (i = 0; i < count; i++, idx++, q++) {
q 461 drivers/net/ethernet/intel/fm10k/fm10k_common.c q->rx_stats_idx = 0;
q 462 drivers/net/ethernet/intel/fm10k/fm10k_common.c q->tx_stats_idx = 0;
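
The fm10k_common.c entries above fold 32-bit hardware queue counters into 64-bit software totals, adding deltas while the queue binding is unchanged (the stats_idx check) and re-basing otherwise. A hedged sketch of the wrap-safe delta fold that the fm10k_update_hw_base_32b() calls perform; the struct and function names here are illustrative, not the driver's:

#include <stdint.h>
#include <stdio.h>

/* Hardware exposes a 32-bit free-running counter; software keeps a
 * 64-bit total by folding in the delta since the last read. Unsigned
 * subtraction keeps the delta correct across counter wrap. */
struct hw_stat { uint64_t count; uint32_t base; };

static void stat_update(struct hw_stat *s, uint32_t hw_now)
{
	s->count += (uint32_t)(hw_now - s->base); /* survives 32-bit wrap */
	s->base = hw_now;
}

int main(void)
{
	struct hw_stat s = { 0, 0xfffffff0u };

	stat_update(&s, 0x00000010u); /* counter wrapped: delta is 0x20 */
	printf("%llu\n", (unsigned long long)s.count); /* 32 */
	return 0;
}
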
q 43 drivers/net/ethernet/intel/fm10k/fm10k_common.h void fm10k_update_hw_stats_q(struct fm10k_hw *hw, struct fm10k_hw_stats_q *q,
q 46 drivers/net/ethernet/intel/fm10k/fm10k_common.h void fm10k_unbind_hw_stats_q(struct fm10k_hw_stats_q *q, u32 idx, u32 count);
q 93 drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c rxd->w.sglort, rxd->q.timestamp);
q 133 drivers/net/ethernet/intel/fm10k/fm10k_main.c rx_desc->q.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
q 462 drivers/net/ethernet/intel/fm10k/fm10k_main.c FM10K_CB(skb)->tstamp = rx_desc->q.timestamp;
q 614 drivers/net/ethernet/intel/fm10k/fm10k_pci.c struct fm10k_hw_stats_q *q = &interface->stats.q[i];
q 616 drivers/net/ethernet/intel/fm10k/fm10k_pci.c tx_bytes_nic += q->tx_bytes.count;
q 617 drivers/net/ethernet/intel/fm10k/fm10k_pci.c tx_pkts_nic += q->tx_packets.count;
q 618 drivers/net/ethernet/intel/fm10k/fm10k_pci.c rx_bytes_nic += q->rx_bytes.count;
q 619 drivers/net/ethernet/intel/fm10k/fm10k_pci.c rx_pkts_nic += q->rx_packets.count;
q 620 drivers/net/ethernet/intel/fm10k/fm10k_pci.c rx_drops_nic += q->rx_drops.count;
q 1332 drivers/net/ethernet/intel/fm10k/fm10k_pci.c int q;
q 1340 drivers/net/ethernet/intel/fm10k/fm10k_pci.c for (q = 255;;) {
q 1342 drivers/net/ethernet/intel/fm10k/fm10k_pci.c if (q < FM10K_MAX_QUEUES_PF) {
q 1344 drivers/net/ethernet/intel/fm10k/fm10k_pci.c fm10k_write_reg(hw, FM10K_RXDCTL(q), rxdctl);
q 1352 drivers/net/ethernet/intel/fm10k/fm10k_pci.c q &= ~(32 - 1);
q 1354 drivers/net/ethernet/intel/fm10k/fm10k_pci.c if (!q)
q 1357 drivers/net/ethernet/intel/fm10k/fm10k_pci.c if (q-- % 32)
q 1360 drivers/net/ethernet/intel/fm10k/fm10k_pci.c maxholdq = fm10k_read_reg(hw, FM10K_MAXHOLDQ(q / 32));
q 1362 drivers/net/ethernet/intel/fm10k/fm10k_pci.c fm10k_write_reg(hw, FM10K_MAXHOLDQ(q / 32), maxholdq);
q 1134 drivers/net/ethernet/intel/fm10k/fm10k_pf.c struct fm10k_hw_stats_q *q,
q 1142 drivers/net/ethernet/intel/fm10k/fm10k_pf.c fm10k_update_hw_stats_q(hw, q, idx, qpp);
q 1487 drivers/net/ethernet/intel/fm10k/fm10k_pf.c fm10k_update_hw_stats_q(hw, stats->q, 0, hw->mac.max_queues);
q 1512 drivers/net/ethernet/intel/fm10k/fm10k_pf.c fm10k_unbind_hw_stats_q(stats->q, 0, hw->mac.max_queues);
q 439 drivers/net/ethernet/intel/fm10k/fm10k_type.h struct fm10k_hw_stats_q q[FM10K_MAX_QUEUES_PF];
q 701 drivers/net/ethernet/intel/fm10k/fm10k_type.h } q; /* Read, Writeback, 64b quad-words */
q 454 drivers/net/ethernet/intel/fm10k/fm10k_vf.c fm10k_update_hw_stats_q(hw, stats->q, 0, hw->mac.max_queues);
q 468 drivers/net/ethernet/intel/fm10k/fm10k_vf.c fm10k_unbind_hw_stats_q(stats->q, 0, hw->mac.max_queues);
q 317 drivers/net/ethernet/intel/i40e/i40e_main.c struct netdev_queue *q;
q 320 drivers/net/ethernet/intel/i40e/i40e_main.c q = netdev_get_tx_queue(netdev, i);
q 321 drivers/net/ethernet/intel/i40e/i40e_main.c trans_start = q->trans_start;
q 322 drivers/net/ethernet/intel/i40e/i40e_main.c if (netif_xmit_stopped(q) &&
q 786 drivers/net/ethernet/intel/i40e/i40e_main.c u16 q;
q 806 drivers/net/ethernet/intel/i40e/i40e_main.c for (q = 0; q < vsi->num_queue_pairs; q++) {
q 808 drivers/net/ethernet/intel/i40e/i40e_main.c p = READ_ONCE(vsi->tx_rings[q]);
q 3521 drivers/net/ethernet/intel/i40e/i40e_main.c int i, q;
q 3552 drivers/net/ethernet/intel/i40e/i40e_main.c for (q = 0; q < q_vector->num_ringpairs; q++) {
q 3584 drivers/net/ethernet/intel/i40e/i40e_main.c if (q == (q_vector->num_ringpairs - 1))
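
The hns3 and i40e watchdog entries above (and the ice ones just below) read q->trans_start and test netif_xmit_stopped() against a timeout. That comparison has to survive jiffies wrap, which the kernel's time_after() handles via signed subtraction; a standalone model of the arithmetic:

#include <stdio.h>

/* Model of the watchdog test: is 'now' after 'trans_start + timeo',
 * even if the tick counter has wrapped in between? */
typedef unsigned long jiffies_t;

static int time_after(jiffies_t a, jiffies_t b)
{
	return (long)(b - a) < 0; /* signed view of the unsigned difference */
}

int main(void)
{
	jiffies_t trans_start = (jiffies_t)-5; /* queue last ran just before wrap */
	jiffies_t now = 10;                    /* counter has wrapped past zero  */
	jiffies_t timeo = 8;

	printf("stuck? %d\n", time_after(now, trans_start + timeo)); /* 1 */
	return 0;
}
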
q 2816 drivers/net/ethernet/intel/ice/ice_common.c struct ice_aqc_add_txqs_perq *q = list->txqs;
q 2818 drivers/net/ethernet/intel/ice/ice_common.c sum_q_size += list->num_txqs * sizeof(*q);
q 2819 drivers/net/ethernet/intel/ice/ice_common.c list = (struct ice_aqc_add_tx_qgrp *)(q + list->num_txqs);
q 1989 drivers/net/ethernet/intel/ice/ice_lib.c int i, q;
q 2011 drivers/net/ethernet/intel/ice/ice_lib.c for (q = 0; q < q_vector->num_ring_tx; q++) {
q 2017 drivers/net/ethernet/intel/ice/ice_lib.c for (q = 0; q < q_vector->num_ring_rx; q++) {
q 2683 drivers/net/ethernet/intel/ice/ice_lib.c int i, q;
q 2691 drivers/net/ethernet/intel/ice/ice_lib.c for (q = 0; q < q_vector->num_ring_tx; q++) {
q 2696 drivers/net/ethernet/intel/ice/ice_lib.c for (q = 0; q < q_vector->num_ring_rx; q++) {
q 4654 drivers/net/ethernet/intel/ice/ice_main.c struct netdev_queue *q;
q 4656 drivers/net/ethernet/intel/ice/ice_main.c q = netdev_get_tx_queue(netdev, i);
q 4657 drivers/net/ethernet/intel/ice/ice_main.c trans_start = q->trans_start;
q 4658 drivers/net/ethernet/intel/ice/ice_main.c if (netif_xmit_stopped(q) &&
q 690 drivers/net/ethernet/intel/igb/e1000_nvm.c u8 q, hval, rem, result;
q 769 drivers/net/ethernet/intel/igb/e1000_nvm.c q = eeprom_verl / NVM_HEX_CONV;
q 770 drivers/net/ethernet/intel/igb/e1000_nvm.c hval = q * NVM_HEX_TENS;
q 382 drivers/net/ethernet/jme.h #define NETIF_NAPI_SET(dev, napis, pollfn, q) \
q 383 drivers/net/ethernet/jme.h netif_napi_add(dev, napis, pollfn, q);
q 133 drivers/net/ethernet/marvell/mv643xx_eth.c #define RXQ_CURRENT_DESC_PTR(q) (0x020c + ((q) << 4))
q 135 drivers/net/ethernet/marvell/mv643xx_eth.c #define TXQ_CURRENT_DESC_PTR(q) (0x02c0 + ((q) << 2))
q 136 drivers/net/ethernet/marvell/mv643xx_eth.c #define TXQ_BW_TOKENS(q) (0x0300 + ((q) << 4))
q 137 drivers/net/ethernet/marvell/mv643xx_eth.c #define TXQ_BW_CONF(q) (0x0304 + ((q) << 4))
q 138 drivers/net/ethernet/marvell/mv643xx_eth.c #define TXQ_BW_WRR_CONF(q) (0x0308 + ((q) << 4))
q 42 drivers/net/ethernet/marvell/mvneta.c #define MVNETA_RXQ_CONFIG_REG(q) (0x1400 + ((q) << 2))
q 50 drivers/net/ethernet/marvell/mvneta.c #define MVNETA_RXQ_THRESHOLD_REG(q) (0x14c0 + ((q) << 2))
q 52 drivers/net/ethernet/marvell/mvneta.c #define MVNETA_RXQ_BASE_ADDR_REG(q) (0x1480 + ((q) << 2))
q 53 drivers/net/ethernet/marvell/mvneta.c #define MVNETA_RXQ_SIZE_REG(q) (0x14a0 + ((q) << 2))
q 56 drivers/net/ethernet/marvell/mvneta.c #define MVNETA_RXQ_STATUS_REG(q) (0x14e0 + ((q) << 2))
q 58 drivers/net/ethernet/marvell/mvneta.c #define MVNETA_RXQ_STATUS_UPDATE_REG(q) (0x1500 + ((q) << 2))
q 79 drivers/net/ethernet/marvell/mvneta.c #define MVNETA_DEF_RXQ(q) ((q) << 1)
q 80 drivers/net/ethernet/marvell/mvneta.c #define MVNETA_DEF_RXQ_ARP(q) ((q) << 4)
q 82 drivers/net/ethernet/marvell/mvneta.c #define MVNETA_DEF_RXQ_TCP(q) ((q) << 16)
q 83 drivers/net/ethernet/marvell/mvneta.c #define MVNETA_DEF_RXQ_UDP(q) ((q) << 19)
q 84 drivers/net/ethernet/marvell/mvneta.c #define MVNETA_DEF_RXQ_BPDU(q) ((q) << 22)
q 86 drivers/net/ethernet/marvell/mvneta.c #define MVNETA_PORT_CONFIG_DEFL_VALUE(q) (MVNETA_DEF_RXQ(q) | \
q 87 drivers/net/ethernet/marvell/mvneta.c MVNETA_DEF_RXQ_ARP(q) | \
q 88 drivers/net/ethernet/marvell/mvneta.c MVNETA_DEF_RXQ_TCP(q) | \
q 89 drivers/net/ethernet/marvell/mvneta.c MVNETA_DEF_RXQ_UDP(q) | \
q 90 drivers/net/ethernet/marvell/mvneta.c MVNETA_DEF_RXQ_BPDU(q) | \
q 127 drivers/net/ethernet/marvell/mvneta.c #define MVNETA_RXQ_TIME_COAL_REG(q) (0x2580 + ((q) << 2))
q 179 drivers/net/ethernet/marvell/mvneta.c #define MVNETA_CAUSE_TXQ_ERROR_MASK(q) (1 << (MVNETA_CAUSE_TXQ_ERROR_SHIFT + (q)))
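
The mv643xx/mvneta register macros above all follow one pattern: the per-queue register lives at base + (q << shift), i.e. one register (or block) per queue at a fixed stride; 4 bytes for "<< 2", 16 bytes for "<< 4". A compilable illustration of the same two macro shapes (the offsets are copied from the entries above; everything else is mine):

#include <stdio.h>

#define RXQ_CONFIG_REG(q) (0x1400 + ((q) << 2)) /* stride 4: one u32 per queue  */
#define TXQ_BW_TOKENS(q)  (0x0300 + ((q) << 4)) /* stride 16: a block per queue */

int main(void)
{
	/* Print the register offsets the macros generate for queues 0..3. */
	for (int q = 0; q < 4; q++)
		printf("q%d: cfg=0x%04x tokens=0x%04x\n",
		       q, RXQ_CONFIG_REG(q), TXQ_BW_TOKENS(q));
	return 0;
}
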
q 188 drivers/net/ethernet/marvell/mvneta.c #define MVETH_TXQ_TOKEN_COUNT_REG(q) (0x2700 + ((q) << 4))
q 189 drivers/net/ethernet/marvell/mvneta.c #define MVETH_TXQ_TOKEN_CFG_REG(q) (0x2704 + ((q) << 4))
q 232 drivers/net/ethernet/marvell/mvneta.c #define MVNETA_TXQ_BASE_ADDR_REG(q) (0x3c00 + ((q) << 2))
q 233 drivers/net/ethernet/marvell/mvneta.c #define MVNETA_TXQ_SIZE_REG(q) (0x3c20 + ((q) << 2))
q 236 drivers/net/ethernet/marvell/mvneta.c #define MVNETA_TXQ_UPDATE_REG(q) (0x3c60 + ((q) << 2))
q 239 drivers/net/ethernet/marvell/mvneta.c #define MVNETA_TXQ_STATUS_REG(q) (0x3c40 + ((q) << 2))
q 247 drivers/net/ethernet/marvell/mvneta.c #define MVNETA_TXQ_TOKEN_SIZE_REG(q) (0x3e40 + ((q) << 2))
q 259 drivers/net/ethernet/marvell/mvneta.c #define MVNETA_QUEUE_NEXT_DESC(q, index) \
q 260 drivers/net/ethernet/marvell/mvneta.c (((index) < (q)->last_desc) ? ((index) + 1) : 0)
q 371 drivers/net/ethernet/marvell/mvpp2/mvpp2.h #define MVPP2_TXQ_SCHED_REFILL_REG(q) (0x8040 + ((q) << 2))
q 375 drivers/net/ethernet/marvell/mvpp2/mvpp2.h #define MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(q) (0x8060 + ((q) << 2))
q 377 drivers/net/ethernet/marvell/mvpp2/mvpp2.h #define MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(q) (0x8080 + ((q) << 2))
q 496 drivers/net/ethernet/marvell/mvpp2/mvpp2.h #define MVPP2_QUEUE_NEXT_DESC(q, index) \
q 497 drivers/net/ethernet/marvell/mvpp2/mvpp2.h (((index) < (q)->last_desc) ? ((index) + 1) : 0)
q 1491 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c int i, q;
q 1508 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c for (q = 0; q < port->ntxqs; q++) {
q 1511 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c mvpp2_ethtool_txq_regs[i].string, q);
q 1516 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c for (q = 0; q < port->nrxqs; q++) {
q 1520 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c q);
q 1529 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c int i, q;
q 1541 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c for (q = 0; q < port->ntxqs; q++)
q 1550 drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c for (q = 0; q < port->nrxqs; q++)
q 51 drivers/net/ethernet/marvell/octeontx2/af/common.h static inline int qmem_alloc(struct device *dev, struct qmem **q,
q 60 drivers/net/ethernet/marvell/octeontx2/af/common.h *q = devm_kzalloc(dev, sizeof(*qmem), GFP_KERNEL);
q 61 drivers/net/ethernet/marvell/octeontx2/af/common.h if (!*q)
q 63 drivers/net/ethernet/marvell/octeontx2/af/common.h qmem = *q;
q 2478 drivers/net/ethernet/marvell/skge.c static void skge_ramset(struct skge_hw *hw, u16 q, u32 start, size_t len)
q 2486 drivers/net/ethernet/marvell/skge.c skge_write8(hw, RB_ADDR(q, RB_CTRL), RB_RST_CLR);
q 2487 drivers/net/ethernet/marvell/skge.c skge_write32(hw, RB_ADDR(q, RB_START), start);
q 2488 drivers/net/ethernet/marvell/skge.c skge_write32(hw, RB_ADDR(q, RB_WP), start);
q 2489 drivers/net/ethernet/marvell/skge.c skge_write32(hw, RB_ADDR(q, RB_RP), start);
q 2490 drivers/net/ethernet/marvell/skge.c skge_write32(hw, RB_ADDR(q, RB_END), end);
q 2492 drivers/net/ethernet/marvell/skge.c if (q == Q_R1 || q == Q_R2) {
q 2494 drivers/net/ethernet/marvell/skge.c skge_write32(hw, RB_ADDR(q, RB_RX_UTPP),
q 2496 drivers/net/ethernet/marvell/skge.c skge_write32(hw, RB_ADDR(q, RB_RX_LTPP),
q 2502 drivers/net/ethernet/marvell/skge.c skge_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_STFWD);
q 2505 drivers/net/ethernet/marvell/skge.c skge_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_OP_MD);
q 2509 drivers/net/ethernet/marvell/skge.c static void skge_qset(struct skge_port *skge, u16 q,
q 2520 drivers/net/ethernet/marvell/skge.c skge_write32(hw, Q_ADDR(q, Q_CSR), CSR_CLR_RESET);
q 2521 drivers/net/ethernet/marvell/skge.c skge_write32(hw, Q_ADDR(q, Q_F), watermark);
q 2522 drivers/net/ethernet/marvell/skge.c skge_write32(hw, Q_ADDR(q, Q_DA_H), (u32)(base >> 32));
q 2523 drivers/net/ethernet/marvell/skge.c skge_write32(hw, Q_ADDR(q, Q_DA_L), (u32)base);
q 1036 drivers/net/ethernet/marvell/sky2.c static void sky2_ramset(struct sky2_hw *hw, u16 q, u32 start, u32 space)
q 1045 drivers/net/ethernet/marvell/sky2.c sky2_write8(hw, RB_ADDR(q, RB_CTRL), RB_RST_CLR);
q 1046 drivers/net/ethernet/marvell/sky2.c sky2_write32(hw, RB_ADDR(q, RB_START), start);
q 1047 drivers/net/ethernet/marvell/sky2.c sky2_write32(hw, RB_ADDR(q, RB_END), end);
q 1048 drivers/net/ethernet/marvell/sky2.c sky2_write32(hw, RB_ADDR(q, RB_WP), start);
q 1049 drivers/net/ethernet/marvell/sky2.c sky2_write32(hw, RB_ADDR(q, RB_RP), start);
q 1051 drivers/net/ethernet/marvell/sky2.c if (q == Q_R1 || q == Q_R2) {
q 1058 drivers/net/ethernet/marvell/sky2.c sky2_write32(hw, RB_ADDR(q, RB_RX_UTHP), tp);
q 1059 drivers/net/ethernet/marvell/sky2.c sky2_write32(hw, RB_ADDR(q, RB_RX_LTHP), space/2);
q 1062 drivers/net/ethernet/marvell/sky2.c sky2_write32(hw, RB_ADDR(q, RB_RX_UTPP), tp);
q 1063 drivers/net/ethernet/marvell/sky2.c sky2_write32(hw, RB_ADDR(q, RB_RX_LTPP), space/4);
q 1068 drivers/net/ethernet/marvell/sky2.c sky2_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_STFWD);
q 1071 drivers/net/ethernet/marvell/sky2.c sky2_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_OP_MD);
q 1072 drivers/net/ethernet/marvell/sky2.c sky2_read8(hw, RB_ADDR(q, RB_CTRL));
q 1076 drivers/net/ethernet/marvell/sky2.c static void sky2_qset(struct sky2_hw *hw, u16 q)
q 1078 drivers/net/ethernet/marvell/sky2.c sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_CLR_RESET);
q 1079 drivers/net/ethernet/marvell/sky2.c sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_OPER_INIT);
q 1080 drivers/net/ethernet/marvell/sky2.c sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_FIFO_OP_ON);
q 1081 drivers/net/ethernet/marvell/sky2.c sky2_write32(hw, Q_ADDR(q, Q_WM), BMU_WM_DEFAULT);
q 1125 drivers/net/ethernet/marvell/sky2.c static inline void sky2_put_idx(struct sky2_hw *hw, unsigned q, u16 idx)
q 1129 drivers/net/ethernet/marvell/sky2.c sky2_write16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX), idx);
q 2915 drivers/net/ethernet/marvell/sky2.c static void sky2_le_error(struct sky2_hw *hw, unsigned port, u16 q)
q 2918 drivers/net/ethernet/marvell/sky2.c u16 idx = sky2_read16(hw, Y2_QADDR(q, PREF_UNIT_GET_IDX));
q 2921 drivers/net/ethernet/marvell/sky2.c dev->name, (unsigned) q, (unsigned) idx,
q 2922 drivers/net/ethernet/marvell/sky2.c (unsigned) sky2_read16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX)));
q 2924 drivers/net/ethernet/marvell/sky2.c sky2_write32(hw, Q_ADDR(q, Q_CSR), BMU_CLR_IRQ_CHK);
q 788 drivers/net/ethernet/marvell/sky2.h #define Y2_QADDR(q,reg) (Y2_B8_PREF_REGS + (q) + (reg))
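
skge_ramset()/sky2_ramset() above carve out a segment [start, end] of on-chip packet RAM per queue, park the read and write pointers at start, and, for receive queues, derive flow-control thresholds from the segment size (the sky2 entries show space/2 and space/4 among them). A loose standalone model of that partitioning; the real drivers program several distinct threshold registers, so the threshold choice here is only indicative:

#include <stdio.h>

/* One queue's slice of packet RAM, with pause thresholds for RX. */
struct ramseg { unsigned start, end, wp, rp, hi_thresh, lo_thresh; };

static void ramset(struct ramseg *r, unsigned start, unsigned space, int is_rx)
{
	r->start = start;
	r->end = start + space - 1;  /* inclusive end of the segment   */
	r->wp = r->rp = start;       /* empty: both pointers at start  */
	r->hi_thresh = is_rx ? space / 2 : 0; /* assert pause above this */
	r->lo_thresh = is_rx ? space / 4 : 0; /* release pause below this */
}

int main(void)
{
	struct ramseg rx;

	ramset(&rx, 0x0000, 0x1000, 1);
	printf("end=0x%x hi=0x%x lo=0x%x\n", rx.end, rx.hi_thresh, rx.lo_thresh);
	return 0;
}
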
q 98 drivers/net/ethernet/mellanox/mlxsw/pci.c struct mlxsw_pci_queue *q;
q 131 drivers/net/ethernet/mellanox/mlxsw/pci.c static void mlxsw_pci_queue_tasklet_schedule(struct mlxsw_pci_queue *q)
q 133 drivers/net/ethernet/mellanox/mlxsw/pci.c tasklet_schedule(&q->tasklet);
q 136 drivers/net/ethernet/mellanox/mlxsw/pci.c static char *__mlxsw_pci_queue_elem_get(struct mlxsw_pci_queue *q,
q 139 drivers/net/ethernet/mellanox/mlxsw/pci.c return q->mem_item.buf + (elem_size * elem_index);
q 143 drivers/net/ethernet/mellanox/mlxsw/pci.c mlxsw_pci_queue_elem_info_get(struct mlxsw_pci_queue *q, int elem_index)
q 145 drivers/net/ethernet/mellanox/mlxsw/pci.c return &q->elem_info[elem_index];
q 149 drivers/net/ethernet/mellanox/mlxsw/pci.c mlxsw_pci_queue_elem_info_producer_get(struct mlxsw_pci_queue *q)
q 151 drivers/net/ethernet/mellanox/mlxsw/pci.c int index = q->producer_counter & (q->count - 1);
q 153 drivers/net/ethernet/mellanox/mlxsw/pci.c if ((u16) (q->producer_counter - q->consumer_counter) == q->count)
q 155 drivers/net/ethernet/mellanox/mlxsw/pci.c return mlxsw_pci_queue_elem_info_get(q, index);
q 159 drivers/net/ethernet/mellanox/mlxsw/pci.c mlxsw_pci_queue_elem_info_consumer_get(struct mlxsw_pci_queue *q)
q 161 drivers/net/ethernet/mellanox/mlxsw/pci.c int index = q->consumer_counter & (q->count - 1);
q 163 drivers/net/ethernet/mellanox/mlxsw/pci.c return mlxsw_pci_queue_elem_info_get(q, index);
q 166 drivers/net/ethernet/mellanox/mlxsw/pci.c static char *mlxsw_pci_queue_elem_get(struct mlxsw_pci_queue *q, int elem_index)
q 168 drivers/net/ethernet/mellanox/mlxsw/pci.c return mlxsw_pci_queue_elem_info_get(q, elem_index)->elem;
q 171 drivers/net/ethernet/mellanox/mlxsw/pci.c static bool mlxsw_pci_elem_hw_owned(struct mlxsw_pci_queue *q, bool owner_bit)
q 173 drivers/net/ethernet/mellanox/mlxsw/pci.c return owner_bit != !!(q->consumer_counter & q->count);
q 206 drivers/net/ethernet/mellanox/mlxsw/pci.c return &mlxsw_pci->queues[q_type].q[q_num];
q 236 drivers/net/ethernet/mellanox/mlxsw/pci.c struct mlxsw_pci_queue *q,
q 241 drivers/net/ethernet/mellanox/mlxsw/pci.c mlxsw_pci_doorbell_type_offset[q->type],
q 242 drivers/net/ethernet/mellanox/mlxsw/pci.c q->num), val);
q 246 drivers/net/ethernet/mellanox/mlxsw/pci.c struct mlxsw_pci_queue *q,
q 251 drivers/net/ethernet/mellanox/mlxsw/pci.c mlxsw_pci_doorbell_arm_type_offset[q->type],
q 252 drivers/net/ethernet/mellanox/mlxsw/pci.c q->num), val);
q 256 drivers/net/ethernet/mellanox/mlxsw/pci.c struct mlxsw_pci_queue *q)
q 259 drivers/net/ethernet/mellanox/mlxsw/pci.c __mlxsw_pci_queue_doorbell_set(mlxsw_pci, q, q->producer_counter);
q 263 drivers/net/ethernet/mellanox/mlxsw/pci.c struct mlxsw_pci_queue *q)
q 266 drivers/net/ethernet/mellanox/mlxsw/pci.c __mlxsw_pci_queue_doorbell_set(mlxsw_pci, q,
q 267 drivers/net/ethernet/mellanox/mlxsw/pci.c q->consumer_counter + q->count);
q 272 drivers/net/ethernet/mellanox/mlxsw/pci.c struct mlxsw_pci_queue *q)
q 275 drivers/net/ethernet/mellanox/mlxsw/pci.c __mlxsw_pci_queue_doorbell_arm_set(mlxsw_pci, q, q->consumer_counter);
q 278 drivers/net/ethernet/mellanox/mlxsw/pci.c static dma_addr_t __mlxsw_pci_queue_page_get(struct mlxsw_pci_queue *q,
q 281 drivers/net/ethernet/mellanox/mlxsw/pci.c return q->mem_item.mapaddr + MLXSW_PCI_PAGE_SIZE * page_index;
q 285 drivers/net/ethernet/mellanox/mlxsw/pci.c struct mlxsw_pci_queue *q)
q 290 drivers/net/ethernet/mellanox/mlxsw/pci.c q->producer_counter = 0;
q 291 drivers/net/ethernet/mellanox/mlxsw/pci.c q->consumer_counter = 0;
q 294 drivers/net/ethernet/mellanox/mlxsw/pci.c mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, q->num);
q 298 drivers/net/ethernet/mellanox/mlxsw/pci.c dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);
q 303 drivers/net/ethernet/mellanox/mlxsw/pci.c err = mlxsw_cmd_sw2hw_sdq(mlxsw_pci->core, mbox, q->num);
q 306 drivers/net/ethernet/mellanox/mlxsw/pci.c mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
q 311 drivers/net/ethernet/mellanox/mlxsw/pci.c struct mlxsw_pci_queue *q)
q 313 drivers/net/ethernet/mellanox/mlxsw/pci.c mlxsw_cmd_hw2sw_sdq(mlxsw_pci->core, q->num);
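
The mlxsw helpers above index a power-of-two ring with free-running 16-bit counters: the slot is counter & (count - 1), the ring is full when producer and consumer differ by exactly count, and element ownership is an owner bit compared against bit 'count' of the consumer counter (mlxsw_pci_elem_hw_owned). A self-contained model of that arithmetic, with illustrative names:

#include <stdint.h>
#include <stdio.h>

struct ring { uint16_t producer, consumer, count; /* count is a power of 2 */ };

static int ring_index(uint16_t counter, uint16_t count)
{
	return counter & (count - 1); /* mask the free-running counter */
}

static int ring_full(const struct ring *r)
{
	return (uint16_t)(r->producer - r->consumer) == r->count;
}

static int hw_owned(const struct ring *r, int owner_bit)
{
	/* The owner bit flips each pass; bit 'count' of the counter tracks
	 * which pass the consumer is on. */
	return owner_bit != !!(r->consumer & r->count);
}

int main(void)
{
	struct ring r = { .producer = 70, .consumer = 6, .count = 64 };

	printf("slot=%d full=%d hw_owned(1)=%d\n",
	       ring_index(r.producer, r.count), ring_full(&r), hw_owned(&r, 1));
	/* slot=6 full=1 hw_owned(1)=1 */
	return 0;
}
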
q 387 drivers/net/ethernet/mellanox/mlxsw/pci.c struct mlxsw_pci_queue *q)
q 394 drivers/net/ethernet/mellanox/mlxsw/pci.c q->producer_counter = 0;
q 395 drivers/net/ethernet/mellanox/mlxsw/pci.c q->consumer_counter = 0;
q 400 drivers/net/ethernet/mellanox/mlxsw/pci.c mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, sdq_count + q->num);
q 403 drivers/net/ethernet/mellanox/mlxsw/pci.c dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);
q 408 drivers/net/ethernet/mellanox/mlxsw/pci.c err = mlxsw_cmd_sw2hw_rdq(mlxsw_pci->core, mbox, q->num);
q 412 drivers/net/ethernet/mellanox/mlxsw/pci.c mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
q 414 drivers/net/ethernet/mellanox/mlxsw/pci.c for (i = 0; i < q->count; i++) {
q 415 drivers/net/ethernet/mellanox/mlxsw/pci.c elem_info = mlxsw_pci_queue_elem_info_producer_get(q);
q 421 drivers/net/ethernet/mellanox/mlxsw/pci.c q->producer_counter++;
q 422 drivers/net/ethernet/mellanox/mlxsw/pci.c mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
q 429 drivers/net/ethernet/mellanox/mlxsw/pci.c elem_info = mlxsw_pci_queue_elem_info_get(q, i);
q 432 drivers/net/ethernet/mellanox/mlxsw/pci.c mlxsw_cmd_hw2sw_rdq(mlxsw_pci->core, q->num);
q 438 drivers/net/ethernet/mellanox/mlxsw/pci.c struct mlxsw_pci_queue *q)
q 443 drivers/net/ethernet/mellanox/mlxsw/pci.c mlxsw_cmd_hw2sw_rdq(mlxsw_pci->core, q->num);
q 444 drivers/net/ethernet/mellanox/mlxsw/pci.c for (i = 0; i < q->count; i++) {
q 445 drivers/net/ethernet/mellanox/mlxsw/pci.c elem_info = mlxsw_pci_queue_elem_info_get(q, i);
q 451 drivers/net/ethernet/mellanox/mlxsw/pci.c struct mlxsw_pci_queue *q)
q 453 drivers/net/ethernet/mellanox/mlxsw/pci.c q->u.cq.v = mlxsw_pci->max_cqe_ver;
q 456 drivers/net/ethernet/mellanox/mlxsw/pci.c if (q->u.cq.v == MLXSW_PCI_CQE_V2 &&
q 457 drivers/net/ethernet/mellanox/mlxsw/pci.c q->num < mlxsw_pci->num_sdq_cqs)
q 458 drivers/net/ethernet/mellanox/mlxsw/pci.c q->u.cq.v = MLXSW_PCI_CQE_V1;
q 462 drivers/net/ethernet/mellanox/mlxsw/pci.c struct mlxsw_pci_queue *q)
q 467 drivers/net/ethernet/mellanox/mlxsw/pci.c q->consumer_counter = 0;
q 469 drivers/net/ethernet/mellanox/mlxsw/pci.c for (i = 0; i < q->count; i++) {
q 470 drivers/net/ethernet/mellanox/mlxsw/pci.c char *elem = mlxsw_pci_queue_elem_get(q, i);
q 472 drivers/net/ethernet/mellanox/mlxsw/pci.c mlxsw_pci_cqe_owner_set(q->u.cq.v, elem, 1);
q 475 drivers/net/ethernet/mellanox/mlxsw/pci.c if (q->u.cq.v == MLXSW_PCI_CQE_V1)
q 478 drivers/net/ethernet/mellanox/mlxsw/pci.c else if (q->u.cq.v == MLXSW_PCI_CQE_V2)
q 484 drivers/net/ethernet/mellanox/mlxsw/pci.c mlxsw_cmd_mbox_sw2hw_cq_log_cq_size_set(mbox, ilog2(q->count));
q 486 drivers/net/ethernet/mellanox/mlxsw/pci.c dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);
q 490 drivers/net/ethernet/mellanox/mlxsw/pci.c err = mlxsw_cmd_sw2hw_cq(mlxsw_pci->core, mbox, q->num);
q 493 drivers/net/ethernet/mellanox/mlxsw/pci.c mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
q 494 drivers/net/ethernet/mellanox/mlxsw/pci.c mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
q 499 drivers/net/ethernet/mellanox/mlxsw/pci.c struct mlxsw_pci_queue *q)
q 501 drivers/net/ethernet/mellanox/mlxsw/pci.c mlxsw_cmd_hw2sw_cq(mlxsw_pci->core, q->num);
q 505 drivers/net/ethernet/mellanox/mlxsw/pci.c struct mlxsw_pci_queue *q,
q 516 drivers/net/ethernet/mellanox/mlxsw/pci.c spin_lock(&q->lock);
q 517 drivers/net/ethernet/mellanox/mlxsw/pci.c elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
q 535 drivers/net/ethernet/mellanox/mlxsw/pci.c if (q->consumer_counter++ != consumer_counter_limit)
q 537 drivers/net/ethernet/mellanox/mlxsw/pci.c spin_unlock(&q->lock);
q 541 drivers/net/ethernet/mellanox/mlxsw/pci.c struct mlxsw_pci_queue *q,
q 553 drivers/net/ethernet/mellanox/mlxsw/pci.c elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
q 560 drivers/net/ethernet/mellanox/mlxsw/pci.c if (q->consumer_counter++ != consumer_counter_limit)
q 581 drivers/net/ethernet/mellanox/mlxsw/pci.c memset(wqe, 0, q->elem_size);
q 586 drivers/net/ethernet/mellanox/mlxsw/pci.c q->producer_counter++;
q 587 drivers/net/ethernet/mellanox/mlxsw/pci.c mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
q 591 drivers/net/ethernet/mellanox/mlxsw/pci.c static char *mlxsw_pci_cq_sw_cqe_get(struct mlxsw_pci_queue *q)
q 597 drivers/net/ethernet/mellanox/mlxsw/pci.c elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
q 599 drivers/net/ethernet/mellanox/mlxsw/pci.c owner_bit = mlxsw_pci_cqe_owner_get(q->u.cq.v, elem);
q 600 drivers/net/ethernet/mellanox/mlxsw/pci.c if (mlxsw_pci_elem_hw_owned(q, owner_bit))
q 602 drivers/net/ethernet/mellanox/mlxsw/pci.c q->consumer_counter++;
q 609 drivers/net/ethernet/mellanox/mlxsw/pci.c struct mlxsw_pci_queue *q = (struct mlxsw_pci_queue *) data;
q 610 drivers/net/ethernet/mellanox/mlxsw/pci.c struct mlxsw_pci *mlxsw_pci = q->pci;
q 613 drivers/net/ethernet/mellanox/mlxsw/pci.c int credits = q->count >> 1;
q 615 drivers/net/ethernet/mellanox/mlxsw/pci.c while ((cqe = mlxsw_pci_cq_sw_cqe_get(q))) {
q 617 drivers/net/ethernet/mellanox/mlxsw/pci.c u8 sendq = mlxsw_pci_cqe_sr_get(q->u.cq.v, cqe);
q 618 drivers/net/ethernet/mellanox/mlxsw/pci.c u8 dqn = mlxsw_pci_cqe_dqn_get(q->u.cq.v, cqe);
q 621 drivers/net/ethernet/mellanox/mlxsw/pci.c memcpy(ncqe, cqe, q->elem_size);
q 622 drivers/net/ethernet/mellanox/mlxsw/pci.c mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
q 630 drivers/net/ethernet/mellanox/mlxsw/pci.c q->u.cq.comp_sdq_count++;
q 636 drivers/net/ethernet/mellanox/mlxsw/pci.c wqe_counter, q->u.cq.v, ncqe);
q 637 drivers/net/ethernet/mellanox/mlxsw/pci.c q->u.cq.comp_rdq_count++;
q 643 drivers/net/ethernet/mellanox/mlxsw/pci.c mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
q 646 drivers/net/ethernet/mellanox/mlxsw/pci.c static u16 mlxsw_pci_cq_elem_count(const struct mlxsw_pci_queue *q)
q 648 drivers/net/ethernet/mellanox/mlxsw/pci.c return q->u.cq.v == MLXSW_PCI_CQE_V2 ? MLXSW_PCI_CQE2_COUNT :
q 652 drivers/net/ethernet/mellanox/mlxsw/pci.c static u8 mlxsw_pci_cq_elem_size(const struct mlxsw_pci_queue *q)
q 654 drivers/net/ethernet/mellanox/mlxsw/pci.c return q->u.cq.v == MLXSW_PCI_CQE_V2 ? MLXSW_PCI_CQE2_SIZE :
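
The CQ tasklet entries above cap each run at credits = q->count >> 1 completions, so one busy completion queue yields the CPU and is rescheduled rather than polled to exhaustion. A sketch of that budgeted-polling shape; the reschedule flag stands in for tasklet_schedule(), and all names are illustrative:

#include <stdio.h>

/* Service at most half the ring per invocation; signal a reschedule
 * if work remains when the budget runs out. */
static int service_completions(int pending, int count, int *rescheduled)
{
	int credits = count >> 1;
	int done = 0;

	while (pending > 0 && credits > 0) {
		pending--;  /* consume one completion entry */
		credits--;
		done++;
	}
	*rescheduled = (pending > 0);
	return done;
}

int main(void)
{
	int resched;
	int done = service_completions(100, 64, &resched);

	printf("done=%d resched=%d\n", done, resched); /* done=32 resched=1 */
	return 0;
}
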
q 659 drivers/net/ethernet/mellanox/mlxsw/pci.c struct mlxsw_pci_queue *q)
q 664 drivers/net/ethernet/mellanox/mlxsw/pci.c q->consumer_counter = 0;
q 666 drivers/net/ethernet/mellanox/mlxsw/pci.c for (i = 0; i < q->count; i++) {
q 667 drivers/net/ethernet/mellanox/mlxsw/pci.c char *elem = mlxsw_pci_queue_elem_get(q, i);
q 674 drivers/net/ethernet/mellanox/mlxsw/pci.c mlxsw_cmd_mbox_sw2hw_eq_log_eq_size_set(mbox, ilog2(q->count));
q 676 drivers/net/ethernet/mellanox/mlxsw/pci.c dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);
q 680 drivers/net/ethernet/mellanox/mlxsw/pci.c err = mlxsw_cmd_sw2hw_eq(mlxsw_pci->core, mbox, q->num);
q 683 drivers/net/ethernet/mellanox/mlxsw/pci.c mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
q 684 drivers/net/ethernet/mellanox/mlxsw/pci.c mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
q 689 drivers/net/ethernet/mellanox/mlxsw/pci.c struct mlxsw_pci_queue *q)
q 691 drivers/net/ethernet/mellanox/mlxsw/pci.c mlxsw_cmd_hw2sw_eq(mlxsw_pci->core, q->num);
q 704 drivers/net/ethernet/mellanox/mlxsw/pci.c static char *mlxsw_pci_eq_sw_eqe_get(struct mlxsw_pci_queue *q)
q 710 drivers/net/ethernet/mellanox/mlxsw/pci.c elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
q 713 drivers/net/ethernet/mellanox/mlxsw/pci.c if (mlxsw_pci_elem_hw_owned(q, owner_bit))
q 715 drivers/net/ethernet/mellanox/mlxsw/pci.c q->consumer_counter++;
q 722 drivers/net/ethernet/mellanox/mlxsw/pci.c struct mlxsw_pci_queue *q = (struct mlxsw_pci_queue *) data;
q 723 drivers/net/ethernet/mellanox/mlxsw/pci.c struct mlxsw_pci *mlxsw_pci = q->pci;
q 730 drivers/net/ethernet/mellanox/mlxsw/pci.c int credits = q->count >> 1;
q 734 drivers/net/ethernet/mellanox/mlxsw/pci.c while ((eqe = mlxsw_pci_eq_sw_eqe_get(q))) {
q 740 drivers/net/ethernet/mellanox/mlxsw/pci.c switch (q->num) {
q 743 drivers/net/ethernet/mellanox/mlxsw/pci.c q->u.eq.ev_cmd_count++;
q 749 drivers/net/ethernet/mellanox/mlxsw/pci.c q->u.eq.ev_comp_count++;
q 752 drivers/net/ethernet/mellanox/mlxsw/pci.c q->u.eq.ev_other_count++;
q 758 drivers/net/ethernet/mellanox/mlxsw/pci.c mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
q 759 drivers/net/ethernet/mellanox/mlxsw/pci.c mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
q 765 drivers/net/ethernet/mellanox/mlxsw/pci.c q = mlxsw_pci_cq_get(mlxsw_pci, cqn);
q 766 drivers/net/ethernet/mellanox/mlxsw/pci.c mlxsw_pci_queue_tasklet_schedule(q);
q 774 drivers/net/ethernet/mellanox/mlxsw/pci.c struct mlxsw_pci_queue *q);
q 776 drivers/net/ethernet/mellanox/mlxsw/pci.c struct mlxsw_pci_queue *q);
q 778 drivers/net/ethernet/mellanox/mlxsw/pci.c struct mlxsw_pci_queue *q);
q 780 drivers/net/ethernet/mellanox/mlxsw/pci.c u16 (*elem_count_f)(const struct mlxsw_pci_queue *q);
q 781 drivers/net/ethernet/mellanox/mlxsw/pci.c u8 (*elem_size_f)(const struct mlxsw_pci_queue *q);
q 823 drivers/net/ethernet/mellanox/mlxsw/pci.c struct mlxsw_pci_queue *q, u8 q_num)
q 825 drivers/net/ethernet/mellanox/mlxsw/pci.c struct mlxsw_pci_mem_item *mem_item = &q->mem_item;
q 829 drivers/net/ethernet/mellanox/mlxsw/pci.c q->num = q_num;
q 831 drivers/net/ethernet/mellanox/mlxsw/pci.c q_ops->pre_init(mlxsw_pci, q);
q 833 drivers/net/ethernet/mellanox/mlxsw/pci.c spin_lock_init(&q->lock);
q 834 drivers/net/ethernet/mellanox/mlxsw/pci.c q->count = q_ops->elem_count_f ? q_ops->elem_count_f(q) :
q 836 drivers/net/ethernet/mellanox/mlxsw/pci.c q->elem_size = q_ops->elem_size_f ? q_ops->elem_size_f(q) :
q 838 drivers/net/ethernet/mellanox/mlxsw/pci.c q->type = q_ops->type;
q 839 drivers/net/ethernet/mellanox/mlxsw/pci.c q->pci = mlxsw_pci;
q 842 drivers/net/ethernet/mellanox/mlxsw/pci.c tasklet_init(&q->tasklet, q_ops->tasklet, (unsigned long) q);
q 851 drivers/net/ethernet/mellanox/mlxsw/pci.c q->elem_info = kcalloc(q->count, sizeof(*q->elem_info), GFP_KERNEL);
q 852 drivers/net/ethernet/mellanox/mlxsw/pci.c if (!q->elem_info) {
q 860 drivers/net/ethernet/mellanox/mlxsw/pci.c for (i = 0; i < q->count; i++) {
q 863 drivers/net/ethernet/mellanox/mlxsw/pci.c elem_info = mlxsw_pci_queue_elem_info_get(q, i);
q 865 drivers/net/ethernet/mellanox/mlxsw/pci.c __mlxsw_pci_queue_elem_get(q, q->elem_size, i);
q 869 drivers/net/ethernet/mellanox/mlxsw/pci.c err = q_ops->init(mlxsw_pci, mbox, q);
q 875 drivers/net/ethernet/mellanox/mlxsw/pci.c kfree(q->elem_info);
q 884 drivers/net/ethernet/mellanox/mlxsw/pci.c struct mlxsw_pci_queue *q)
q 886 drivers/net/ethernet/mellanox/mlxsw/pci.c struct mlxsw_pci_mem_item *mem_item = &q->mem_item;
q 888 drivers/net/ethernet/mellanox/mlxsw/pci.c q_ops->fini(mlxsw_pci, q);
q 889 drivers/net/ethernet/mellanox/mlxsw/pci.c kfree(q->elem_info);
q 903 drivers/net/ethernet/mellanox/mlxsw/pci.c queue_group->q = kcalloc(num_qs, sizeof(*queue_group->q), GFP_KERNEL);
q 904 drivers/net/ethernet/mellanox/mlxsw/pci.c if (!queue_group->q)
q 909 drivers/net/ethernet/mellanox/mlxsw/pci.c &queue_group->q[i], i);
q 919 drivers/net/ethernet/mellanox/mlxsw/pci.c mlxsw_pci_queue_fini(mlxsw_pci, q_ops, &queue_group->q[i]);
q 920 drivers/net/ethernet/mellanox/mlxsw/pci.c kfree(queue_group->q);
q 932 drivers/net/ethernet/mellanox/mlxsw/pci.c mlxsw_pci_queue_fini(mlxsw_pci, q_ops, &queue_group->q[i]);
q 933 drivers/net/ethernet/mellanox/mlxsw/pci.c kfree(queue_group->q);
q 1285 drivers/net/ethernet/mellanox/mlxsw/pci.c struct mlxsw_pci_queue *q;
q 1289 drivers/net/ethernet/mellanox/mlxsw/pci.c q = mlxsw_pci_eq_get(mlxsw_pci, i);
q 1290 drivers/net/ethernet/mellanox/mlxsw/pci.c mlxsw_pci_queue_tasklet_schedule(q);
q 1560 drivers/net/ethernet/mellanox/mlxsw/pci.c struct mlxsw_pci_queue *q = mlxsw_pci_sdq_pick(mlxsw_pci, tx_info);
q 1562 drivers/net/ethernet/mellanox/mlxsw/pci.c return !mlxsw_pci_queue_elem_info_producer_get(q);
q 1569 drivers/net/ethernet/mellanox/mlxsw/pci.c struct mlxsw_pci_queue *q;
q 1581 drivers/net/ethernet/mellanox/mlxsw/pci.c q = mlxsw_pci_sdq_pick(mlxsw_pci, tx_info);
q 1582 drivers/net/ethernet/mellanox/mlxsw/pci.c spin_lock_bh(&q->lock);
q 1583 drivers/net/ethernet/mellanox/mlxsw/pci.c elem_info = mlxsw_pci_queue_elem_info_producer_get(q);
q 1621 drivers/net/ethernet/mellanox/mlxsw/pci.c q->producer_counter++;
q 1622 drivers/net/ethernet/mellanox/mlxsw/pci.c mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
q 1630 drivers/net/ethernet/mellanox/mlxsw/pci.c spin_unlock_bh(&q->lock);
q 670 drivers/net/ethernet/mscc/ocelot_ace.c struct list_head *pos, *q;
q 672 drivers/net/ethernet/mscc/ocelot_ace.c list_for_each_safe(pos, q, &block->rules) {
q 573 drivers/net/ethernet/netronome/nfp/flower/cmsg.h u8 vnic, u8 q)
q 578 drivers/net/ethernet/netronome/nfp/flower/cmsg.h FIELD_PREP(NFP_FLOWER_CMSG_PORT_PCIE_Q, q) |
q 784 drivers/net/ethernet/netronome/nfp/nfp_net.h static inline void _nfp_qcp_ptr_add(u8 __iomem *q,
q 795 drivers/net/ethernet/netronome/nfp/nfp_net.h writel(NFP_QCP_MAX_ADD, q + off);
q 799 drivers/net/ethernet/netronome/nfp/nfp_net.h writel(val, q + off);
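
_nfp_qcp_ptr_add() above advances a queue-controller pointer by writing increments into a register, and a large increment is issued as repeated writes of at most NFP_QCP_MAX_ADD. A standalone model of the chunking; the bound defined here is illustrative, not the hardware constant, and the write log stands in for writel():

#include <stdint.h>
#include <stdio.h>

#define QCP_MAX_ADD 0x7f /* illustrative cap on a single increment write */

/* Record each register write into reg_writes[]; the value written is
 * the increment, not the new pointer total. */
static void qcp_ptr_add(uint32_t *reg_writes, int *n, uint32_t val)
{
	while (val > QCP_MAX_ADD) {
		reg_writes[(*n)++] = QCP_MAX_ADD; /* writel(NFP_QCP_MAX_ADD, ...) */
		val -= QCP_MAX_ADD;
	}
	reg_writes[(*n)++] = val;                 /* final partial increment */
}

int main(void)
{
	uint32_t w[8];
	int n = 0;

	qcp_ptr_add(w, &n, 300); /* 300 = 127 + 127 + 46 */
	for (int i = 0; i < n; i++)
		printf("write %u\n", w[i]);
	return 0;
}
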
q 810 drivers/net/ethernet/netronome/nfp/nfp_net.h static inline void nfp_qcp_rd_ptr_add(u8 __iomem *q, u32 val)
q 812 drivers/net/ethernet/netronome/nfp/nfp_net.h _nfp_qcp_ptr_add(q, NFP_QCP_READ_PTR, val);
q 823 drivers/net/ethernet/netronome/nfp/nfp_net.h static inline void nfp_qcp_wr_ptr_add(u8 __iomem *q, u32 val)
q 825 drivers/net/ethernet/netronome/nfp/nfp_net.h _nfp_qcp_ptr_add(q, NFP_QCP_WRITE_PTR, val);
q 828 drivers/net/ethernet/netronome/nfp/nfp_net.h static inline u32 _nfp_qcp_read(u8 __iomem *q, enum nfp_qcp_ptr ptr)
q 838 drivers/net/ethernet/netronome/nfp/nfp_net.h val = readl(q + off);
q 852 drivers/net/ethernet/netronome/nfp/nfp_net.h static inline u32 nfp_qcp_rd_ptr_read(u8 __iomem *q)
q 854 drivers/net/ethernet/netronome/nfp/nfp_net.h return _nfp_qcp_read(q, NFP_QCP_READ_PTR);
q 863 drivers/net/ethernet/netronome/nfp/nfp_net.h static inline u32 nfp_qcp_wr_ptr_read(u8 __iomem *q)
q 865 drivers/net/ethernet/netronome/nfp/nfp_net.h return _nfp_qcp_read(q, NFP_QCP_WRITE_PTR);
q 77 drivers/net/ethernet/pensando/ionic/ionic_debugfs.c struct ionic_queue *q = seq->private;
q 79 drivers/net/ethernet/pensando/ionic/ionic_debugfs.c seq_printf(seq, "%d\n", q->tail->index);
q 87 drivers/net/ethernet/pensando/ionic/ionic_debugfs.c struct ionic_queue *q = seq->private;
q 89 drivers/net/ethernet/pensando/ionic/ionic_debugfs.c seq_printf(seq, "%d\n", q->head->index);
q 121 drivers/net/ethernet/pensando/ionic/ionic_debugfs.c struct ionic_queue *q = &qcq->q;
q 124 drivers/net/ethernet/pensando/ionic/ionic_debugfs.c qcq->dentry = debugfs_create_dir(q->name, lif->dentry);
q 131 drivers/net/ethernet/pensando/ionic/ionic_debugfs.c debugfs_create_u32("index", 0400, q_dentry, &q->index);
q 132 drivers/net/ethernet/pensando/ionic/ionic_debugfs.c debugfs_create_x64("base_pa", 0400, q_dentry, &q->base_pa);
q 135 drivers/net/ethernet/pensando/ionic/ionic_debugfs.c &q->sg_base_pa);
q 137 drivers/net/ethernet/pensando/ionic/ionic_debugfs.c &q->sg_desc_size);
q 139 drivers/net/ethernet/pensando/ionic/ionic_debugfs.c debugfs_create_u32("num_descs", 0400, q_dentry, &q->num_descs);
q 140 drivers/net/ethernet/pensando/ionic/ionic_debugfs.c debugfs_create_u32("desc_size", 0400, q_dentry, &q->desc_size);
q 141 drivers/net/ethernet/pensando/ionic/ionic_debugfs.c debugfs_create_u32("pid", 0400, q_dentry, &q->pid);
q 142 drivers/net/ethernet/pensando/ionic/ionic_debugfs.c debugfs_create_u32("qid", 0400, q_dentry, &q->hw_index);
q 143 drivers/net/ethernet/pensando/ionic/ionic_debugfs.c debugfs_create_u32("qtype", 0400, q_dentry, &q->hw_type);
q 144 drivers/net/ethernet/pensando/ionic/ionic_debugfs.c debugfs_create_u64("drop", 0400, q_dentry, &q->drop);
q 145 drivers/net/ethernet/pensando/ionic/ionic_debugfs.c debugfs_create_u64("stop", 0400, q_dentry, &q->stop);
q 146 drivers/net/ethernet/pensando/ionic/ionic_debugfs.c debugfs_create_u64("wake", 0400, q_dentry, &q->wake);
q 148 drivers/net/ethernet/pensando/ionic/ionic_debugfs.c debugfs_create_file("tail", 0400, q_dentry, q, &q_tail_fops);
q 149 drivers/net/ethernet/pensando/ionic/ionic_debugfs.c debugfs_create_file("head", 0400, q_dentry, q, &q_head_fops);
q 154 drivers/net/ethernet/pensando/ionic/ionic_debugfs.c desc_blob->data = q->base;
q 155 drivers/net/ethernet/pensando/ionic/ionic_debugfs.c desc_blob->size = (unsigned long)q->num_descs * q->desc_size;
q 162 drivers/net/ethernet/pensando/ionic/ionic_debugfs.c desc_blob->data = q->sg_base;
q 163 drivers/net/ethernet/pensando/ionic/ionic_debugfs.c desc_blob->size = (unsigned long)q->num_descs * q->sg_desc_size;
q 268 drivers/net/ethernet/pensando/ionic/ionic_dev.c struct ionic_queue *q = &qcq->q;
q 274 drivers/net/ethernet/pensando/ionic/ionic_dev.c .q_init.type = q->type,
q 275 drivers/net/ethernet/pensando/ionic/ionic_dev.c .q_init.index = cpu_to_le32(q->index),
q 278 drivers/net/ethernet/pensando/ionic/ionic_dev.c .q_init.pid = cpu_to_le16(q->pid),
q 280 drivers/net/ethernet/pensando/ionic/ionic_dev.c .q_init.ring_size = ilog2(q->num_descs),
q 281 drivers/net/ethernet/pensando/ionic/ionic_dev.c .q_init.ring_base = cpu_to_le64(q->base_pa),
q 343 drivers/net/ethernet/pensando/ionic/ionic_dev.c void ionic_cq_bind(struct ionic_cq *cq, struct ionic_queue *q)
q 345 drivers/net/ethernet/pensando/ionic/ionic_dev.c cq->bound_q = q;
q 374 drivers/net/ethernet/pensando/ionic/ionic_dev.c struct ionic_queue *q, unsigned int index, const char *name,
q 389 drivers/net/ethernet/pensando/ionic/ionic_dev.c q->lif = lif;
q 390 drivers/net/ethernet/pensando/ionic/ionic_dev.c q->idev = idev;
q 391 drivers/net/ethernet/pensando/ionic/ionic_dev.c q->index = index;
q 392 drivers/net/ethernet/pensando/ionic/ionic_dev.c q->num_descs = num_descs;
q 393 drivers/net/ethernet/pensando/ionic/ionic_dev.c q->desc_size = desc_size;
q 394 drivers/net/ethernet/pensando/ionic/ionic_dev.c q->sg_desc_size = sg_desc_size;
q 395 drivers/net/ethernet/pensando/ionic/ionic_dev.c q->tail = q->info;
q 396 drivers/net/ethernet/pensando/ionic/ionic_dev.c q->head = q->tail;
q 397 drivers/net/ethernet/pensando/ionic/ionic_dev.c q->pid = pid;
q 399 drivers/net/ethernet/pensando/ionic/ionic_dev.c snprintf(q->name, sizeof(q->name), "L%d-%s%u", lif->index, name, index);
q 401 drivers/net/ethernet/pensando/ionic/ionic_dev.c cur = q->info;
q 405 drivers/net/ethernet/pensando/ionic/ionic_dev.c cur->next = q->info;
q 416 drivers/net/ethernet/pensando/ionic/ionic_dev.c void ionic_q_map(struct ionic_queue *q, void *base, dma_addr_t base_pa)
q 421 drivers/net/ethernet/pensando/ionic/ionic_dev.c q->base = base;
q 422 drivers/net/ethernet/pensando/ionic/ionic_dev.c q->base_pa = base_pa;
q 424 drivers/net/ethernet/pensando/ionic/ionic_dev.c for (i = 0, cur = q->info; i < q->num_descs; i++, cur++)
q 425 drivers/net/ethernet/pensando/ionic/ionic_dev.c cur->desc = base + (i * q->desc_size);
q 428 drivers/net/ethernet/pensando/ionic/ionic_dev.c void ionic_q_sg_map(struct ionic_queue *q, void *base, dma_addr_t base_pa)
q 433 drivers/net/ethernet/pensando/ionic/ionic_dev.c q->sg_base = base;
q 434 drivers/net/ethernet/pensando/ionic/ionic_dev.c q->sg_base_pa = base_pa;
q 436 drivers/net/ethernet/pensando/ionic/ionic_dev.c for (i = 0, cur = q->info; i < q->num_descs; i++, cur++)
q 437 drivers/net/ethernet/pensando/ionic/ionic_dev.c cur->sg_desc = base + (i * q->sg_desc_size);
q 440 drivers/net/ethernet/pensando/ionic/ionic_dev.c void ionic_q_post(struct ionic_queue *q, bool ring_doorbell, ionic_desc_cb cb,
q 443 drivers/net/ethernet/pensando/ionic/ionic_dev.c struct device *dev = q->lif->ionic->dev;
q 444 drivers/net/ethernet/pensando/ionic/ionic_dev.c struct ionic_lif *lif = q->lif;
q 446 drivers/net/ethernet/pensando/ionic/ionic_dev.c q->head->cb = cb;
q 447 drivers/net/ethernet/pensando/ionic/ionic_dev.c q->head->cb_arg = cb_arg;
q 448 drivers/net/ethernet/pensando/ionic/ionic_dev.c q->head = q->head->next;
q 451 drivers/net/ethernet/pensando/ionic/ionic_dev.c q->lif->index, q->name, q->hw_type, q->hw_index,
q 452 drivers/net/ethernet/pensando/ionic/ionic_dev.c q->head->index, ring_doorbell);
q 455 drivers/net/ethernet/pensando/ionic/ionic_dev.c ionic_dbell_ring(lif->kern_dbpage, q->hw_type,
q 456 drivers/net/ethernet/pensando/ionic/ionic_dev.c q->dbval | q->head->index);
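
ionic_q_init() and ionic_q_post() above chain the q->info descriptor array into a circle through ->next, advance q->head on each post, and ring a doorbell whose value ORs the queue-id bits (q->dbval) with the new head index. A compact userspace model of that bookkeeping, with illustrative types:

#include <stdio.h>
#include <stdlib.h>

struct desc_info { int index; struct desc_info *next; };

struct queue {
	struct desc_info *info, *head, *tail;
	unsigned int num_descs;
	unsigned long long dbval; /* queue-id bits, as from IONIC_DBELL_QID() */
};

static int q_init(struct queue *q, unsigned int num_descs)
{
	q->info = calloc(num_descs, sizeof(*q->info));
	if (!q->info)
		return -1;
	q->num_descs = num_descs;
	for (unsigned int i = 0; i < num_descs; i++) {
		q->info[i].index = i;
		q->info[i].next = &q->info[(i + 1) % num_descs]; /* wrap to info[0] */
	}
	q->head = q->tail = q->info;
	return 0;
}

static unsigned long long q_post(struct queue *q)
{
	q->head = q->head->next;          /* advance past the filled slot   */
	return q->dbval | q->head->index; /* value rung into the doorbell   */
}

int main(void)
{
	struct queue q = { .dbval = 0x500 };

	if (q_init(&q, 4))
		return 1;
	printf("dbell=0x%llx\n", q_post(&q)); /* 0x501 */
	free(q.info);
	return 0;
}
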
q 459 drivers/net/ethernet/pensando/ionic/ionic_dev.c static bool ionic_q_is_posted(struct ionic_queue *q, unsigned int pos)
q 463 drivers/net/ethernet/pensando/ionic/ionic_dev.c mask = q->num_descs - 1;
q 464 drivers/net/ethernet/pensando/ionic/ionic_dev.c tail = q->tail->index;
q 465 drivers/net/ethernet/pensando/ionic/ionic_dev.c head = q->head->index;
q 470 drivers/net/ethernet/pensando/ionic/ionic_dev.c void ionic_q_service(struct ionic_queue *q, struct ionic_cq_info *cq_info,
q 478 drivers/net/ethernet/pensando/ionic/ionic_dev.c if (q->tail->index == q->head->index)
q 482 drivers/net/ethernet/pensando/ionic/ionic_dev.c if (unlikely(!ionic_q_is_posted(q, stop_index)))
q 483 drivers/net/ethernet/pensando/ionic/ionic_dev.c dev_err(q->lif->ionic->dev,
q 485 drivers/net/ethernet/pensando/ionic/ionic_dev.c q->name, stop_index, q->tail->index, q->head->index);
q 488 drivers/net/ethernet/pensando/ionic/ionic_dev.c desc_info = q->tail;
q 489 drivers/net/ethernet/pensando/ionic/ionic_dev.c q->tail = desc_info->next;
q 498 drivers/net/ethernet/pensando/ionic/ionic_dev.c cb(q, desc_info, cq_info, cb_arg);
q 150 drivers/net/ethernet/pensando/ionic/ionic_dev.h typedef void (*ionic_desc_cb)(struct ionic_queue *q,
q 228 drivers/net/ethernet/pensando/ionic/ionic_dev.h static inline unsigned int ionic_q_space_avail(struct ionic_queue *q)
q 230 drivers/net/ethernet/pensando/ionic/ionic_dev.h unsigned int avail = q->tail->index;
q 232 drivers/net/ethernet/pensando/ionic/ionic_dev.h if (q->head->index >= avail)
q 233 drivers/net/ethernet/pensando/ionic/ionic_dev.h avail += q->head->left - 1;
q 235 drivers/net/ethernet/pensando/ionic/ionic_dev.h avail -= q->head->index + 1;
q 240 drivers/net/ethernet/pensando/ionic/ionic_dev.h static inline bool ionic_q_has_space(struct ionic_queue *q, unsigned int want)
q 242 drivers/net/ethernet/pensando/ionic/ionic_dev.h return ionic_q_space_avail(q) >= want;
q 280 drivers/net/ethernet/pensando/ionic/ionic_dev.h void ionic_cq_bind(struct ionic_cq *cq, struct ionic_queue *q);
q 288 drivers/net/ethernet/pensando/ionic/ionic_dev.h struct ionic_queue *q, unsigned int index, const char *name,
q 291 drivers/net/ethernet/pensando/ionic/ionic_dev.h void ionic_q_map(struct ionic_queue *q, void *base, dma_addr_t base_pa);
q 292 drivers/net/ethernet/pensando/ionic/ionic_dev.h void ionic_q_sg_map(struct ionic_queue *q, void *base, dma_addr_t base_pa);
q 293 drivers/net/ethernet/pensando/ionic/ionic_dev.h void ionic_q_post(struct ionic_queue *q, bool ring_doorbell, ionic_desc_cb cb,
q 295 drivers/net/ethernet/pensando/ionic/ionic_dev.h void ionic_q_rewind(struct ionic_queue *q, struct ionic_desc_info *start);
q 296 drivers/net/ethernet/pensando/ionic/ionic_dev.h void ionic_q_service(struct ionic_queue *q, struct ionic_cq_info *cq_info,
q 137 drivers/net/ethernet/pensando/ionic/ionic_lif.c struct ionic_queue *q = &qcq->q;
q 146 drivers/net/ethernet/pensando/ionic/ionic_lif.c "%s-%s-%s", IONIC_DRV_NAME, name, q->name);
q 178 drivers/net/ethernet/pensando/ionic/ionic_lif.c struct ionic_queue *q = &qcq->q;
q 179 drivers/net/ethernet/pensando/ionic/ionic_lif.c struct ionic_lif *lif = q->lif;
q 188 drivers/net/ethernet/pensando/ionic/ionic_lif.c .type = q->type,
q 189 drivers/net/ethernet/pensando/ionic/ionic_lif.c .index = cpu_to_le32(q->index),
q 214 drivers/net/ethernet/pensando/ionic/ionic_lif.c struct ionic_queue *q = &qcq->q;
q 215 drivers/net/ethernet/pensando/ionic/ionic_lif.c struct ionic_lif *lif = q->lif;
q 224 drivers/net/ethernet/pensando/ionic/ionic_lif.c .type = q->type,
q 225 drivers/net/ethernet/pensando/ionic/ionic_lif.c .index = cpu_to_le32(q->index),
q 286 drivers/net/ethernet/pensando/ionic/ionic_lif.c devm_kfree(dev, qcq->q.info);
q 287 drivers/net/ethernet/pensando/ionic/ionic_lif.c qcq->q.info = NULL;
q 377 drivers/net/ethernet/pensando/ionic/ionic_lif.c new->q.info = devm_kzalloc(dev, sizeof(*new->q.info) * num_descs,
q 379 drivers/net/ethernet/pensando/ionic/ionic_lif.c if (!new->q.info) {
q 385 drivers/net/ethernet/pensando/ionic/ionic_lif.c new->q.type = type;
q 387 drivers/net/ethernet/pensando/ionic/ionic_lif.c err = ionic_q_init(lif, idev, &new->q, index, name, num_descs,
q 454 drivers/net/ethernet/pensando/ionic/ionic_lif.c ionic_q_sg_map(&new->q, sg_base, sg_base_pa);
q 457 drivers/net/ethernet/pensando/ionic/ionic_lif.c ionic_q_map(&new->q, q_base, q_base_pa);
q 459 drivers/net/ethernet/pensando/ionic/ionic_lif.c ionic_cq_bind(&new->cq, &new->q);
q 556 drivers/net/ethernet/pensando/ionic/ionic_lif.c struct ionic_queue *q = &qcq->q;
q 563 drivers/net/ethernet/pensando/ionic/ionic_lif.c .type = q->type,
q 564 drivers/net/ethernet/pensando/ionic/ionic_lif.c .index = cpu_to_le32(q->index),
q 567 drivers/net/ethernet/pensando/ionic/ionic_lif.c .intr_index = cpu_to_le16(lif->rxqcqs[q->index].qcq->intr.index),
q 568 drivers/net/ethernet/pensando/ionic/ionic_lif.c .pid = cpu_to_le16(q->pid),
q 569 drivers/net/ethernet/pensando/ionic/ionic_lif.c .ring_size = ilog2(q->num_descs),
q 570 drivers/net/ethernet/pensando/ionic/ionic_lif.c .ring_base = cpu_to_le64(q->base_pa),
q 572 drivers/net/ethernet/pensando/ionic/ionic_lif.c .sg_ring_base = cpu_to_le64(q->sg_base_pa),
q 586 drivers/net/ethernet/pensando/ionic/ionic_lif.c q->hw_type = ctx.comp.q_init.hw_type;
q 587 drivers/net/ethernet/pensando/ionic/ionic_lif.c q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index);
q 588 drivers/net/ethernet/pensando/ionic/ionic_lif.c q->dbval = IONIC_DBELL_QID(q->hw_index);
q 590 drivers/net/ethernet/pensando/ionic/ionic_lif.c dev_dbg(dev, "txq->hw_type %d\n", q->hw_type);
q 591 drivers/net/ethernet/pensando/ionic/ionic_lif.c dev_dbg(dev, "txq->hw_index %d\n", q->hw_index);
q 603 drivers/net/ethernet/pensando/ionic/ionic_lif.c struct ionic_queue *q = &qcq->q;
q 610 drivers/net/ethernet/pensando/ionic/ionic_lif.c .type = q->type,
q 611 drivers/net/ethernet/pensando/ionic/ionic_lif.c .index = cpu_to_le32(q->index),
q 614 drivers/net/ethernet/pensando/ionic/ionic_lif.c .pid = cpu_to_le16(q->pid),
q 615 drivers/net/ethernet/pensando/ionic/ionic_lif.c .ring_size = ilog2(q->num_descs),
q 616 drivers/net/ethernet/pensando/ionic/ionic_lif.c .ring_base = cpu_to_le64(q->base_pa),
q 631 drivers/net/ethernet/pensando/ionic/ionic_lif.c q->hw_type = ctx.comp.q_init.hw_type;
q 632 drivers/net/ethernet/pensando/ionic/ionic_lif.c q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index);
q 633 drivers/net/ethernet/pensando/ionic/ionic_lif.c q->dbval = IONIC_DBELL_QID(q->hw_index);
q 635 drivers/net/ethernet/pensando/ionic/ionic_lif.c dev_dbg(dev, "rxq->hw_type %d\n", q->hw_type);
q 636 drivers/net/ethernet/pensando/ionic/ionic_lif.c dev_dbg(dev, "rxq->hw_index %d\n", q->hw_index);
q 659 drivers/net/ethernet/pensando/ionic/ionic_lif.c struct ionic_queue *q;
q 663 drivers/net/ethernet/pensando/ionic/ionic_lif.c q = cq->bound_q;
q 664 drivers/net/ethernet/pensando/ionic/ionic_lif.c lif = q->info[0].cb_arg;
q 1416 drivers/net/ethernet/pensando/ionic/ionic_lif.c ionic_rx_empty(&lif->rxqcqs[i].qcq->q);
q 1523 drivers/net/ethernet/pensando/ionic/ionic_lif.c ionic_rx_fill(&lif->rxqcqs[i].qcq->q);
q 1835 drivers/net/ethernet/pensando/ionic/ionic_lif.c struct ionic_queue *q;
q 1840 drivers/net/ethernet/pensando/ionic/ionic_lif.c q = &qcq->q;
q 1852 drivers/net/ethernet/pensando/ionic/ionic_lif.c q->hw_type = comp.hw_type;
q 1853 drivers/net/ethernet/pensando/ionic/ionic_lif.c q->hw_index = le32_to_cpu(comp.hw_index);
q 1854 drivers/net/ethernet/pensando/ionic/ionic_lif.c q->dbval = IONIC_DBELL_QID(q->hw_index);
q 1856 drivers/net/ethernet/pensando/ionic/ionic_lif.c dev_dbg(dev, "adminq->hw_type %d\n", q->hw_type);
q 1857 drivers/net/ethernet/pensando/ionic/ionic_lif.c dev_dbg(dev, "adminq->hw_index %d\n", q->hw_index);
q 1886 drivers/net/ethernet/pensando/ionic/ionic_lif.c struct ionic_queue *q = &qcq->q;
q 1894 drivers/net/ethernet/pensando/ionic/ionic_lif.c .type = q->type,
q 1895 drivers/net/ethernet/pensando/ionic/ionic_lif.c .index = cpu_to_le32(q->index),
q 1899 drivers/net/ethernet/pensando/ionic/ionic_lif.c .pid = cpu_to_le16(q->pid),
q 1900 drivers/net/ethernet/pensando/ionic/ionic_lif.c .ring_size = ilog2(q->num_descs),
q 1901 drivers/net/ethernet/pensando/ionic/ionic_lif.c .ring_base = cpu_to_le64(q->base_pa),
q 1914 drivers/net/ethernet/pensando/ionic/ionic_lif.c q->hw_type = ctx.comp.q_init.hw_type;
q 1915 drivers/net/ethernet/pensando/ionic/ionic_lif.c q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index);
q 1916 drivers/net/ethernet/pensando/ionic/ionic_lif.c q->dbval = IONIC_DBELL_QID(q->hw_index);
q 1918 drivers/net/ethernet/pensando/ionic/ionic_lif.c dev_dbg(dev, "notifyq->hw_type %d\n", q->hw_type);
q 1919 drivers/net/ethernet/pensando/ionic/ionic_lif.c dev_dbg(dev, "notifyq->hw_index %d\n", q->hw_index);
q 1922 drivers/net/ethernet/pensando/ionic/ionic_lif.c q->info[0].cb_arg = lif;
q 65 drivers/net/ethernet/pensando/ionic/ionic_lif.h struct ionic_queue q;
q 80 drivers/net/ethernet/pensando/ionic/ionic_lif.h #define q_to_qcq(q) container_of(q, struct ionic_qcq, q)
q 81 drivers/net/ethernet/pensando/ionic/ionic_lif.h #define q_to_tx_stats(q) (&q_to_qcq(q)->stats->tx)
q 82 drivers/net/ethernet/pensando/ionic/ionic_lif.h #define q_to_rx_stats(q) (&q_to_qcq(q)->stats->rx)
q 187 drivers/net/ethernet/pensando/ionic/ionic_lif.h #define lif_to_txq(lif, i) (&lif_to_txqcq((lif), i)->q)
q 188 drivers/net/ethernet/pensando/ionic/ionic_lif.h #define lif_to_rxq(lif, i) (&lif_to_txqcq((lif), i)->q)
q 252 drivers/net/ethernet/pensando/ionic/ionic_lif.h qcq->q.dbell_count += dbell;
q 175 drivers/net/ethernet/pensando/ionic/ionic_main.c struct ionic_queue *adminq = &lif->adminqcq->q;
q 214 drivers/net/ethernet/pensando/ionic/ionic_main.c static void ionic_adminq_cb(struct ionic_queue *q,
q 226 drivers/net/ethernet/pensando/ionic/ionic_main.c dev = &q->lif->netdev->dev;
q 239 drivers/net/ethernet/pensando/ionic/ionic_main.c struct ionic_queue *adminq = &lif->adminqcq->q;
q 253 drivers/net/ethernet/pensando/ionic/ionic_stats.c **buf = IONIC_READ_STAT64(&txqcq->q,
q 13 drivers/net/ethernet/pensando/ionic/ionic_txrx.c static void ionic_rx_clean(struct ionic_queue *q, struct ionic_desc_info *desc_info,
q 16 drivers/net/ethernet/pensando/ionic/ionic_txrx.c static inline void ionic_txq_post(struct ionic_queue *q, bool ring_dbell,
q 19 drivers/net/ethernet/pensando/ionic/ionic_txrx.c DEBUG_STATS_TXQ_POST(q_to_qcq(q), q->head->desc, ring_dbell);
q 21 drivers/net/ethernet/pensando/ionic/ionic_txrx.c ionic_q_post(q, ring_dbell, cb_func, cb_arg);
q 24 drivers/net/ethernet/pensando/ionic/ionic_txrx.c static inline void ionic_rxq_post(struct ionic_queue *q, bool ring_dbell,
q 27 drivers/net/ethernet/pensando/ionic/ionic_txrx.c ionic_q_post(q, ring_dbell, cb_func, cb_arg);
q 29 drivers/net/ethernet/pensando/ionic/ionic_txrx.c DEBUG_STATS_RX_BUFF_CNT(q_to_qcq(q));
q 32 drivers/net/ethernet/pensando/ionic/ionic_txrx.c static inline struct netdev_queue *q_to_ndq(struct ionic_queue *q)
q 34 drivers/net/ethernet/pensando/ionic/ionic_txrx.c return netdev_get_tx_queue(q->lif->netdev, q->index);
q 37 drivers/net/ethernet/pensando/ionic/ionic_txrx.c static void ionic_rx_recycle(struct ionic_queue *q, struct ionic_desc_info *desc_info,
q 41 drivers/net/ethernet/pensando/ionic/ionic_txrx.c struct ionic_rxq_desc *new = q->head->desc;
q 46 drivers/net/ethernet/pensando/ionic/ionic_txrx.c ionic_rxq_post(q, true, ionic_rx_clean, skb);
q 49 drivers/net/ethernet/pensando/ionic/ionic_txrx.c static bool ionic_rx_copybreak(struct ionic_queue *q, struct ionic_desc_info *desc_info,
q 54 drivers/net/ethernet/pensando/ionic/ionic_txrx.c struct net_device *netdev = q->lif->netdev;
q 55 drivers/net/ethernet/pensando/ionic/ionic_txrx.c struct device *dev = q->lif->ionic->dev;
q 61 drivers/net/ethernet/pensando/ionic/ionic_txrx.c if (clen > q->lif->rx_copybreak) {
q 79 drivers/net/ethernet/pensando/ionic/ionic_txrx.c ionic_rx_recycle(q, desc_info, *skb);
q 85 drivers/net/ethernet/pensando/ionic/ionic_txrx.c static void ionic_rx_clean(struct ionic_queue *q, struct ionic_desc_info *desc_info,
q 89 drivers/net/ethernet/pensando/ionic/ionic_txrx.c struct ionic_qcq *qcq = q_to_qcq(q);
q 94 drivers/net/ethernet/pensando/ionic/ionic_txrx.c stats = q_to_rx_stats(q);
q 95 drivers/net/ethernet/pensando/ionic/ionic_txrx.c netdev = q->lif->netdev;
q 98 drivers/net/ethernet/pensando/ionic/ionic_txrx.c ionic_rx_recycle(q, desc_info, skb);
q 102 drivers/net/ethernet/pensando/ionic/ionic_txrx.c if (unlikely(test_bit(IONIC_LIF_QUEUE_RESET, q->lif->state))) {
q 104 drivers/net/ethernet/pensando/ionic/ionic_txrx.c ionic_rx_recycle(q, desc_info, skb);
q 111 drivers/net/ethernet/pensando/ionic/ionic_txrx.c ionic_rx_copybreak(q, desc_info, cq_info, &skb);
q 116 drivers/net/ethernet/pensando/ionic/ionic_txrx.c skb_record_rx_queue(skb, q->index);
q 162 drivers/net/ethernet/pensando/ionic/ionic_txrx.c struct ionic_queue *q = cq->bound_q;
q 169 drivers/net/ethernet/pensando/ionic/ionic_txrx.c if (q->tail->index == q->head->index)
q 172 drivers/net/ethernet/pensando/ionic/ionic_txrx.c desc_info = q->tail;
q 176 drivers/net/ethernet/pensando/ionic/ionic_txrx.c q->tail = desc_info->next;
q 179 drivers/net/ethernet/pensando/ionic/ionic_txrx.c ionic_rx_clean(q, desc_info, cq_info, desc_info->cb_arg);
q 216 drivers/net/ethernet/pensando/ionic/ionic_txrx.c static struct sk_buff *ionic_rx_skb_alloc(struct ionic_queue *q, unsigned int len,
q 219 drivers/net/ethernet/pensando/ionic/ionic_txrx.c struct ionic_lif *lif = q->lif;
q 227 drivers/net/ethernet/pensando/ionic/ionic_txrx.c stats = q_to_rx_stats(q);
q 231 drivers/net/ethernet/pensando/ionic/ionic_txrx.c netdev->name, q->name);
q 240 drivers/net/ethernet/pensando/ionic/ionic_txrx.c netdev->name, q->name);
q 250 drivers/net/ethernet/pensando/ionic/ionic_txrx.c void ionic_rx_fill(struct ionic_queue *q)
q 252 drivers/net/ethernet/pensando/ionic/ionic_txrx.c struct net_device *netdev = q->lif->netdev;
q 262 drivers/net/ethernet/pensando/ionic/ionic_txrx.c for (i = ionic_q_space_avail(q); i; i--) {
q 263 drivers/net/ethernet/pensando/ionic/ionic_txrx.c skb = ionic_rx_skb_alloc(q, len, &dma_addr);
drivers/net/ethernet/pensando/ionic/ionic_txrx.c desc = q->head->desc; q 272 drivers/net/ethernet/pensando/ionic/ionic_txrx.c ring_doorbell = ((q->head->index + 1) & q 275 drivers/net/ethernet/pensando/ionic/ionic_txrx.c ionic_rxq_post(q, ring_doorbell, ionic_rx_clean, skb); q 284 drivers/net/ethernet/pensando/ionic/ionic_txrx.c void ionic_rx_empty(struct ionic_queue *q) q 286 drivers/net/ethernet/pensando/ionic/ionic_txrx.c struct device *dev = q->lif->ionic->dev; q 290 drivers/net/ethernet/pensando/ionic/ionic_txrx.c for (cur = q->tail; cur != q->head; cur = cur->next) { q 337 drivers/net/ethernet/pensando/ionic/ionic_txrx.c static dma_addr_t ionic_tx_map_single(struct ionic_queue *q, void *data, size_t len) q 339 drivers/net/ethernet/pensando/ionic/ionic_txrx.c struct ionic_tx_stats *stats = q_to_tx_stats(q); q 340 drivers/net/ethernet/pensando/ionic/ionic_txrx.c struct device *dev = q->lif->ionic->dev; q 346 drivers/net/ethernet/pensando/ionic/ionic_txrx.c q->lif->netdev->name, q->name); q 353 drivers/net/ethernet/pensando/ionic/ionic_txrx.c static dma_addr_t ionic_tx_map_frag(struct ionic_queue *q, const skb_frag_t *frag, q 356 drivers/net/ethernet/pensando/ionic/ionic_txrx.c struct ionic_tx_stats *stats = q_to_tx_stats(q); q 357 drivers/net/ethernet/pensando/ionic/ionic_txrx.c struct device *dev = q->lif->ionic->dev; q 363 drivers/net/ethernet/pensando/ionic/ionic_txrx.c q->lif->netdev->name, q->name); q 369 drivers/net/ethernet/pensando/ionic/ionic_txrx.c static void ionic_tx_clean(struct ionic_queue *q, struct ionic_desc_info *desc_info, q 374 drivers/net/ethernet/pensando/ionic/ionic_txrx.c struct ionic_tx_stats *stats = q_to_tx_stats(q); q 376 drivers/net/ethernet/pensando/ionic/ionic_txrx.c struct device *dev = q->lif->ionic->dev; q 405 drivers/net/ethernet/pensando/ionic/ionic_txrx.c if (unlikely(__netif_subqueue_stopped(q->lif->netdev, q 407 drivers/net/ethernet/pensando/ionic/ionic_txrx.c netif_wake_subqueue(q->lif->netdev, queue_index); q 408 drivers/net/ethernet/pensando/ionic/ionic_txrx.c q->wake++; q 412 drivers/net/ethernet/pensando/ionic/ionic_txrx.c netdev_tx_completed_queue(q_to_ndq(q), 1, len); q 420 drivers/net/ethernet/pensando/ionic/ionic_txrx.c struct ionic_queue *q = cq->bound_q; q 432 drivers/net/ethernet/pensando/ionic/ionic_txrx.c desc_info = q->tail; q 433 drivers/net/ethernet/pensando/ionic/ionic_txrx.c q->tail = desc_info->next; q 434 drivers/net/ethernet/pensando/ionic/ionic_txrx.c ionic_tx_clean(q, desc_info, cq->tail, q 503 drivers/net/ethernet/pensando/ionic/ionic_txrx.c static void ionic_tx_tso_post(struct ionic_queue *q, struct ionic_txq_desc *desc, q 528 drivers/net/ethernet/pensando/ionic/ionic_txrx.c netdev_tx_sent_queue(q_to_ndq(q), skb->len); q 529 drivers/net/ethernet/pensando/ionic/ionic_txrx.c ionic_txq_post(q, !netdev_xmit_more(), ionic_tx_clean, skb); q 531 drivers/net/ethernet/pensando/ionic/ionic_txrx.c ionic_txq_post(q, false, ionic_tx_clean, NULL); q 535 drivers/net/ethernet/pensando/ionic/ionic_txrx.c static struct ionic_txq_desc *ionic_tx_tso_next(struct ionic_queue *q, q 538 drivers/net/ethernet/pensando/ionic/ionic_txrx.c struct ionic_txq_sg_desc *sg_desc = q->head->sg_desc; q 539 drivers/net/ethernet/pensando/ionic/ionic_txrx.c struct ionic_txq_desc *desc = q->head->desc; q 545 drivers/net/ethernet/pensando/ionic/ionic_txrx.c static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb) q 547 drivers/net/ethernet/pensando/ionic/ionic_txrx.c struct ionic_tx_stats *stats = q_to_tx_stats(q); q 548 
drivers/net/ethernet/pensando/ionic/ionic_txrx.c struct ionic_desc_info *abort = q->head; q 549 drivers/net/ethernet/pensando/ionic/ionic_txrx.c struct device *dev = q->lif->ionic->dev; q 605 drivers/net/ethernet/pensando/ionic/ionic_txrx.c desc = ionic_tx_tso_next(q, &elem); q 613 drivers/net/ethernet/pensando/ionic/ionic_txrx.c desc_addr = ionic_tx_map_single(q, skb->data + offset, len); q 623 drivers/net/ethernet/pensando/ionic/ionic_txrx.c ionic_tx_tso_post(q, desc, skb, q 631 drivers/net/ethernet/pensando/ionic/ionic_txrx.c desc = ionic_tx_tso_next(q, &elem); q 650 drivers/net/ethernet/pensando/ionic/ionic_txrx.c cpu_to_le64(ionic_tx_map_frag(q, frag, q 662 drivers/net/ethernet/pensando/ionic/ionic_txrx.c ionic_tx_tso_post(q, desc, skb, desc_addr, q 669 drivers/net/ethernet/pensando/ionic/ionic_txrx.c desc = ionic_tx_tso_next(q, &elem); q 674 drivers/net/ethernet/pensando/ionic/ionic_txrx.c desc_addr = ionic_tx_map_frag(q, frag, q 685 drivers/net/ethernet/pensando/ionic/ionic_txrx.c ionic_tx_tso_post(q, desc, skb, desc_addr, q 692 drivers/net/ethernet/pensando/ionic/ionic_txrx.c desc = ionic_tx_tso_next(q, &elem); q 705 drivers/net/ethernet/pensando/ionic/ionic_txrx.c while (rewind->desc != q->head->desc) { q 706 drivers/net/ethernet/pensando/ionic/ionic_txrx.c ionic_tx_clean(q, rewind, NULL, NULL); q 709 drivers/net/ethernet/pensando/ionic/ionic_txrx.c q->head = abort; q 714 drivers/net/ethernet/pensando/ionic/ionic_txrx.c static int ionic_tx_calc_csum(struct ionic_queue *q, struct sk_buff *skb) q 716 drivers/net/ethernet/pensando/ionic/ionic_txrx.c struct ionic_tx_stats *stats = q_to_tx_stats(q); q 717 drivers/net/ethernet/pensando/ionic/ionic_txrx.c struct ionic_txq_desc *desc = q->head->desc; q 718 drivers/net/ethernet/pensando/ionic/ionic_txrx.c struct device *dev = q->lif->ionic->dev; q 728 drivers/net/ethernet/pensando/ionic/ionic_txrx.c dma_addr = ionic_tx_map_single(q, skb->data, skb_headlen(skb)); q 751 drivers/net/ethernet/pensando/ionic/ionic_txrx.c static int ionic_tx_calc_no_csum(struct ionic_queue *q, struct sk_buff *skb) q 753 drivers/net/ethernet/pensando/ionic/ionic_txrx.c struct ionic_tx_stats *stats = q_to_tx_stats(q); q 754 drivers/net/ethernet/pensando/ionic/ionic_txrx.c struct ionic_txq_desc *desc = q->head->desc; q 755 drivers/net/ethernet/pensando/ionic/ionic_txrx.c struct device *dev = q->lif->ionic->dev; q 765 drivers/net/ethernet/pensando/ionic/ionic_txrx.c dma_addr = ionic_tx_map_single(q, skb->data, skb_headlen(skb)); q 783 drivers/net/ethernet/pensando/ionic/ionic_txrx.c static int ionic_tx_skb_frags(struct ionic_queue *q, struct sk_buff *skb) q 785 drivers/net/ethernet/pensando/ionic/ionic_txrx.c struct ionic_txq_sg_desc *sg_desc = q->head->sg_desc; q 788 drivers/net/ethernet/pensando/ionic/ionic_txrx.c struct ionic_tx_stats *stats = q_to_tx_stats(q); q 789 drivers/net/ethernet/pensando/ionic/ionic_txrx.c struct device *dev = q->lif->ionic->dev; q 797 drivers/net/ethernet/pensando/ionic/ionic_txrx.c dma_addr = ionic_tx_map_frag(q, frag, 0, len); q 808 drivers/net/ethernet/pensando/ionic/ionic_txrx.c static int ionic_tx(struct ionic_queue *q, struct sk_buff *skb) q 810 drivers/net/ethernet/pensando/ionic/ionic_txrx.c struct ionic_tx_stats *stats = q_to_tx_stats(q); q 815 drivers/net/ethernet/pensando/ionic/ionic_txrx.c err = ionic_tx_calc_csum(q, skb); q 817 drivers/net/ethernet/pensando/ionic/ionic_txrx.c err = ionic_tx_calc_no_csum(q, skb); q 822 drivers/net/ethernet/pensando/ionic/ionic_txrx.c err = ionic_tx_skb_frags(q, skb); q 830 
q 831 drivers/net/ethernet/pensando/ionic/ionic_txrx.c ionic_txq_post(q, !netdev_xmit_more(), ionic_tx_clean, skb);
q 836 drivers/net/ethernet/pensando/ionic/ionic_txrx.c static int ionic_tx_descs_needed(struct ionic_queue *q, struct sk_buff *skb)
q 838 drivers/net/ethernet/pensando/ionic/ionic_txrx.c struct ionic_tx_stats *stats = q_to_tx_stats(q);
q 860 drivers/net/ethernet/pensando/ionic/ionic_txrx.c static int ionic_maybe_stop_tx(struct ionic_queue *q, int ndescs)
q 864 drivers/net/ethernet/pensando/ionic/ionic_txrx.c if (unlikely(!ionic_q_has_space(q, ndescs))) {
q 865 drivers/net/ethernet/pensando/ionic/ionic_txrx.c netif_stop_subqueue(q->lif->netdev, q->index);
q 866 drivers/net/ethernet/pensando/ionic/ionic_txrx.c q->stop++;
q 871 drivers/net/ethernet/pensando/ionic/ionic_txrx.c if (ionic_q_has_space(q, ndescs)) {
q 872 drivers/net/ethernet/pensando/ionic/ionic_txrx.c netif_wake_subqueue(q->lif->netdev, q->index);
q 884 drivers/net/ethernet/pensando/ionic/ionic_txrx.c struct ionic_queue *q;
q 895 drivers/net/ethernet/pensando/ionic/ionic_txrx.c q = lif_to_txq(lif, queue_index);
q 897 drivers/net/ethernet/pensando/ionic/ionic_txrx.c ndescs = ionic_tx_descs_needed(q, skb);
q 901 drivers/net/ethernet/pensando/ionic/ionic_txrx.c if (unlikely(ionic_maybe_stop_tx(q, ndescs)))
q 905 drivers/net/ethernet/pensando/ionic/ionic_txrx.c err = ionic_tx_tso(q, skb);
q 907 drivers/net/ethernet/pensando/ionic/ionic_txrx.c err = ionic_tx(q, skb);
q 916 drivers/net/ethernet/pensando/ionic/ionic_txrx.c ionic_maybe_stop_tx(q, 4);
q 921 drivers/net/ethernet/pensando/ionic/ionic_txrx.c q->stop++;
q 922 drivers/net/ethernet/pensando/ionic/ionic_txrx.c q->drop++;
q 10 drivers/net/ethernet/pensando/ionic/ionic_txrx.h void ionic_rx_fill(struct ionic_queue *q);
q 11 drivers/net/ethernet/pensando/ionic/ionic_txrx.h void ionic_rx_empty(struct ionic_queue *q);
q 174 drivers/net/ethernet/renesas/ravb_main.c static int ravb_tx_free(struct net_device *ndev, int q, bool free_txed_only)
q 177 drivers/net/ethernet/renesas/ravb_main.c struct net_device_stats *stats = &priv->stats[q];
q 184 drivers/net/ethernet/renesas/ravb_main.c for (; priv->cur_tx[q] - priv->dirty_tx[q] > 0; priv->dirty_tx[q]++) {
q 187 drivers/net/ethernet/renesas/ravb_main.c entry = priv->dirty_tx[q] % (priv->num_tx_ring[q] *
q 189 drivers/net/ethernet/renesas/ravb_main.c desc = &priv->tx_ring[q][entry];
q 197 drivers/net/ethernet/renesas/ravb_main.c if (priv->tx_skb[q][entry / num_tx_desc]) {
q 203 drivers/net/ethernet/renesas/ravb_main.c dev_kfree_skb_any(priv->tx_skb[q][entry]);
q 204 drivers/net/ethernet/renesas/ravb_main.c priv->tx_skb[q][entry] = NULL;
q 218 drivers/net/ethernet/renesas/ravb_main.c static void ravb_ring_free(struct net_device *ndev, int q)
q 225 drivers/net/ethernet/renesas/ravb_main.c if (priv->rx_ring[q]) {
q 226 drivers/net/ethernet/renesas/ravb_main.c for (i = 0; i < priv->num_rx_ring[q]; i++) {
q 227 drivers/net/ethernet/renesas/ravb_main.c struct ravb_ex_rx_desc *desc = &priv->rx_ring[q][i];
q 237 drivers/net/ethernet/renesas/ravb_main.c (priv->num_rx_ring[q] + 1);
q 238 drivers/net/ethernet/renesas/ravb_main.c dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q],
q 239 drivers/net/ethernet/renesas/ravb_main.c priv->rx_desc_dma[q]);
q 240 drivers/net/ethernet/renesas/ravb_main.c priv->rx_ring[q] = NULL;
q 243 drivers/net/ethernet/renesas/ravb_main.c if (priv->tx_ring[q]) {
q 244 drivers/net/ethernet/renesas/ravb_main.c ravb_tx_free(ndev, q, false);
q 247 drivers/net/ethernet/renesas/ravb_main.c (priv->num_tx_ring[q] * num_tx_desc + 1);
q 248 drivers/net/ethernet/renesas/ravb_main.c dma_free_coherent(ndev->dev.parent, ring_size, priv->tx_ring[q],
q 249 drivers/net/ethernet/renesas/ravb_main.c priv->tx_desc_dma[q]);
q 250 drivers/net/ethernet/renesas/ravb_main.c priv->tx_ring[q] = NULL;
q 254 drivers/net/ethernet/renesas/ravb_main.c if (priv->rx_skb[q]) {
q 255 drivers/net/ethernet/renesas/ravb_main.c for (i = 0; i < priv->num_rx_ring[q]; i++)
q 256 drivers/net/ethernet/renesas/ravb_main.c dev_kfree_skb(priv->rx_skb[q][i]);
q 258 drivers/net/ethernet/renesas/ravb_main.c kfree(priv->rx_skb[q]);
q 259 drivers/net/ethernet/renesas/ravb_main.c priv->rx_skb[q] = NULL;
q 262 drivers/net/ethernet/renesas/ravb_main.c kfree(priv->tx_align[q]);
q 263 drivers/net/ethernet/renesas/ravb_main.c priv->tx_align[q] = NULL;
q 268 drivers/net/ethernet/renesas/ravb_main.c kfree(priv->tx_skb[q]);
q 269 drivers/net/ethernet/renesas/ravb_main.c priv->tx_skb[q] = NULL;
q 273 drivers/net/ethernet/renesas/ravb_main.c static void ravb_ring_format(struct net_device *ndev, int q)
q 280 drivers/net/ethernet/renesas/ravb_main.c int rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q];
q 281 drivers/net/ethernet/renesas/ravb_main.c int tx_ring_size = sizeof(*tx_desc) * priv->num_tx_ring[q] *
q 286 drivers/net/ethernet/renesas/ravb_main.c priv->cur_rx[q] = 0;
q 287 drivers/net/ethernet/renesas/ravb_main.c priv->cur_tx[q] = 0;
q 288 drivers/net/ethernet/renesas/ravb_main.c priv->dirty_rx[q] = 0;
q 289 drivers/net/ethernet/renesas/ravb_main.c priv->dirty_tx[q] = 0;
q 291 drivers/net/ethernet/renesas/ravb_main.c memset(priv->rx_ring[q], 0, rx_ring_size);
q 293 drivers/net/ethernet/renesas/ravb_main.c for (i = 0; i < priv->num_rx_ring[q]; i++) {
q 295 drivers/net/ethernet/renesas/ravb_main.c rx_desc = &priv->rx_ring[q][i];
q 297 drivers/net/ethernet/renesas/ravb_main.c dma_addr = dma_map_single(ndev->dev.parent, priv->rx_skb[q][i]->data,
q 308 drivers/net/ethernet/renesas/ravb_main.c rx_desc = &priv->rx_ring[q][i];
q 309 drivers/net/ethernet/renesas/ravb_main.c rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);
q 312 drivers/net/ethernet/renesas/ravb_main.c memset(priv->tx_ring[q], 0, tx_ring_size);
q 314 drivers/net/ethernet/renesas/ravb_main.c for (i = 0, tx_desc = priv->tx_ring[q]; i < priv->num_tx_ring[q];
q 322 drivers/net/ethernet/renesas/ravb_main.c tx_desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]);
q 326 drivers/net/ethernet/renesas/ravb_main.c desc = &priv->desc_bat[RX_QUEUE_OFFSET + q];
q 328 drivers/net/ethernet/renesas/ravb_main.c desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);
q 331 drivers/net/ethernet/renesas/ravb_main.c desc = &priv->desc_bat[q];
q 333 drivers/net/ethernet/renesas/ravb_main.c desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]);
q 337 drivers/net/ethernet/renesas/ravb_main.c static int ravb_ring_init(struct net_device *ndev, int q)
q 346 drivers/net/ethernet/renesas/ravb_main.c priv->rx_skb[q] = kcalloc(priv->num_rx_ring[q],
q 347 drivers/net/ethernet/renesas/ravb_main.c sizeof(*priv->rx_skb[q]), GFP_KERNEL);
q 348 drivers/net/ethernet/renesas/ravb_main.c priv->tx_skb[q] = kcalloc(priv->num_tx_ring[q],
q 349 drivers/net/ethernet/renesas/ravb_main.c sizeof(*priv->tx_skb[q]), GFP_KERNEL);
q 350 drivers/net/ethernet/renesas/ravb_main.c if (!priv->rx_skb[q] || !priv->tx_skb[q])
q 353 drivers/net/ethernet/renesas/ravb_main.c for (i = 0; i < priv->num_rx_ring[q]; i++) {
q 358 drivers/net/ethernet/renesas/ravb_main.c priv->rx_skb[q][i] = skb;
q 363 drivers/net/ethernet/renesas/ravb_main.c priv->tx_align[q] = kmalloc(DPTR_ALIGN * priv->num_tx_ring[q] +
q 365 drivers/net/ethernet/renesas/ravb_main.c if (!priv->tx_align[q])
q 370 drivers/net/ethernet/renesas/ravb_main.c ring_size = sizeof(struct ravb_ex_rx_desc) * (priv->num_rx_ring[q] + 1);
q 371 drivers/net/ethernet/renesas/ravb_main.c priv->rx_ring[q] = dma_alloc_coherent(ndev->dev.parent, ring_size,
q 372 drivers/net/ethernet/renesas/ravb_main.c &priv->rx_desc_dma[q],
q 374 drivers/net/ethernet/renesas/ravb_main.c if (!priv->rx_ring[q])
q 377 drivers/net/ethernet/renesas/ravb_main.c priv->dirty_rx[q] = 0;
q 381 drivers/net/ethernet/renesas/ravb_main.c (priv->num_tx_ring[q] * num_tx_desc + 1);
q 382 drivers/net/ethernet/renesas/ravb_main.c priv->tx_ring[q] = dma_alloc_coherent(ndev->dev.parent, ring_size,
q 383 drivers/net/ethernet/renesas/ravb_main.c &priv->tx_desc_dma[q],
q 385 drivers/net/ethernet/renesas/ravb_main.c if (!priv->tx_ring[q])
q 391 drivers/net/ethernet/renesas/ravb_main.c ravb_ring_free(ndev, q);
q 533 drivers/net/ethernet/renesas/ravb_main.c static bool ravb_rx(struct net_device *ndev, int *quota, int q)
q 536 drivers/net/ethernet/renesas/ravb_main.c int entry = priv->cur_rx[q] % priv->num_rx_ring[q];
q 537 drivers/net/ethernet/renesas/ravb_main.c int boguscnt = (priv->dirty_rx[q] + priv->num_rx_ring[q]) -
q 538 drivers/net/ethernet/renesas/ravb_main.c priv->cur_rx[q];
q 539 drivers/net/ethernet/renesas/ravb_main.c struct net_device_stats *stats = &priv->stats[q];
q 550 drivers/net/ethernet/renesas/ravb_main.c desc = &priv->rx_ring[q][entry];
q 581 drivers/net/ethernet/renesas/ravb_main.c skb = priv->rx_skb[q][entry];
q 582 drivers/net/ethernet/renesas/ravb_main.c priv->rx_skb[q][entry] = NULL;
q 586 drivers/net/ethernet/renesas/ravb_main.c get_ts &= (q == RAVB_NC) ?
q 604 drivers/net/ethernet/renesas/ravb_main.c napi_gro_receive(&priv->napi[q], skb);
q 609 drivers/net/ethernet/renesas/ravb_main.c entry = (++priv->cur_rx[q]) % priv->num_rx_ring[q];
q 610 drivers/net/ethernet/renesas/ravb_main.c desc = &priv->rx_ring[q][entry];
q 614 drivers/net/ethernet/renesas/ravb_main.c for (; priv->cur_rx[q] - priv->dirty_rx[q] > 0; priv->dirty_rx[q]++) {
q 615 drivers/net/ethernet/renesas/ravb_main.c entry = priv->dirty_rx[q] % priv->num_rx_ring[q];
q 616 drivers/net/ethernet/renesas/ravb_main.c desc = &priv->rx_ring[q][entry];
q 619 drivers/net/ethernet/renesas/ravb_main.c if (!priv->rx_skb[q][entry]) {
q 636 drivers/net/ethernet/renesas/ravb_main.c priv->rx_skb[q][entry] = skb;
q 756 drivers/net/ethernet/renesas/ravb_main.c static bool ravb_queue_interrupt(struct net_device *ndev, int q)
q 764 drivers/net/ethernet/renesas/ravb_main.c if (((ris0 & ric0) & BIT(q)) || ((tis & tic) & BIT(q))) {
q 765 drivers/net/ethernet/renesas/ravb_main.c if (napi_schedule_prep(&priv->napi[q])) {
q 768 drivers/net/ethernet/renesas/ravb_main.c ravb_write(ndev, ric0 & ~BIT(q), RIC0);
q 769 drivers/net/ethernet/renesas/ravb_main.c ravb_write(ndev, tic & ~BIT(q), TIC);
q 771 drivers/net/ethernet/renesas/ravb_main.c ravb_write(ndev, BIT(q), RID0);
q 772 drivers/net/ethernet/renesas/ravb_main.c ravb_write(ndev, BIT(q), TID);
q 774 drivers/net/ethernet/renesas/ravb_main.c __napi_schedule(&priv->napi[q]);
q 813 drivers/net/ethernet/renesas/ravb_main.c int q;
q 820 drivers/net/ethernet/renesas/ravb_main.c for (q = RAVB_NC; q >= RAVB_BE; q--) {
q 821 drivers/net/ethernet/renesas/ravb_main.c if (ravb_queue_interrupt(ndev, q))
q 880 drivers/net/ethernet/renesas/ravb_main.c static irqreturn_t ravb_dma_interrupt(int irq, void *dev_id, int q)
q 889 drivers/net/ethernet/renesas/ravb_main.c if (ravb_queue_interrupt(ndev, q))
q 911 drivers/net/ethernet/renesas/ravb_main.c int q = napi - priv->napi;
q 912 drivers/net/ethernet/renesas/ravb_main.c int mask = BIT(q);
q 926 drivers/net/ethernet/renesas/ravb_main.c if (ravb_rx(ndev, &quota, q))
q 934 drivers/net/ethernet/renesas/ravb_main.c ravb_tx_free(ndev, q, true);
q 935 drivers/net/ethernet/renesas/ravb_main.c netif_wake_subqueue(ndev, q);
q 1159 drivers/net/ethernet/renesas/ravb_main.c int q;
q 1162 drivers/net/ethernet/renesas/ravb_main.c for (q = RAVB_BE; q < NUM_RX_QUEUE; q++) {
q 1163 drivers/net/ethernet/renesas/ravb_main.c struct net_device_stats *stats = &priv->stats[q];
q 1165 drivers/net/ethernet/renesas/ravb_main.c data[i++] = priv->cur_rx[q];
q 1166 drivers/net/ethernet/renesas/ravb_main.c data[i++] = priv->cur_tx[q];
q 1167 drivers/net/ethernet/renesas/ravb_main.c data[i++] = priv->dirty_rx[q];
q 1168 drivers/net/ethernet/renesas/ravb_main.c data[i++] = priv->dirty_tx[q];
q 1476 drivers/net/ethernet/renesas/ravb_main.c u16 q = skb_get_queue_mapping(skb);
q 1486 drivers/net/ethernet/renesas/ravb_main.c if (priv->cur_tx[q] - priv->dirty_tx[q] > (priv->num_tx_ring[q] - 1) *
q 1490 drivers/net/ethernet/renesas/ravb_main.c netif_stop_subqueue(ndev, q);
q 1498 drivers/net/ethernet/renesas/ravb_main.c entry = priv->cur_tx[q] % (priv->num_tx_ring[q] * num_tx_desc);
q 1499 drivers/net/ethernet/renesas/ravb_main.c priv->tx_skb[q][entry / num_tx_desc] = skb;
q 1502 drivers/net/ethernet/renesas/ravb_main.c buffer = PTR_ALIGN(priv->tx_align[q], DPTR_ALIGN) +
q 1527 drivers/net/ethernet/renesas/ravb_main.c desc = &priv->tx_ring[q][entry];
q 1540 drivers/net/ethernet/renesas/ravb_main.c desc = &priv->tx_ring[q][entry];
q 1551 drivers/net/ethernet/renesas/ravb_main.c if (q == RAVB_NC) {
q 1582 drivers/net/ethernet/renesas/ravb_main.c ravb_modify(ndev, TCCR, TCCR_TSRQ0 << q, TCCR_TSRQ0 << q);
q 1584 drivers/net/ethernet/renesas/ravb_main.c priv->cur_tx[q] += num_tx_desc;
q 1585 drivers/net/ethernet/renesas/ravb_main.c if (priv->cur_tx[q] - priv->dirty_tx[q] >
q 1586 drivers/net/ethernet/renesas/ravb_main.c (priv->num_tx_ring[q] - 1) * num_tx_desc &&
q 1587 drivers/net/ethernet/renesas/ravb_main.c !ravb_tx_free(ndev, q, true))
q 1588 drivers/net/ethernet/renesas/ravb_main.c netif_stop_subqueue(ndev, q);
q 1599 drivers/net/ethernet/renesas/ravb_main.c priv->tx_skb[q][entry / num_tx_desc] = NULL;
q 1990 drivers/net/ethernet/renesas/ravb_main.c int error, irq, q;
q 2125 drivers/net/ethernet/renesas/ravb_main.c for (q = RAVB_BE; q < DBAT_ENTRY_NUM; q++)
q 2126 drivers/net/ethernet/renesas/ravb_main.c priv->desc_bat[q].die_dt = DT_EOS;
q 835 drivers/net/ethernet/sfc/ptp.c static void efx_ptp_deliver_rx_queue(struct sk_buff_head *q)
q 839 drivers/net/ethernet/sfc/ptp.c while ((skb = skb_dequeue(q))) {
q 1228 drivers/net/ethernet/sfc/ptp.c static void efx_ptp_process_events(struct efx_nic *efx, struct sk_buff_head *q)
q 1238 drivers/net/ethernet/sfc/ptp.c __skb_queue_tail(q, skb);
q 1241 drivers/net/ethernet/sfc/ptp.c __skb_queue_tail(q, skb);
q 1245 drivers/net/ethernet/sfc/ptp.c __skb_queue_tail(q, skb);
q 250 drivers/net/ethernet/stmicro/stmmac/dwmac4.h #define MTL_RXQ_DMA_QXMDMACH(chan, q) ((chan) << (8 * (q)))
q 1398 drivers/net/ethernet/ti/davinci_emac.c int q, m, ret;
q 1538 drivers/net/ethernet/ti/davinci_emac.c for (q = res_num; q >= 0; q--) {
q 1539 drivers/net/ethernet/ti/davinci_emac.c res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, q);
q 1543 drivers/net/ethernet/ti/davinci_emac.c if (q != res_num)
q 49 drivers/net/ethernet/ti/netcp_core.c #define knav_queue_get_id(q) knav_queue_device_control(q, \
q 52 drivers/net/ethernet/ti/netcp_core.c #define knav_queue_enable_notify(q) knav_queue_device_control(q, \
q 56 drivers/net/ethernet/ti/netcp_core.c #define knav_queue_disable_notify(q) knav_queue_device_control(q, \
q 60 drivers/net/ethernet/ti/netcp_core.c #define knav_queue_get_count(q) knav_queue_device_control(q, \
q 1741 drivers/net/ethernet/via/via-velocity.c int q, int n)
q 1743 drivers/net/ethernet/via/via-velocity.c struct velocity_td_info *td_info = &(vptr->tx.infos[q][n]);
q 1423 drivers/net/ethernet/via/via-velocity.h #define AVAIL_TD(p,q) ((p)->options.numtx-((p)->tx.used[(q)]))
q 449 drivers/net/fddi/skfp/h/smc.h struct s_queue q ; /* queue */
q 860 drivers/net/fddi/skfp/hwmtm.c if (smc->q.ev_get != smc->q.ev_put) {
q 32 drivers/net/fddi/skfp/queue.c smc->q.ev_put = smc->q.ev_get = smc->q.ev_queue ;
q 41 drivers/net/fddi/skfp/queue.c smc->q.ev_put->class = class ;
q 42 drivers/net/fddi/skfp/queue.c smc->q.ev_put->event = event ;
q 43 drivers/net/fddi/skfp/queue.c if (++smc->q.ev_put == &smc->q.ev_queue[MAX_EVENT])
q 44 drivers/net/fddi/skfp/queue.c smc->q.ev_put = smc->q.ev_queue ;
q 46 drivers/net/fddi/skfp/queue.c if (smc->q.ev_put == smc->q.ev_get) {
q 74 drivers/net/fddi/skfp/queue.c ev = smc->q.ev_get ;
q 75 drivers/net/fddi/skfp/queue.c PRINTF("dispatch get %x put %x\n",ev,smc->q.ev_put) ;
q 76 drivers/net/fddi/skfp/queue.c while (ev != smc->q.ev_put) {
q 108 drivers/net/fddi/skfp/queue.c if (++ev == &smc->q.ev_queue[MAX_EVENT])
q 109 drivers/net/fddi/skfp/queue.c ev = smc->q.ev_queue ;
q 112 drivers/net/fddi/skfp/queue.c smc->q.ev_get = ev;
q 43 drivers/net/hyperv/netvsc_trace.h TP_PROTO(const struct net_device *ndev, u16 q,
q 45 drivers/net/hyperv/netvsc_trace.h TP_ARGS(ndev, q, msg),
q 55 drivers/net/hyperv/netvsc_trace.h __entry->queue = q;
q 66 drivers/net/hyperv/netvsc_trace.h TP_PROTO(const struct net_device *ndev, u16 q,
q 68 drivers/net/hyperv/netvsc_trace.h TP_ARGS(ndev, q, msg)
q 72 drivers/net/hyperv/netvsc_trace.h TP_PROTO(const struct net_device *ndev, u16 q,
q 74 drivers/net/hyperv/netvsc_trace.h TP_ARGS(ndev, q, msg)
q 1685 drivers/net/ppp/ppp_generic.c unsigned char *p, *q;
q 1859 drivers/net/ppp/ppp_generic.c q = skb_put(frag, flen + hdrlen);
q 1862 drivers/net/ppp/ppp_generic.c put_unaligned_be16(PPP_MP, q);
q 1864 drivers/net/ppp/ppp_generic.c q[2] = bits + ((ppp->nxseq >> 8) & 0xf);
q 1865 drivers/net/ppp/ppp_generic.c q[3] = ppp->nxseq;
q 1867 drivers/net/ppp/ppp_generic.c q[2] = bits;
q 1868 drivers/net/ppp/ppp_generic.c q[3] = ppp->nxseq >> 16;
q 1869 drivers/net/ppp/ppp_generic.c q[4] = ppp->nxseq >> 8;
q 1870 drivers/net/ppp/ppp_generic.c q[5] = ppp->nxseq;
q 1873 drivers/net/ppp/ppp_generic.c memcpy(q + hdrlen, p, flen);
q 33 drivers/net/tap.c static inline bool tap_legacy_is_little_endian(struct tap_queue *q)
q 35 drivers/net/tap.c return q->flags & TAP_VNET_BE ? false :
q 39 drivers/net/tap.c static long tap_get_vnet_be(struct tap_queue *q, int __user *sp)
q 41 drivers/net/tap.c int s = !!(q->flags & TAP_VNET_BE);
q 49 drivers/net/tap.c static long tap_set_vnet_be(struct tap_queue *q, int __user *sp)
q 57 drivers/net/tap.c q->flags |= TAP_VNET_BE;
q 59 drivers/net/tap.c q->flags &= ~TAP_VNET_BE;
q 64 drivers/net/tap.c static inline bool tap_legacy_is_little_endian(struct tap_queue *q)
q 69 drivers/net/tap.c static long tap_get_vnet_be(struct tap_queue *q, int __user *argp)
q 74 drivers/net/tap.c static long tap_set_vnet_be(struct tap_queue *q, int __user *argp)
q 80 drivers/net/tap.c static inline bool tap_is_little_endian(struct tap_queue *q)
q 82 drivers/net/tap.c return q->flags & TAP_VNET_LE ||
q 83 drivers/net/tap.c tap_legacy_is_little_endian(q);
q 86 drivers/net/tap.c static inline u16 tap16_to_cpu(struct tap_queue *q, __virtio16 val)
q 88 drivers/net/tap.c return __virtio16_to_cpu(tap_is_little_endian(q), val);
q 91 drivers/net/tap.c static inline __virtio16 cpu_to_tap16(struct tap_queue *q, u16 val)
q 93 drivers/net/tap.c return __cpu_to_virtio16(tap_is_little_endian(q), val);
q 145 drivers/net/tap.c struct tap_queue *q)
q 151 drivers/net/tap.c if (q->enabled)
q 155 drivers/net/tap.c rcu_assign_pointer(tap->taps[tap->numvtaps], q);
q 156 drivers/net/tap.c q->queue_index = tap->numvtaps;
q 157 drivers/net/tap.c q->enabled = true;
q 166 drivers/net/tap.c struct tap_queue *q)
q 171 drivers/net/tap.c rcu_assign_pointer(q->tap, tap);
q 172 drivers/net/tap.c rcu_assign_pointer(tap->taps[tap->numvtaps], q);
q 173 drivers/net/tap.c sock_hold(&q->sk);
q 175 drivers/net/tap.c q->file = file;
q 176 drivers/net/tap.c q->queue_index = tap->numvtaps;
q 177 drivers/net/tap.c q->enabled = true;
q 178 drivers/net/tap.c file->private_data = q;
q 179 drivers/net/tap.c list_add_tail(&q->next, &tap->queue_list);
q 187 drivers/net/tap.c static int tap_disable_queue(struct tap_queue *q)
q 193 drivers/net/tap.c if (!q->enabled)
q 196 drivers/net/tap.c tap = rtnl_dereference(q->tap);
q 199 drivers/net/tap.c int index = q->queue_index;
q 206 drivers/net/tap.c q->enabled = false;
q 222 drivers/net/tap.c static void tap_put_queue(struct tap_queue *q)
q 227 drivers/net/tap.c tap = rtnl_dereference(q->tap);
q 230 drivers/net/tap.c if (q->enabled)
q 231 drivers/net/tap.c BUG_ON(tap_disable_queue(q));
q 234 drivers/net/tap.c RCU_INIT_POINTER(q->tap, NULL);
q 235 drivers/net/tap.c sock_put(&q->sk);
q 236 drivers/net/tap.c list_del_init(&q->next);
q 242 drivers/net/tap.c sock_put(&q->sk);
q 300 drivers/net/tap.c struct tap_queue *q, *tmp;
q 303 drivers/net/tap.c list_for_each_entry_safe(q, tmp, &tap->queue_list, next) {
q 304 drivers/net/tap.c list_del_init(&q->next);
q 305 drivers/net/tap.c RCU_INIT_POINTER(q->tap, NULL);
q 306 drivers/net/tap.c if (q->enabled)
q 309 drivers/net/tap.c sock_put(&q->sk);
q 323 drivers/net/tap.c struct tap_queue *q;
q 330 drivers/net/tap.c q = tap_get_queue(tap, skb);
q 331 drivers/net/tap.c if (!q)
q 340 drivers/net/tap.c if (q->flags & IFF_VNET_HDR)
q 349 drivers/net/tap.c if (ptr_ring_produce(&q->ring, skb))
q 359 drivers/net/tap.c if (ptr_ring_produce(&q->ring, segs)) {
q 376 drivers/net/tap.c if (ptr_ring_produce(&q->ring, skb))
q 381 drivers/net/tap.c wake_up_interruptible_poll(sk_sleep(&q->sk), EPOLLIN | EPOLLRDNORM | EPOLLRDBAND);
q 496 drivers/net/tap.c struct tap_queue *q = container_of(sk, struct tap_queue, sk);
q 498 drivers/net/tap.c ptr_ring_cleanup(&q->ring, __skb_array_destroy_skb);
q 505 drivers/net/tap.c struct tap_queue *q;
q 514 drivers/net/tap.c q = (struct tap_queue *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL,
q 516 drivers/net/tap.c if (!q)
q 518 drivers/net/tap.c if (ptr_ring_init(&q->ring, tap->dev->tx_queue_len, GFP_KERNEL)) {
q 519 drivers/net/tap.c sk_free(&q->sk);
q 523 drivers/net/tap.c init_waitqueue_head(&q->sock.wq.wait);
q 524 drivers/net/tap.c q->sock.type = SOCK_RAW;
q 525 drivers/net/tap.c q->sock.state = SS_CONNECTED;
q 526 drivers/net/tap.c q->sock.file = file;
q 527 drivers/net/tap.c q->sock.ops = &tap_socket_ops;
q 528 drivers/net/tap.c sock_init_data(&q->sock, &q->sk);
q 529 drivers/net/tap.c q->sk.sk_write_space = tap_sock_write_space;
q 530 drivers/net/tap.c q->sk.sk_destruct = tap_sock_destruct;
q 531 drivers/net/tap.c q->flags = IFF_VNET_HDR | IFF_NO_PI | IFF_TAP;
q 532 drivers/net/tap.c q->vnet_hdr_sz = sizeof(struct virtio_net_hdr);
q 542 drivers/net/tap.c sock_set_flag(&q->sk, SOCK_ZEROCOPY);
q 544 drivers/net/tap.c err = tap_set_queue(tap, file, q);
q 556 drivers/net/tap.c sock_put(&q->sk);
q 567 drivers/net/tap.c struct tap_queue *q = file->private_data;
q 568 drivers/net/tap.c tap_put_queue(q);
q 574 drivers/net/tap.c struct tap_queue *q = file->private_data;
q 577 drivers/net/tap.c if (!q)
q 581 drivers/net/tap.c poll_wait(file, &q->sock.wq.wait, wait);
q 583 drivers/net/tap.c if (!ptr_ring_empty(&q->ring))
q 586 drivers/net/tap.c if (sock_writeable(&q->sk) ||
q 587 drivers/net/tap.c (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &q->sock.flags) &&
q 588 drivers/net/tap.c sock_writeable(&q->sk)))
q 622 drivers/net/tap.c static ssize_t tap_get_user(struct tap_queue *q, void *msg_control,
q 638 drivers/net/tap.c if (q->flags & IFF_VNET_HDR) {
q 639 drivers/net/tap.c vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz);
q 651 drivers/net/tap.c tap16_to_cpu(q, vnet_hdr.csum_start) +
q 652 drivers/net/tap.c tap16_to_cpu(q, vnet_hdr.csum_offset) + 2 >
q 653 drivers/net/tap.c tap16_to_cpu(q, vnet_hdr.hdr_len))
q 654 drivers/net/tap.c vnet_hdr.hdr_len = cpu_to_tap16(q,
q 655 drivers/net/tap.c tap16_to_cpu(q, vnet_hdr.csum_start) +
q 656 drivers/net/tap.c tap16_to_cpu(q, vnet_hdr.csum_offset) + 2);
q 658 drivers/net/tap.c if (tap16_to_cpu(q, vnet_hdr.hdr_len) > len)
q 666 drivers/net/tap.c if (msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY)) {
q 670 drivers/net/tap.c tap16_to_cpu(q, vnet_hdr.hdr_len) : GOODCOPY_LEN;
q 684 drivers/net/tap.c linear = tap16_to_cpu(q, vnet_hdr.hdr_len);
q 691 drivers/net/tap.c skb = tap_alloc_skb(&q->sk, TAP_RESERVE, copylen,
q 710 drivers/net/tap.c tap_is_little_endian(q));
q 724 drivers/net/tap.c tap = rcu_dereference(q->tap);
q 750 drivers/net/tap.c tap = rcu_dereference(q->tap);
q 761 drivers/net/tap.c struct tap_queue *q = file->private_data;
q 763 drivers/net/tap.c return tap_get_user(q, NULL, from, file->f_flags & O_NONBLOCK);
q 767 drivers/net/tap.c static ssize_t tap_put_user(struct tap_queue *q,
q 776 drivers/net/tap.c if (q->flags & IFF_VNET_HDR) {
q 780 drivers/net/tap.c vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz);
q 785 drivers/net/tap.c tap_is_little_endian(q), true,
q 825 drivers/net/tap.c static ssize_t tap_do_read(struct tap_queue *q,
q 842 drivers/net/tap.c prepare_to_wait(sk_sleep(&q->sk), &wait,
q 846 drivers/net/tap.c skb = ptr_ring_consume(&q->ring);
q 861 drivers/net/tap.c finish_wait(sk_sleep(&q->sk), &wait);
q 865 drivers/net/tap.c ret = tap_put_user(q, skb, to);
q 877 drivers/net/tap.c struct tap_queue *q = file->private_data;
q 880 drivers/net/tap.c ret = tap_do_read(q, to, file->f_flags & O_NONBLOCK, NULL);
q 887 drivers/net/tap.c static struct tap_dev *tap_get_tap_dev(struct tap_queue *q)
q 892 drivers/net/tap.c tap = rtnl_dereference(q->tap);
q 906 drivers/net/tap.c struct tap_queue *q = file->private_data;
q 910 drivers/net/tap.c tap = tap_get_tap_dev(q);
q 915 drivers/net/tap.c ret = tap_enable_queue(tap, file, q);
q 917 drivers/net/tap.c ret = tap_disable_queue(q);
q 925 drivers/net/tap.c static int set_offload(struct tap_queue *q, unsigned long arg)
q 931 drivers/net/tap.c tap = rtnl_dereference(q->tap);
q 979 drivers/net/tap.c struct tap_queue *q = file->private_data;
q 1000 drivers/net/tap.c q->flags = (q->flags & ~TAP_IFFEATURES) | u;
q 1006 drivers/net/tap.c tap = tap_get_tap_dev(q);
q 1013 drivers/net/tap.c u = q->flags;
q 1040 drivers/net/tap.c q->sk.sk_sndbuf = s;
q 1044 drivers/net/tap.c s = q->vnet_hdr_sz;
q 1055 drivers/net/tap.c q->vnet_hdr_sz = s;
q 1059 drivers/net/tap.c s = !!(q->flags & TAP_VNET_LE);
q 1068 drivers/net/tap.c q->flags |= TAP_VNET_LE;
q 1070 drivers/net/tap.c q->flags &= ~TAP_VNET_LE;
q 1074 drivers/net/tap.c return tap_get_vnet_be(q, sp);
q 1077 drivers/net/tap.c return tap_set_vnet_be(q, sp);
q 1086 drivers/net/tap.c ret = set_offload(q, arg);
q 1092 drivers/net/tap.c tap = tap_get_tap_dev(q);
q 1111 drivers/net/tap.c tap = tap_get_tap_dev(q);
q 1148 drivers/net/tap.c static int tap_get_user_xdp(struct tap_queue *q, struct xdp_buff *xdp)
q 1158 drivers/net/tap.c if (q->flags & IFF_VNET_HDR)
q 1159 drivers/net/tap.c vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz);
q 1175 drivers/net/tap.c err = virtio_net_hdr_to_skb(skb, gso, tap_is_little_endian(q));
q 1187 drivers/net/tap.c tap = rcu_dereference(q->tap);
q 1203 drivers/net/tap.c tap = rcu_dereference(q->tap);
q 1213 drivers/net/tap.c struct tap_queue *q = container_of(sock, struct tap_queue, sock);
q 1221 drivers/net/tap.c tap_get_user_xdp(q, xdp);
q 1226 drivers/net/tap.c return tap_get_user(q, ctl ? ctl->ptr : NULL, &m->msg_iter,
q 1233 drivers/net/tap.c struct tap_queue *q = container_of(sock, struct tap_queue, sock);
q 1240 drivers/net/tap.c ret = tap_do_read(q, &m->msg_iter, flags & MSG_DONTWAIT, skb);
q 1250 drivers/net/tap.c struct tap_queue *q = container_of(sock, struct tap_queue,
q 1252 drivers/net/tap.c return PTR_RING_PEEK_CALL(&q->ring, __skb_array_len_with_tag);
q 1268 drivers/net/tap.c struct tap_queue *q;
q 1271 drivers/net/tap.c q = file->private_data;
q 1272 drivers/net/tap.c if (!q)
q 1274 drivers/net/tap.c return &q->sock;
q 1280 drivers/net/tap.c struct tap_queue *q;
q 1284 drivers/net/tap.c q = file->private_data;
q 1285 drivers/net/tap.c if (!q)
q 1287 drivers/net/tap.c return &q->ring;
q 1294 drivers/net/tap.c struct tap_queue *q;
q 1303 drivers/net/tap.c list_for_each_entry(q, &tap->queue_list, next)
q 1304 drivers/net/tap.c rings[i++] = &q->ring;
q 179 drivers/net/usb/catc.c void (*callback)(struct catc *catc, struct ctrl_queue *q);
q 472 drivers/net/usb/catc.c struct ctrl_queue *q = catc->ctrl_queue + catc->ctrl_tail;
q 478 drivers/net/usb/catc.c dr->bRequest = q->request;
q 479 drivers/net/usb/catc.c dr->bRequestType = 0x40 | q->dir;
q 480 drivers/net/usb/catc.c dr->wValue = cpu_to_le16(q->value);
q 481 drivers/net/usb/catc.c dr->wIndex = cpu_to_le16(q->index);
q 482 drivers/net/usb/catc.c dr->wLength = cpu_to_le16(q->len);
q 484 drivers/net/usb/catc.c urb->pipe = q->dir ? usb_rcvctrlpipe(usbdev, 0) : usb_sndctrlpipe(usbdev, 0);
q 485 drivers/net/usb/catc.c urb->transfer_buffer_length = q->len;
q 490 drivers/net/usb/catc.c if (!q->dir && q->buf && q->len)
q 491 drivers/net/usb/catc.c memcpy(catc->ctrl_buf, q->buf, q->len);
q 501 drivers/net/usb/catc.c struct ctrl_queue *q;
q 511 drivers/net/usb/catc.c q = catc->ctrl_queue + catc->ctrl_tail;
q 513 drivers/net/usb/catc.c if (q->dir) {
q 514 drivers/net/usb/catc.c if (q->buf && q->len)
q 515 drivers/net/usb/catc.c memcpy(q->buf, catc->ctrl_buf, q->len);
q 517 drivers/net/usb/catc.c q->buf = catc->ctrl_buf;
q 520 drivers/net/usb/catc.c if (q->callback)
q 521 drivers/net/usb/catc.c q->callback(catc, q);
q 534 drivers/net/usb/catc.c u16 index, void *buf, int len, void (*callback)(struct catc *catc, struct ctrl_queue *q))
q 536 drivers/net/usb/catc.c struct ctrl_queue *q;
q 542 drivers/net/usb/catc.c q = catc->ctrl_queue + catc->ctrl_head;
q 544 drivers/net/usb/catc.c q->dir = dir;
q 545 drivers/net/usb/catc.c q->request = request;
q 546 drivers/net/usb/catc.c q->value = value;
q 547 drivers/net/usb/catc.c q->index = index;
q 548 drivers/net/usb/catc.c q->buf = buf;
q 549 drivers/net/usb/catc.c q->len = len;
q 550 drivers/net/usb/catc.c q->callback = callback;
q 572 drivers/net/usb/catc.c static void catc_stats_done(struct catc *catc, struct ctrl_queue *q)
q 574 drivers/net/usb/catc.c int index = q->index - EthStats;
q 577 drivers/net/usb/catc.c catc->stats_buf[index] = *((char *)q->buf);
q 2231 drivers/net/usb/lan78xx.c static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
q 2237 drivers/net/usb/lan78xx.c spin_lock_irqsave(&q->lock, flags);
q 2238 drivers/net/usb/lan78xx.c while (!skb_queue_empty(q)) {
q 2243 drivers/net/usb/lan78xx.c skb_queue_walk(q, skb) {
q 2260 drivers/net/usb/lan78xx.c spin_unlock_irqrestore(&q->lock, flags);
q 2270 drivers/net/usb/lan78xx.c spin_lock_irqsave(&q->lock, flags);
q 2272 drivers/net/usb/lan78xx.c spin_unlock_irqrestore(&q->lock, flags);
q 704 drivers/net/usb/usbnet.c static int unlink_urbs (struct usbnet *dev, struct sk_buff_head *q)
q 710 drivers/net/usb/usbnet.c spin_lock_irqsave (&q->lock, flags);
q 711 drivers/net/usb/usbnet.c while (!skb_queue_empty(q)) {
q 716 drivers/net/usb/usbnet.c skb_queue_walk(q, skb) {
q 734 drivers/net/usb/usbnet.c spin_unlock_irqrestore(&q->lock, flags);
q 743 drivers/net/usb/usbnet.c spin_lock_irqsave(&q->lock, flags);
q 745 drivers/net/usb/usbnet.c spin_unlock_irqrestore (&q->lock, flags);
q 763 drivers/net/usb/usbnet.c static void wait_skb_queue_empty(struct sk_buff_head *q)
q 767 drivers/net/usb/usbnet.c spin_lock_irqsave(&q->lock, flags);
q 768 drivers/net/usb/usbnet.c while (!skb_queue_empty(q)) {
q 769 drivers/net/usb/usbnet.c spin_unlock_irqrestore(&q->lock, flags);
q 772 drivers/net/usb/usbnet.c spin_lock_irqsave(&q->lock, flags);
q 774 drivers/net/usb/usbnet.c spin_unlock_irqrestore(&q->lock, flags);
q 70 drivers/net/veth.c struct xdp_frame *q[VETH_XDP_TX_BULK_SIZE];
q 456 drivers/net/veth.c sent = veth_xdp_xmit(dev, bq->count, bq->q, 0);
q 461 drivers/net/veth.c xdp_return_frame(bq->q[i]);
q 502 drivers/net/veth.c bq->q[bq->count++] = frame;
q 1390 drivers/net/virtio_net.c static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
q 1392 drivers/net/virtio_net.c if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
q 1394 drivers/net/virtio_net.c else if (q < vi->curr_queue_pairs)
q 3626 drivers/net/wireless/ath/ath10k/mac.c struct sk_buff_head *q = &ar->wmi_mgmt_tx_queue;
q 3631 drivers/net/wireless/ath/ath10k/mac.c if (skb_queue_len(q) == ATH10K_MAX_NUM_MGMT_PENDING) {
q 3637 drivers/net/wireless/ath/ath10k/mac.c __skb_queue_tail(q, skb);
q 632 drivers/net/wireless/ath/ath5k/ath5k.h struct list_head q;
q 831 drivers/net/wireless/ath/ath5k/base.c list_add_tail(&bf->list, &txq->q);
q 1014 drivers/net/wireless/ath/ath5k/base.c INIT_LIST_HEAD(&txq->q);
q 1120 drivers/net/wireless/ath/ath5k/base.c list_for_each_entry_safe(bf, bf0, &txq->q, list) {
q 1741 drivers/net/wireless/ath/ath5k/base.c list_for_each_entry_safe(bf, bf0, &txq->q, list) {
q 853 drivers/net/wireless/ath/ath5k/debug.c list_for_each_entry_safe(bf, bf0, &txq->q, list)
q 41 drivers/net/wireless/ath/ath5k/trace.h struct ath5k_txq *q),
q 43 drivers/net/wireless/ath/ath5k/trace.h TP_ARGS(priv, skb, q),
q 55 drivers/net/wireless/ath/ath5k/trace.h __entry->qnum = (u8) q->qnum;
q 67 drivers/net/wireless/ath/ath5k/trace.h struct ath5k_txq *q, struct ath5k_tx_status *ts),
q 69 drivers/net/wireless/ath/ath5k/trace.h TP_ARGS(priv, skb, q, ts),
q 83 drivers/net/wireless/ath/ath5k/trace.h __entry->qnum = (u8) q->qnum;
q 330 drivers/net/wireless/ath/ath6kl/core.h struct sk_buff_head q;
q 845 drivers/net/wireless/ath/ath6kl/txrx.c static void ath6kl_alloc_netbufs(struct sk_buff_head *q, u16 num)
q 855 drivers/net/wireless/ath/ath6kl/txrx.c skb_queue_tail(q, skb);
q 1028 drivers/net/wireless/ath/ath6kl/txrx.c skb_queue_tail(&rxtid->q, new_skb);
q 1087 drivers/net/wireless/ath/ath6kl/txrx.c skb_queue_tail(&rxtid->q, node->skb);
q 1099 drivers/net/wireless/ath/ath6kl/txrx.c stats->num_delivered += skb_queue_len(&rxtid->q);
q 1101 drivers/net/wireless/ath/ath6kl/txrx.c while ((skb = skb_dequeue(&rxtid->q)))
q 1127 drivers/net/wireless/ath/ath6kl/txrx.c while ((skb = skb_dequeue(&rxtid->q)))
q 1740 drivers/net/wireless/ath/ath6kl/txrx.c if (!skb_queue_empty(&rxtid->q))
q 1763 drivers/net/wireless/ath/ath6kl/txrx.c skb_queue_head_init(&rxtid->q);
q 591 drivers/net/wireless/ath/ath9k/ath9k.h struct ath9k_tx_queue_info *q);
q 28 drivers/net/wireless/ath/ath9k/debug.h #define TX_STAT_INC(sc, q, c) do { (sc)->debug.stats.txstats[q].c++; } while (0)
(sc)->debug.stats.txstats[q].c++; } while (0) q 34 drivers/net/wireless/ath/ath9k/debug.h #define TX_STAT_INC(sc, q, c) do { (void)(sc); } while (0) q 335 drivers/net/wireless/ath/ath9k/htc.h #define TX_QSTAT_INC(q) (priv->debug.tx_stats.queue_stats[q]++) q 46 drivers/net/wireless/ath/ath9k/mac.c u32 ath9k_hw_gettxbuf(struct ath_hw *ah, u32 q) q 48 drivers/net/wireless/ath/ath9k/mac.c return REG_READ(ah, AR_QTXDP(q)); q 52 drivers/net/wireless/ath/ath9k/mac.c void ath9k_hw_puttxbuf(struct ath_hw *ah, u32 q, u32 txdp) q 54 drivers/net/wireless/ath/ath9k/mac.c REG_WRITE(ah, AR_QTXDP(q), txdp); q 58 drivers/net/wireless/ath/ath9k/mac.c void ath9k_hw_txstart(struct ath_hw *ah, u32 q) q 60 drivers/net/wireless/ath/ath9k/mac.c ath_dbg(ath9k_hw_common(ah), QUEUE, "Enable TXE on queue: %u\n", q); q 61 drivers/net/wireless/ath/ath9k/mac.c REG_WRITE(ah, AR_Q_TXE, 1 << q); q 65 drivers/net/wireless/ath/ath9k/mac.c u32 ath9k_hw_numtxpending(struct ath_hw *ah, u32 q) q 69 drivers/net/wireless/ath/ath9k/mac.c npend = REG_READ(ah, AR_QSTS(q)) & AR_Q_STS_PEND_FR_CNT; q 72 drivers/net/wireless/ath/ath9k/mac.c if (REG_READ(ah, AR_Q_TXE) & (1 << q)) q 137 drivers/net/wireless/ath/ath9k/mac.c int i, q; q 152 drivers/net/wireless/ath/ath9k/mac.c for (q = 0; q < AR_NUM_QCU; q++) { q 157 drivers/net/wireless/ath/ath9k/mac.c if (!ath9k_hw_numtxpending(ah, q)) q 170 drivers/net/wireless/ath/ath9k/mac.c bool ath9k_hw_stop_dma_queue(struct ath_hw *ah, u32 q) q 177 drivers/net/wireless/ath/ath9k/mac.c REG_WRITE(ah, AR_Q_TXD, 1 << q); q 183 drivers/net/wireless/ath/ath9k/mac.c if (ath9k_hw_numtxpending(ah, q) == 0) q 196 drivers/net/wireless/ath/ath9k/mac.c bool ath9k_hw_set_txq_props(struct ath_hw *ah, int q, q 203 drivers/net/wireless/ath/ath9k/mac.c qi = &ah->txq[q]; q 206 drivers/net/wireless/ath/ath9k/mac.c "Set TXQ properties, inactive queue: %u\n", q); q 210 drivers/net/wireless/ath/ath9k/mac.c ath_dbg(common, QUEUE, "Set queue properties for: %u\n", q); q 261 drivers/net/wireless/ath/ath9k/mac.c bool ath9k_hw_get_txq_props(struct ath_hw *ah, int q, q 267 drivers/net/wireless/ath/ath9k/mac.c qi = &ah->txq[q]; q 270 drivers/net/wireless/ath/ath9k/mac.c "Get TXQ properties, inactive queue: %u\n", q); q 298 drivers/net/wireless/ath/ath9k/mac.c int q; q 302 drivers/net/wireless/ath/ath9k/mac.c q = ATH9K_NUM_TX_QUEUES - 1; q 305 drivers/net/wireless/ath/ath9k/mac.c q = ATH9K_NUM_TX_QUEUES - 2; q 308 drivers/net/wireless/ath/ath9k/mac.c q = 1; q 311 drivers/net/wireless/ath/ath9k/mac.c q = ATH9K_NUM_TX_QUEUES - 3; q 314 drivers/net/wireless/ath/ath9k/mac.c q = qinfo->tqi_subtype; q 321 drivers/net/wireless/ath/ath9k/mac.c ath_dbg(common, QUEUE, "Setup TX queue: %u\n", q); q 323 drivers/net/wireless/ath/ath9k/mac.c qi = &ah->txq[q]; q 325 drivers/net/wireless/ath/ath9k/mac.c ath_err(common, "TX queue: %u already active\n", q); q 331 drivers/net/wireless/ath/ath9k/mac.c (void) ath9k_hw_set_txq_props(ah, q, qinfo); q 333 drivers/net/wireless/ath/ath9k/mac.c return q; q 337 drivers/net/wireless/ath/ath9k/mac.c static void ath9k_hw_clear_queue_interrupts(struct ath_hw *ah, u32 q) q 339 drivers/net/wireless/ath/ath9k/mac.c ah->txok_interrupt_mask &= ~(1 << q); q 340 drivers/net/wireless/ath/ath9k/mac.c ah->txerr_interrupt_mask &= ~(1 << q); q 341 drivers/net/wireless/ath/ath9k/mac.c ah->txdesc_interrupt_mask &= ~(1 << q); q 342 drivers/net/wireless/ath/ath9k/mac.c ah->txeol_interrupt_mask &= ~(1 << q); q 343 drivers/net/wireless/ath/ath9k/mac.c ah->txurn_interrupt_mask &= ~(1 << q); q 346 
drivers/net/wireless/ath/ath9k/mac.c bool ath9k_hw_releasetxqueue(struct ath_hw *ah, u32 q) q 351 drivers/net/wireless/ath/ath9k/mac.c qi = &ah->txq[q]; q 353 drivers/net/wireless/ath/ath9k/mac.c ath_dbg(common, QUEUE, "Release TXQ, inactive queue: %u\n", q); q 357 drivers/net/wireless/ath/ath9k/mac.c ath_dbg(common, QUEUE, "Release TX queue: %u\n", q); q 360 drivers/net/wireless/ath/ath9k/mac.c ath9k_hw_clear_queue_interrupts(ah, q); q 367 drivers/net/wireless/ath/ath9k/mac.c bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q) q 373 drivers/net/wireless/ath/ath9k/mac.c qi = &ah->txq[q]; q 375 drivers/net/wireless/ath/ath9k/mac.c ath_dbg(common, QUEUE, "Reset TXQ, inactive queue: %u\n", q); q 379 drivers/net/wireless/ath/ath9k/mac.c ath_dbg(common, QUEUE, "Reset TX queue: %u\n", q); q 390 drivers/net/wireless/ath/ath9k/mac.c REG_WRITE(ah, AR_DLCL_IFS(q), q 395 drivers/net/wireless/ath/ath9k/mac.c REG_WRITE(ah, AR_DRETRY_LIMIT(q), q 400 drivers/net/wireless/ath/ath9k/mac.c REG_WRITE(ah, AR_QMISC(q), AR_Q_MISC_DCU_EARLY_TERM_REQ); q 403 drivers/net/wireless/ath/ath9k/mac.c REG_WRITE(ah, AR_DMISC(q), q 406 drivers/net/wireless/ath/ath9k/mac.c REG_WRITE(ah, AR_DMISC(q), q 410 drivers/net/wireless/ath/ath9k/mac.c REG_WRITE(ah, AR_QCBRCFG(q), q 413 drivers/net/wireless/ath/ath9k/mac.c REG_SET_BIT(ah, AR_QMISC(q), AR_Q_MISC_FSP_CBR | q 418 drivers/net/wireless/ath/ath9k/mac.c REG_WRITE(ah, AR_QRDYTIMECFG(q), q 423 drivers/net/wireless/ath/ath9k/mac.c REG_WRITE(ah, AR_DCHNTIME(q), q 429 drivers/net/wireless/ath/ath9k/mac.c REG_SET_BIT(ah, AR_QMISC(q), AR_Q_MISC_RDYTIME_EXP_POLICY); q 432 drivers/net/wireless/ath/ath9k/mac.c REG_SET_BIT(ah, AR_DMISC(q), AR_D_MISC_POST_FR_BKOFF_DIS); q 437 drivers/net/wireless/ath/ath9k/mac.c REG_SET_BIT(ah, AR_DMISC(q), AR_D_MISC_FRAG_BKOFF_EN); q 443 drivers/net/wireless/ath/ath9k/mac.c REG_SET_BIT(ah, AR_QMISC(q), q 448 drivers/net/wireless/ath/ath9k/mac.c REG_SET_BIT(ah, AR_DMISC(q), q 463 drivers/net/wireless/ath/ath9k/mac.c REG_WRITE(ah, AR_DLCL_IFS(q), SM(0, AR_D_LCL_IFS_CWMIN) q 471 drivers/net/wireless/ath/ath9k/mac.c REG_SET_BIT(ah, AR_QMISC(q), q 478 drivers/net/wireless/ath/ath9k/mac.c REG_WRITE(ah, AR_QRDYTIMECFG(q), q 480 drivers/net/wireless/ath/ath9k/mac.c REG_SET_BIT(ah, AR_DMISC(q), q 488 drivers/net/wireless/ath/ath9k/mac.c REG_SET_BIT(ah, AR_QMISC(q), AR_Q_MISC_CBR_INCR_DIS1); q 491 drivers/net/wireless/ath/ath9k/mac.c REG_SET_BIT(ah, AR_DMISC(q), AR_D_MISC_POST_FR_BKOFF_DIS); q 498 drivers/net/wireless/ath/ath9k/mac.c REG_SET_BIT(ah, AR_DMISC(q), q 507 drivers/net/wireless/ath/ath9k/mac.c ath9k_hw_clear_queue_interrupts(ah, q); q 509 drivers/net/wireless/ath/ath9k/mac.c ah->txok_interrupt_mask |= 1 << q; q 510 drivers/net/wireless/ath/ath9k/mac.c ah->txerr_interrupt_mask |= 1 << q; q 513 drivers/net/wireless/ath/ath9k/mac.c ah->txdesc_interrupt_mask |= 1 << q; q 515 drivers/net/wireless/ath/ath9k/mac.c ah->txeol_interrupt_mask |= 1 << q; q 517 drivers/net/wireless/ath/ath9k/mac.c ah->txurn_interrupt_mask |= 1 << q; q 716 drivers/net/wireless/ath/ath9k/mac.h u32 ath9k_hw_gettxbuf(struct ath_hw *ah, u32 q); q 717 drivers/net/wireless/ath/ath9k/mac.h void ath9k_hw_puttxbuf(struct ath_hw *ah, u32 q, u32 txdp); q 718 drivers/net/wireless/ath/ath9k/mac.h void ath9k_hw_txstart(struct ath_hw *ah, u32 q); q 719 drivers/net/wireless/ath/ath9k/mac.h u32 ath9k_hw_numtxpending(struct ath_hw *ah, u32 q); q 721 drivers/net/wireless/ath/ath9k/mac.h bool ath9k_hw_stop_dma_queue(struct ath_hw *ah, u32 q); q 723 drivers/net/wireless/ath/ath9k/mac.h bool 
ath9k_hw_set_txq_props(struct ath_hw *ah, int q, q 725 drivers/net/wireless/ath/ath9k/mac.h bool ath9k_hw_get_txq_props(struct ath_hw *ah, int q, q 729 drivers/net/wireless/ath/ath9k/mac.h bool ath9k_hw_releasetxqueue(struct ath_hw *ah, u32 q); q 730 drivers/net/wireless/ath/ath9k/mac.h bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q); q 105 drivers/net/wireless/ath/ath9k/xmit.c struct sk_buff_head q; q 108 drivers/net/wireless/ath/ath9k/xmit.c __skb_queue_head_init(&q); q 109 drivers/net/wireless/ath/ath9k/xmit.c skb_queue_splice_init(&txq->complete_q, &q); q 112 drivers/net/wireless/ath/ath9k/xmit.c while ((skb = __skb_dequeue(&q))) q 168 drivers/net/wireless/ath/ath9k/xmit.c int q = fi->txq; q 170 drivers/net/wireless/ath/ath9k/xmit.c if (q < 0) q 173 drivers/net/wireless/ath/ath9k/xmit.c txq = sc->tx.txq_map[q]; q 198 drivers/net/wireless/ath/ath9k/xmit.c int q, ret; q 210 drivers/net/wireless/ath/ath9k/xmit.c q = skb_get_queue_mapping(skb); q 211 drivers/net/wireless/ath/ath9k/xmit.c if (tid->txq == sc->tx.txq_map[q]) { q 213 drivers/net/wireless/ath/ath9k/xmit.c fi->txq = q; q 760 drivers/net/wireless/ath/ath9k/xmit.c int q = tid->txq->mac80211_qnum; q 792 drivers/net/wireless/ath/ath9k/xmit.c frmlen = sc->tx.max_aggr_framelen[q][modeidx][rates[i].idx]; q 2278 drivers/net/wireless/ath/ath9k/xmit.c int q, ret; q 2291 drivers/net/wireless/ath/ath9k/xmit.c q = skb_get_queue_mapping(skb); q 2302 drivers/net/wireless/ath/ath9k/xmit.c if (txq == sc->tx.txq_map[q]) { q 2303 drivers/net/wireless/ath/ath9k/xmit.c fi->txq = q; q 373 drivers/net/wireless/ath/carl9170/debug.c #define DEBUGFS_QUEUE_DUMP(q, qi) \ q 374 drivers/net/wireless/ath/carl9170/debug.c static char *carl9170_debugfs_##q ##_##qi ##_read(struct ar9170 *ar, \ q 377 drivers/net/wireless/ath/carl9170/debug.c carl9170_debugfs_queue_dump(ar, buf, len, bufsize, &ar->q[qi]); \ q 380 drivers/net/wireless/ath/carl9170/debug.c DEBUGFS_DECLARE_RO_FILE(q##_##qi, 8000); q 663 drivers/net/wireless/ath/carl9170/tx.c unsigned int r, t, q; q 666 drivers/net/wireless/ath/carl9170/tx.c q = ar9170_qmap[info & CARL9170_TX_STATUS_QUEUE]; q 668 drivers/net/wireless/ath/carl9170/tx.c skb = carl9170_get_queued_skb(ar, cookie, &ar->tx_status[q]); q 1276 drivers/net/wireless/ath/carl9170/tx.c uint8_t q = 0; q 1281 drivers/net/wireless/ath/carl9170/tx.c SET_VAL(CARL9170_TX_SUPER_MISC_QUEUE, q, q 1283 drivers/net/wireless/ath/carl9170/tx.c __carl9170_tx_process_status(ar, super->s.cookie, q); q 1342 drivers/net/wireless/ath/carl9170/tx.c unsigned int i, q; q 1365 drivers/net/wireless/ath/carl9170/tx.c q = __carl9170_get_queue(ar, i); q 1370 drivers/net/wireless/ath/carl9170/tx.c skb_queue_tail(&ar->tx_status[q], skb); q 243 drivers/net/wireless/ath/wil6210/netdev.c bool q; q 251 drivers/net/wireless/ath/wil6210/netdev.c q = queue_work(wil->wmi_wq, &vif->disconnect_worker); q 252 drivers/net/wireless/ath/wil6210/netdev.c wil_dbg_wmi(wil, "queue_work of disconnect_worker -> %d\n", q); q 852 drivers/net/wireless/ath/wil6210/txrx.c bool q = false; q 871 drivers/net/wireless/ath/wil6210/txrx.c q = true; q 879 drivers/net/wireless/ath/wil6210/txrx.c if (q) { q 880 drivers/net/wireless/ath/wil6210/txrx.c q = queue_work(wil->wmi_wq, &vif->enable_tx_key_worker); q 882 drivers/net/wireless/ath/wil6210/txrx.c q); q 1915 drivers/net/wireless/ath/wil6210/wmi.c bool q; q 2016 drivers/net/wireless/ath/wil6210/wmi.c q = queue_work(wil->wmi_wq, &wil->wmi_event_worker); q 2017 drivers/net/wireless/ath/wil6210/wmi.c wil_dbg_wmi(wil, "queue_work -> %d\n", q); q 2738 
drivers/net/wireless/ath/wil6210/wmi.h __le32 q; q 464 drivers/net/wireless/broadcom/b43/debugfs.c cal->ctl.i, cal->ctl.q, q 51 drivers/net/wireless/broadcom/b43/lo.c if (unlikely(abs(control->i) > 16 || abs(control->q) > 16)) { q 53 drivers/net/wireless/broadcom/b43/lo.c "(I: %d, Q: %d)\n", control->i, control->q); q 60 drivers/net/wireless/broadcom/b43/lo.c value = (u8) (control->q); q 573 drivers/net/wireless/broadcom/b43/lo.c .q = -100, q 581 drivers/net/wireless/broadcom/b43/lo.c {.i = 1,.q = 1,}, q 582 drivers/net/wireless/broadcom/b43/lo.c {.i = 1,.q = 0,}, q 583 drivers/net/wireless/broadcom/b43/lo.c {.i = 1,.q = -1,}, q 584 drivers/net/wireless/broadcom/b43/lo.c {.i = 0,.q = -1,}, q 585 drivers/net/wireless/broadcom/b43/lo.c {.i = -1,.q = -1,}, q 586 drivers/net/wireless/broadcom/b43/lo.c {.i = -1,.q = 0,}, q 587 drivers/net/wireless/broadcom/b43/lo.c {.i = -1,.q = 1,}, q 588 drivers/net/wireless/broadcom/b43/lo.c {.i = 0,.q = 1,}, q 613 drivers/net/wireless/broadcom/b43/lo.c test_loctl.q += modifiers[i - 1].q * d->state_val_multiplier; q 615 drivers/net/wireless/broadcom/b43/lo.c test_loctl.q != prev_loctl.q) && q 616 drivers/net/wireless/broadcom/b43/lo.c (abs(test_loctl.i) <= 16 && abs(test_loctl.q) <= 16)) { q 692 drivers/net/wireless/broadcom/b43/lo.c (probe_loctl.q == d.min_loctl.q)) q 728 drivers/net/wireless/broadcom/b43/lo.c .q = 0, q 766 drivers/net/wireless/broadcom/b43/lo.c loctl.i, loctl.q); q 850 drivers/net/wireless/broadcom/b43/lo.c val = (u8)(cal->ctl.q); q 965 drivers/net/wireless/broadcom/b43/lo.c cal->ctl.i, cal->ctl.q); q 15 drivers/net/wireless/broadcom/b43/lo.h s8 q; q 2336 drivers/net/wireless/broadcom/b43/phy_g.c s32 m1, m2, f = 256, q, delta; q 2344 drivers/net/wireless/broadcom/b43/phy_g.c q = b43_tssi2dbm_ad(f * 4096 - q 2346 drivers/net/wireless/broadcom/b43/phy_g.c delta = abs(q - f); q 2347 drivers/net/wireless/broadcom/b43/phy_g.c f = q; q 1792 drivers/net/wireless/broadcom/b43/phy_lp.c buf[i] |= CORDIC_FLOAT((sample.q * max) & 0xFF); q 1519 drivers/net/wireless/broadcom/b43/phy_n.c data[i] |= samples[i].q & 0x3FF; q 1563 drivers/net/wireless/broadcom/b43/phy_n.c samples[i].q = CORDIC_FLOAT(samples[i].q * max); q 24 drivers/net/wireless/broadcom/b43/pio.c static u16 generate_cookie(struct b43_pio_txqueue *q, q 37 drivers/net/wireless/broadcom/b43/pio.c cookie = (((u16)q->index + 1) << 12); q 49 drivers/net/wireless/broadcom/b43/pio.c struct b43_pio_txqueue *q = NULL; q 54 drivers/net/wireless/broadcom/b43/pio.c q = pio->tx_queue_AC_BK; q 57 drivers/net/wireless/broadcom/b43/pio.c q = pio->tx_queue_AC_BE; q 60 drivers/net/wireless/broadcom/b43/pio.c q = pio->tx_queue_AC_VI; q 63 drivers/net/wireless/broadcom/b43/pio.c q = pio->tx_queue_AC_VO; q 66 drivers/net/wireless/broadcom/b43/pio.c q = pio->tx_queue_mcast; q 69 drivers/net/wireless/broadcom/b43/pio.c if (B43_WARN_ON(!q)) q 72 drivers/net/wireless/broadcom/b43/pio.c if (B43_WARN_ON(pack_index >= ARRAY_SIZE(q->packets))) q 74 drivers/net/wireless/broadcom/b43/pio.c *pack = &q->packets[pack_index]; q 76 drivers/net/wireless/broadcom/b43/pio.c return q; q 126 drivers/net/wireless/broadcom/b43/pio.c struct b43_pio_txqueue *q; q 130 drivers/net/wireless/broadcom/b43/pio.c q = kzalloc(sizeof(*q), GFP_KERNEL); q 131 drivers/net/wireless/broadcom/b43/pio.c if (!q) q 133 drivers/net/wireless/broadcom/b43/pio.c q->dev = dev; q 134 drivers/net/wireless/broadcom/b43/pio.c q->rev = dev->dev->core_rev; q 135 drivers/net/wireless/broadcom/b43/pio.c q->mmio_base = index_to_pioqueue_base(dev, index) + q 137 
drivers/net/wireless/broadcom/b43/pio.c q->index = index; q 139 drivers/net/wireless/broadcom/b43/pio.c q->free_packet_slots = B43_PIO_MAX_NR_TXPACKETS; q 140 drivers/net/wireless/broadcom/b43/pio.c if (q->rev >= 8) { q 141 drivers/net/wireless/broadcom/b43/pio.c q->buffer_size = 1920; //FIXME this constant is wrong. q 143 drivers/net/wireless/broadcom/b43/pio.c q->buffer_size = b43_piotx_read16(q, B43_PIO_TXQBUFSIZE); q 144 drivers/net/wireless/broadcom/b43/pio.c q->buffer_size -= 80; q 147 drivers/net/wireless/broadcom/b43/pio.c INIT_LIST_HEAD(&q->packets_list); q 148 drivers/net/wireless/broadcom/b43/pio.c for (i = 0; i < ARRAY_SIZE(q->packets); i++) { q 149 drivers/net/wireless/broadcom/b43/pio.c p = &(q->packets[i]); q 152 drivers/net/wireless/broadcom/b43/pio.c p->queue = q; q 153 drivers/net/wireless/broadcom/b43/pio.c list_add(&p->list, &q->packets_list); q 156 drivers/net/wireless/broadcom/b43/pio.c return q; q 162 drivers/net/wireless/broadcom/b43/pio.c struct b43_pio_rxqueue *q; q 164 drivers/net/wireless/broadcom/b43/pio.c q = kzalloc(sizeof(*q), GFP_KERNEL); q 165 drivers/net/wireless/broadcom/b43/pio.c if (!q) q 167 drivers/net/wireless/broadcom/b43/pio.c q->dev = dev; q 168 drivers/net/wireless/broadcom/b43/pio.c q->rev = dev->dev->core_rev; q 169 drivers/net/wireless/broadcom/b43/pio.c q->mmio_base = index_to_pioqueue_base(dev, index) + q 175 drivers/net/wireless/broadcom/b43/pio.c return q; q 178 drivers/net/wireless/broadcom/b43/pio.c static void b43_pio_cancel_tx_packets(struct b43_pio_txqueue *q) q 183 drivers/net/wireless/broadcom/b43/pio.c for (i = 0; i < ARRAY_SIZE(q->packets); i++) { q 184 drivers/net/wireless/broadcom/b43/pio.c pack = &(q->packets[i]); q 186 drivers/net/wireless/broadcom/b43/pio.c ieee80211_free_txskb(q->dev->wl->hw, pack->skb); q 192 drivers/net/wireless/broadcom/b43/pio.c static void b43_destroy_pioqueue_tx(struct b43_pio_txqueue *q, q 195 drivers/net/wireless/broadcom/b43/pio.c if (!q) q 197 drivers/net/wireless/broadcom/b43/pio.c b43_pio_cancel_tx_packets(q); q 198 drivers/net/wireless/broadcom/b43/pio.c kfree(q); q 201 drivers/net/wireless/broadcom/b43/pio.c static void b43_destroy_pioqueue_rx(struct b43_pio_rxqueue *q, q 204 drivers/net/wireless/broadcom/b43/pio.c if (!q) q 206 drivers/net/wireless/broadcom/b43/pio.c kfree(q); q 290 drivers/net/wireless/broadcom/b43/pio.c struct b43_pio_txqueue *q; q 299 drivers/net/wireless/broadcom/b43/pio.c q = dev->pio.tx_queue_AC_VO; q 302 drivers/net/wireless/broadcom/b43/pio.c q = dev->pio.tx_queue_AC_VI; q 305 drivers/net/wireless/broadcom/b43/pio.c q = dev->pio.tx_queue_AC_BE; q 308 drivers/net/wireless/broadcom/b43/pio.c q = dev->pio.tx_queue_AC_BK; q 312 drivers/net/wireless/broadcom/b43/pio.c q = dev->pio.tx_queue_AC_BE; q 314 drivers/net/wireless/broadcom/b43/pio.c return q; q 317 drivers/net/wireless/broadcom/b43/pio.c static u16 tx_write_2byte_queue(struct b43_pio_txqueue *q, q 322 drivers/net/wireless/broadcom/b43/pio.c struct b43_wldev *dev = q->dev; q 327 drivers/net/wireless/broadcom/b43/pio.c b43_piotx_write16(q, B43_PIO_TXCTL, ctl); q 330 drivers/net/wireless/broadcom/b43/pio.c q->mmio_base + B43_PIO_TXDATA, q 338 drivers/net/wireless/broadcom/b43/pio.c b43_piotx_write16(q, B43_PIO_TXCTL, ctl); q 342 drivers/net/wireless/broadcom/b43/pio.c q->mmio_base + B43_PIO_TXDATA, q 352 drivers/net/wireless/broadcom/b43/pio.c struct b43_pio_txqueue *q = pack->queue; q 357 drivers/net/wireless/broadcom/b43/pio.c ctl = b43_piotx_read16(q, B43_PIO_TXCTL); q 362 drivers/net/wireless/broadcom/b43/pio.c 
q 362 drivers/net/wireless/broadcom/b43/pio.c ctl = tx_write_2byte_queue(q, ctl, hdr, hdrlen);
q 364 drivers/net/wireless/broadcom/b43/pio.c ctl = tx_write_2byte_queue(q, ctl, frame, frame_len);
q 367 drivers/net/wireless/broadcom/b43/pio.c b43_piotx_write16(q, B43_PIO_TXCTL, ctl);
q 370 drivers/net/wireless/broadcom/b43/pio.c static u32 tx_write_4byte_queue(struct b43_pio_txqueue *q,
q 375 drivers/net/wireless/broadcom/b43/pio.c struct b43_wldev *dev = q->dev;
q 381 drivers/net/wireless/broadcom/b43/pio.c b43_piotx_write32(q, B43_PIO8_TXCTL, ctl);
q 384 drivers/net/wireless/broadcom/b43/pio.c q->mmio_base + B43_PIO8_TXDATA,
q 410 drivers/net/wireless/broadcom/b43/pio.c b43_piotx_write32(q, B43_PIO8_TXCTL, ctl);
q 412 drivers/net/wireless/broadcom/b43/pio.c q->mmio_base + B43_PIO8_TXDATA,
q 422 drivers/net/wireless/broadcom/b43/pio.c struct b43_pio_txqueue *q = pack->queue;
q 427 drivers/net/wireless/broadcom/b43/pio.c ctl = b43_piotx_read32(q, B43_PIO8_TXCTL);
q 432 drivers/net/wireless/broadcom/b43/pio.c ctl = tx_write_4byte_queue(q, ctl, hdr, hdrlen);
q 434 drivers/net/wireless/broadcom/b43/pio.c ctl = tx_write_4byte_queue(q, ctl, frame, frame_len);
q 437 drivers/net/wireless/broadcom/b43/pio.c b43_piotx_write32(q, B43_PIO_TXCTL, ctl);
q 440 drivers/net/wireless/broadcom/b43/pio.c static int pio_tx_frame(struct b43_pio_txqueue *q,
q 443 drivers/net/wireless/broadcom/b43/pio.c struct b43_wldev *dev = q->dev;
q 452 drivers/net/wireless/broadcom/b43/pio.c B43_WARN_ON(list_empty(&q->packets_list));
q 453 drivers/net/wireless/broadcom/b43/pio.c pack = list_entry(q->packets_list.next,
q 456 drivers/net/wireless/broadcom/b43/pio.c cookie = generate_cookie(q, pack);
q 473 drivers/net/wireless/broadcom/b43/pio.c if (q->rev >= 8)
q 483 drivers/net/wireless/broadcom/b43/pio.c q->buffer_used += roundup(skb->len + hdrlen, 4);
q 484 drivers/net/wireless/broadcom/b43/pio.c q->free_packet_slots -= 1;
q 491 drivers/net/wireless/broadcom/b43/pio.c struct b43_pio_txqueue *q;
q 501 drivers/net/wireless/broadcom/b43/pio.c q = dev->pio.tx_queue_mcast;
q 507 drivers/net/wireless/broadcom/b43/pio.c q = select_queue_by_priority(dev, skb_get_queue_mapping(skb));
q 513 drivers/net/wireless/broadcom/b43/pio.c if (unlikely(total_len > q->buffer_size)) {
q 518 drivers/net/wireless/broadcom/b43/pio.c if (unlikely(q->free_packet_slots == 0)) {
q 523 drivers/net/wireless/broadcom/b43/pio.c B43_WARN_ON(q->buffer_used > q->buffer_size);
q 525 drivers/net/wireless/broadcom/b43/pio.c if (total_len > (q->buffer_size - q->buffer_used)) {
q 529 drivers/net/wireless/broadcom/b43/pio.c q->stopped = true;
q 536 drivers/net/wireless/broadcom/b43/pio.c q->queue_prio = skb_get_queue_mapping(skb);
q 538 drivers/net/wireless/broadcom/b43/pio.c err = pio_tx_frame(q, skb);
q 551 drivers/net/wireless/broadcom/b43/pio.c B43_WARN_ON(q->buffer_used > q->buffer_size);
q 552 drivers/net/wireless/broadcom/b43/pio.c if (((q->buffer_size - q->buffer_used) < roundup(2 + 2 + 6, 4)) ||
q 553 drivers/net/wireless/broadcom/b43/pio.c (q->free_packet_slots == 0)) {
q 556 drivers/net/wireless/broadcom/b43/pio.c q->stopped = true;
q 566 drivers/net/wireless/broadcom/b43/pio.c struct b43_pio_txqueue *q;
q 571 drivers/net/wireless/broadcom/b43/pio.c q = parse_cookie(dev, status->cookie, &pack);
q 572 drivers/net/wireless/broadcom/b43/pio.c if (unlikely(!q))
q 582 drivers/net/wireless/broadcom/b43/pio.c q->buffer_used -= total_len;
q 583 drivers/net/wireless/broadcom/b43/pio.c q->free_packet_slots += 1;
q 587 drivers/net/wireless/broadcom/b43/pio.c list_add(&pack->list, &q->packets_list);
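The pio.c hits at lines 483-556 show the TX admission logic: every frame is accounted in 4-byte-rounded units against buffer_used, and the queue stops itself when either slots or buffer space run out. A minimal sketch of those checks (error codes and exact branch order are assumptions):

/* Sketch of the admission bookkeeping around pio_tx_frame(). */
static int pio_try_tx(struct b43_pio_txqueue *q, u32 total_len)
{
	if (total_len > q->buffer_size)
		return -EMSGSIZE;	/* frame can never fit */
	if (q->free_packet_slots == 0)
		return -EBUSY;		/* no descriptor slot left */
	if (total_len > q->buffer_size - q->buffer_used)
		return -EBUSY;		/* stop queue, retry on TX status */

	q->buffer_used += roundup(total_len, 4);	/* 4-byte units */
	q->free_packet_slots -= 1;
	return 0;
}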
q 589 drivers/net/wireless/broadcom/b43/pio.c if (q->stopped) {
q 590 drivers/net/wireless/broadcom/b43/pio.c ieee80211_wake_queue(dev->wl->hw, q->queue_prio);
q 591 drivers/net/wireless/broadcom/b43/pio.c q->stopped = false;
q 596 drivers/net/wireless/broadcom/b43/pio.c static bool pio_rx_frame(struct b43_pio_rxqueue *q)
q 598 drivers/net/wireless/broadcom/b43/pio.c struct b43_wldev *dev = q->dev;
q 622 drivers/net/wireless/broadcom/b43/pio.c if (q->rev >= 8) {
q 625 drivers/net/wireless/broadcom/b43/pio.c ctl = b43_piorx_read32(q, B43_PIO8_RXCTL);
q 628 drivers/net/wireless/broadcom/b43/pio.c b43_piorx_write32(q, B43_PIO8_RXCTL,
q 631 drivers/net/wireless/broadcom/b43/pio.c ctl = b43_piorx_read32(q, B43_PIO8_RXCTL);
q 639 drivers/net/wireless/broadcom/b43/pio.c ctl = b43_piorx_read16(q, B43_PIO_RXCTL);
q 642 drivers/net/wireless/broadcom/b43/pio.c b43_piorx_write16(q, B43_PIO_RXCTL,
q 645 drivers/net/wireless/broadcom/b43/pio.c ctl = b43_piorx_read16(q, B43_PIO_RXCTL);
q 651 drivers/net/wireless/broadcom/b43/pio.c b43dbg(q->dev->wl, "PIO RX timed out\n");
q 656 drivers/net/wireless/broadcom/b43/pio.c if (q->rev >= 8) {
q 658 drivers/net/wireless/broadcom/b43/pio.c q->mmio_base + B43_PIO8_RXDATA,
q 662 drivers/net/wireless/broadcom/b43/pio.c q->mmio_base + B43_PIO_RXDATA,
q 686 drivers/net/wireless/broadcom/b43/pio.c if (!(q->dev->wl->filter_flags & FIF_FCSFAIL)) {
q 704 drivers/net/wireless/broadcom/b43/pio.c if (q->rev >= 8) {
q 706 drivers/net/wireless/broadcom/b43/pio.c q->mmio_base + B43_PIO8_RXDATA,
q 714 drivers/net/wireless/broadcom/b43/pio.c q->mmio_base + B43_PIO8_RXDATA,
q 733 drivers/net/wireless/broadcom/b43/pio.c q->mmio_base + B43_PIO_RXDATA,
q 741 drivers/net/wireless/broadcom/b43/pio.c q->mmio_base + B43_PIO_RXDATA,
q 747 drivers/net/wireless/broadcom/b43/pio.c b43_rx(q->dev, skb, rxhdr);
q 753 drivers/net/wireless/broadcom/b43/pio.c b43dbg(q->dev->wl, "PIO RX error: %s\n", err_msg);
q 754 drivers/net/wireless/broadcom/b43/pio.c if (q->rev >= 8)
q 755 drivers/net/wireless/broadcom/b43/pio.c b43_piorx_write32(q, B43_PIO8_RXCTL, B43_PIO8_RXCTL_DATARDY);
q 757 drivers/net/wireless/broadcom/b43/pio.c b43_piorx_write16(q, B43_PIO_RXCTL, B43_PIO_RXCTL_DATARDY);
q 762 drivers/net/wireless/broadcom/b43/pio.c void b43_pio_rx(struct b43_pio_rxqueue *q)
q 768 drivers/net/wireless/broadcom/b43/pio.c stop = (pio_rx_frame(q) == 0);
q 777 drivers/net/wireless/broadcom/b43/pio.c static void b43_pio_tx_suspend_queue(struct b43_pio_txqueue *q)
q 779 drivers/net/wireless/broadcom/b43/pio.c if (q->rev >= 8) {
q 780 drivers/net/wireless/broadcom/b43/pio.c b43_piotx_write32(q, B43_PIO8_TXCTL,
q 781 drivers/net/wireless/broadcom/b43/pio.c b43_piotx_read32(q, B43_PIO8_TXCTL)
q 784 drivers/net/wireless/broadcom/b43/pio.c b43_piotx_write16(q, B43_PIO_TXCTL,
q 785 drivers/net/wireless/broadcom/b43/pio.c b43_piotx_read16(q, B43_PIO_TXCTL)
q 790 drivers/net/wireless/broadcom/b43/pio.c static void b43_pio_tx_resume_queue(struct b43_pio_txqueue *q)
q 792 drivers/net/wireless/broadcom/b43/pio.c if (q->rev >= 8) {
q 793 drivers/net/wireless/broadcom/b43/pio.c b43_piotx_write32(q, B43_PIO8_TXCTL,
q 794 drivers/net/wireless/broadcom/b43/pio.c b43_piotx_read32(q, B43_PIO8_TXCTL)
q 797 drivers/net/wireless/broadcom/b43/pio.c b43_piotx_write16(q, B43_PIO_TXCTL,
q 798 drivers/net/wireless/broadcom/b43/pio.c b43_piotx_read16(q, B43_PIO_TXCTL)
q 109 drivers/net/wireless/broadcom/b43/pio.h static inline u16 b43_piotx_read16(struct b43_pio_txqueue *q, u16 offset)
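The pio_rx_frame() hits (pio.c lines 622-651) show a poll of the RX control register that can time out. A sketch of that ready-poll, assuming a bounded retry loop with a small delay (the actual retry budget is not visible in the hits):

/* Sketch: spin until DATARDY appears in RXCTL or the budget expires. */
static bool pio_wait_data_ready(struct b43_pio_rxqueue *q)
{
	int i;

	for (i = 0; i < 1000; i++) {	/* assumed retry budget */
		u32 ctl = b43_piorx_read32(q, B43_PIO8_RXCTL);

		if (ctl & B43_PIO8_RXCTL_DATARDY)
			return true;
		udelay(10);		/* assumed poll interval */
	}
	b43dbg(q->dev->wl, "PIO RX timed out\n");
	return false;
}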
q 111 drivers/net/wireless/broadcom/b43/pio.h return b43_read16(q->dev, q->mmio_base + offset);
q 114 drivers/net/wireless/broadcom/b43/pio.h static inline u32 b43_piotx_read32(struct b43_pio_txqueue *q, u16 offset)
q 116 drivers/net/wireless/broadcom/b43/pio.h return b43_read32(q->dev, q->mmio_base + offset);
q 119 drivers/net/wireless/broadcom/b43/pio.h static inline void b43_piotx_write16(struct b43_pio_txqueue *q,
q 122 drivers/net/wireless/broadcom/b43/pio.h b43_write16(q->dev, q->mmio_base + offset, value);
q 125 drivers/net/wireless/broadcom/b43/pio.h static inline void b43_piotx_write32(struct b43_pio_txqueue *q,
q 128 drivers/net/wireless/broadcom/b43/pio.h b43_write32(q->dev, q->mmio_base + offset, value);
q 132 drivers/net/wireless/broadcom/b43/pio.h static inline u16 b43_piorx_read16(struct b43_pio_rxqueue *q, u16 offset)
q 134 drivers/net/wireless/broadcom/b43/pio.h return b43_read16(q->dev, q->mmio_base + offset);
q 137 drivers/net/wireless/broadcom/b43/pio.h static inline u32 b43_piorx_read32(struct b43_pio_rxqueue *q, u16 offset)
q 139 drivers/net/wireless/broadcom/b43/pio.h return b43_read32(q->dev, q->mmio_base + offset);
q 142 drivers/net/wireless/broadcom/b43/pio.h static inline void b43_piorx_write16(struct b43_pio_rxqueue *q,
q 145 drivers/net/wireless/broadcom/b43/pio.h b43_write16(q->dev, q->mmio_base + offset, value);
q 148 drivers/net/wireless/broadcom/b43/pio.h static inline void b43_piorx_write32(struct b43_pio_rxqueue *q,
q 151 drivers/net/wireless/broadcom/b43/pio.h b43_write32(q->dev, q->mmio_base + offset, value);
q 161 drivers/net/wireless/broadcom/b43/pio.h void b43_pio_rx(struct b43_pio_rxqueue *q);
q 39 drivers/net/wireless/broadcom/b43/sdio.c const struct b43_sdio_quirk *q;
q 41 drivers/net/wireless/broadcom/b43/sdio.c for (q = b43_sdio_quirks; q->quirks; q++) {
q 42 drivers/net/wireless/broadcom/b43/sdio.c if (vendor == q->vendor && device == q->device)
q 43 drivers/net/wireless/broadcom/b43/sdio.c return q->quirks;
q 1947 drivers/net/wireless/broadcom/b43legacy/phy.c s32 q;
q 1956 drivers/net/wireless/broadcom/b43legacy/phy.c q = b43legacy_tssi2dbm_ad(f * 4096 -
q 1959 drivers/net/wireless/broadcom/b43legacy/phy.c delta = abs(q - f);
q 1960 drivers/net/wireless/broadcom/b43legacy/phy.c f = q;
q 29 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c struct list_head q;
q 89 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c list_add_tail(&event->q, &fweh->event_q);
q 200 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c struct brcmf_fweh_queue_item, q);
q 201 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c list_del(&event->q);
q 643 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c static void brcmf_fws_psq_flush(struct brcmf_fws_info *fws, struct pktq *q,
q 653 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c for (prec = 0; prec < q->num_prec; prec++) {
q 654 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c skb = brcmu_pktq_pdeq_match(q, prec, matchfn, &ifidx);
q 660 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c skb = brcmu_pktq_pdeq_match(q, prec, matchfn, &ifidx);
q 1280 drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c queue = &pq->q[prec].skblist;
q 2715 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c static bool brcmf_sdio_prec_enq(struct pktq *q, struct sk_buff *pkt, int prec)
q 2723 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c if (!pktq_pfull(q, prec) && !pktq_full(q)) {
q 2724 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c brcmu_pktq_penq(q, prec, pkt);
q 2729 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c if (pktq_pfull(q, prec)) {
q 2731 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c } else if (pktq_full(q)) {
q 2732 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c p = brcmu_pktq_peek_tail(q, &eprec);
q 2743 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c p = brcmu_pktq_pdeq_tail(q, eprec);
q 2750 drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c p = brcmu_pktq_penq(q, prec, pkt);
q 373 drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c struct list_head *q, int *counter)
q 378 drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c if (list_empty(q)) {
q 382 drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c req = list_entry(q->next, struct brcmf_usbreq, list);
q 383 drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c list_del_init(q->next);
q 392 drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c struct list_head *q, struct brcmf_usbreq *req,
q 397 drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c list_add_tail(&req->list, q);
q 404 drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c brcmf_usbdev_qinit(struct list_head *q, int qsize)
q 421 drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c list_add_tail(&req->list, q);
q 427 drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c while (!list_empty(q)) {
q 428 drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c req = list_entry(q->next, struct brcmf_usbreq, list);
q 431 drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c list_del(q->next);
q 438 drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c static void brcmf_usb_free_q(struct list_head *q)
q 442 drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c list_for_each_entry_safe(req, next, q, list) {
q 234 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_int.h int q;
q 356 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_int.h s8 q;
q 869 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_int.h s32 q;
q 3437 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c q_samp = (u16)(CORDIC_FLOAT(tone_samp.q * max_val) & 0x3ff);
q 23033 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c (((unsigned int)tone_buf[t].q) & 0x3ff);
q 23081 drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c tone_buf[t].q = (s32)CORDIC_FLOAT(tone_buf[t].q * max_val);
q 50 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c struct sk_buff_head *q;
q 55 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c q = &pq->q[prec].skblist;
q 56 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c skb_queue_tail(q, p);
q 69 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c struct sk_buff_head *q;
q 74 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c q = &pq->q[prec].skblist;
q 75 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c skb_queue_head(q, p);
q 87 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c struct sk_buff_head *q;
q 90 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c q = &pq->q[prec].skblist;
q 91 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c p = skb_dequeue(q);
q 110 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c struct sk_buff_head *q;
q 113 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c q = &pq->q[prec].skblist;
q 114 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c skb_queue_walk_safe(q, p, next) {
q 116 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c skb_unlink(p, q);
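The brcmf_sdio_prec_enq() hits above (sdio.c lines 2715-2750) sketch an enqueue-with-eviction policy over the brcmu pktq precedence queues: when the queue overflows, the tail of an equal-or-lower precedence level is dropped to make room. A reconstruction; the eprec ordering checks between the visible hits are assumptions:

/* Sketch of precedence enqueue with tail eviction on overflow. */
static bool prec_enq(struct pktq *q, struct sk_buff *pkt, int prec)
{
	struct sk_buff *p;
	int eprec = -1;			/* precedence to evict from */

	if (!pktq_pfull(q, prec) && !pktq_full(q)) {
		brcmu_pktq_penq(q, prec, pkt);
		return true;
	}

	if (pktq_pfull(q, prec)) {
		eprec = prec;		/* this level itself overflowed */
	} else if (pktq_full(q)) {
		p = brcmu_pktq_peek_tail(q, &eprec);	/* lowest tail */
		if (!p)
			return false;
	}
	if (eprec < 0 || eprec > prec)
		return false;		/* nothing droppable at our level */

	p = brcmu_pktq_pdeq_tail(q, eprec);
	dev_kfree_skb(p);		/* assumed drop path */
	brcmu_pktq_penq(q, prec, pkt);
	return true;
}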
q 127 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c struct sk_buff_head *q;
q 130 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c q = &pq->q[prec].skblist;
q 131 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c p = skb_dequeue_tail(q);
q 144 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c struct sk_buff_head *q;
q 147 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c q = &pq->q[prec].skblist;
q 148 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c skb_queue_walk_safe(q, p, next) {
q 150 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c skb_unlink(p, q);
q 173 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c offsetof(struct pktq, q) + (sizeof(struct pktq_prec) * num_prec));
q 180 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c pq->q[prec].max = pq->max;
q 181 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c skb_queue_head_init(&pq->q[prec].skblist);
q 194 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c if (!skb_queue_empty(&pq->q[prec].skblist))
q 200 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c return skb_peek_tail(&pq->q[prec].skblist);
q 213 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c len += pq->q[prec].skblist.qlen;
q 223 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c struct sk_buff_head *q;
q 231 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c skb_queue_empty(&pq->q[prec].skblist))
q 235 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c skb_queue_empty(&pq->q[prec].skblist))
q 239 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c q = &pq->q[prec].skblist;
q 240 drivers/net/wireless/broadcom/brcm80211/brcmutil/utils.c p = skb_dequeue(q);
q 71 drivers/net/wireless/broadcom/brcm80211/include/brcmu_utils.h struct pktq_prec q[PKTQ_MAX_PREC];
q 78 drivers/net/wireless/broadcom/brcm80211/include/brcmu_utils.h return pq->q[prec].skblist.qlen;
q 83 drivers/net/wireless/broadcom/brcm80211/include/brcmu_utils.h return pq->q[prec].max - pq->q[prec].skblist.qlen;
q 88 drivers/net/wireless/broadcom/brcm80211/include/brcmu_utils.h return pq->q[prec].skblist.qlen >= pq->q[prec].max;
q 93 drivers/net/wireless/broadcom/brcm80211/include/brcmu_utils.h return skb_queue_empty(&pq->q[prec].skblist);
q 98 drivers/net/wireless/broadcom/brcm80211/include/brcmu_utils.h return skb_peek(&pq->q[prec].skblist);
q 103 drivers/net/wireless/broadcom/brcm80211/include/brcmu_utils.h return skb_peek_tail(&pq->q[prec].skblist);
q 4320 drivers/net/wireless/intel/ipw2x00/ipw2100.c struct ipw2100_status_queue *q = &priv->status_queue;
q 4324 drivers/net/wireless/intel/ipw2x00/ipw2100.c q->size = entries * sizeof(struct ipw2100_status);
q 4325 drivers/net/wireless/intel/ipw2x00/ipw2100.c q->drv = pci_zalloc_consistent(priv->pci_dev, q->size, &q->nic);
q 4326 drivers/net/wireless/intel/ipw2x00/ipw2100.c if (!q->drv) {
q 4351 drivers/net/wireless/intel/ipw2x00/ipw2100.c struct ipw2100_bd_queue *q, int entries)
q 4355 drivers/net/wireless/intel/ipw2x00/ipw2100.c memset(q, 0, sizeof(struct ipw2100_bd_queue));
q 4357 drivers/net/wireless/intel/ipw2x00/ipw2100.c q->entries = entries;
q 4358 drivers/net/wireless/intel/ipw2x00/ipw2100.c q->size = entries * sizeof(struct ipw2100_bd);
q 4359 drivers/net/wireless/intel/ipw2x00/ipw2100.c q->drv = pci_zalloc_consistent(priv->pci_dev, q->size, &q->nic);
q 4360 drivers/net/wireless/intel/ipw2x00/ipw2100.c if (!q->drv) {
q 4371 drivers/net/wireless/intel/ipw2x00/ipw2100.c static void bd_queue_free(struct ipw2100_priv *priv, struct ipw2100_bd_queue *q)
q 4375 drivers/net/wireless/intel/ipw2x00/ipw2100.c if (!q)
q 4378 drivers/net/wireless/intel/ipw2x00/ipw2100.c if (q->drv) {
q 4379 drivers/net/wireless/intel/ipw2x00/ipw2100.c pci_free_consistent(priv->pci_dev, q->size, q->drv, q->nic);
q 4380 drivers/net/wireless/intel/ipw2x00/ipw2100.c q->drv = NULL;
q 4387 drivers/net/wireless/intel/ipw2x00/ipw2100.c struct ipw2100_bd_queue *q, u32 base, u32 size,
q 4392 drivers/net/wireless/intel/ipw2x00/ipw2100.c IPW_DEBUG_INFO("initializing bd queue at virt=%p, phys=%08x\n", q->drv,
q 4393 drivers/net/wireless/intel/ipw2x00/ipw2100.c (u32) q->nic);
q 4395 drivers/net/wireless/intel/ipw2x00/ipw2100.c write_register(priv->net_dev, base, q->nic);
q 4396 drivers/net/wireless/intel/ipw2x00/ipw2100.c write_register(priv->net_dev, size, q->entries);
q 4397 drivers/net/wireless/intel/ipw2x00/ipw2100.c write_register(priv->net_dev, r, q->oldest);
q 4398 drivers/net/wireless/intel/ipw2x00/ipw2100.c write_register(priv->net_dev, w, q->next);
q 3699 drivers/net/wireless/intel/ipw2x00/ipw2200.c static int ipw_rx_queue_space(const struct ipw_rx_queue *q)
q 3701 drivers/net/wireless/intel/ipw2x00/ipw2200.c int s = q->read - q->write;
q 3711 drivers/net/wireless/intel/ipw2x00/ipw2200.c static inline int ipw_tx_queue_space(const struct clx2_queue *q)
q 3713 drivers/net/wireless/intel/ipw2x00/ipw2200.c int s = q->last_used - q->first_empty;
q 3715 drivers/net/wireless/intel/ipw2x00/ipw2200.c s += q->n_bd;
q 3741 drivers/net/wireless/intel/ipw2x00/ipw2200.c static void ipw_queue_init(struct ipw_priv *priv, struct clx2_queue *q,
q 3744 drivers/net/wireless/intel/ipw2x00/ipw2200.c q->n_bd = count;
q 3746 drivers/net/wireless/intel/ipw2x00/ipw2200.c q->low_mark = q->n_bd / 4;
q 3747 drivers/net/wireless/intel/ipw2x00/ipw2200.c if (q->low_mark < 4)
q 3748 drivers/net/wireless/intel/ipw2x00/ipw2200.c q->low_mark = 4;
q 3750 drivers/net/wireless/intel/ipw2x00/ipw2200.c q->high_mark = q->n_bd / 8;
q 3751 drivers/net/wireless/intel/ipw2x00/ipw2200.c if (q->high_mark < 2)
q 3752 drivers/net/wireless/intel/ipw2x00/ipw2200.c q->high_mark = 2;
q 3754 drivers/net/wireless/intel/ipw2x00/ipw2200.c q->first_empty = q->last_used = 0;
q 3755 drivers/net/wireless/intel/ipw2x00/ipw2200.c q->reg_r = read;
q 3756 drivers/net/wireless/intel/ipw2x00/ipw2200.c q->reg_w = write;
q 3758 drivers/net/wireless/intel/ipw2x00/ipw2200.c ipw_write32(priv, base, q->dma_addr);
q 3767 drivers/net/wireless/intel/ipw2x00/ipw2200.c struct clx2_tx_queue *q,
q 3772 drivers/net/wireless/intel/ipw2x00/ipw2200.c q->txb = kmalloc_array(count, sizeof(q->txb[0]), GFP_KERNEL);
q 3773 drivers/net/wireless/intel/ipw2x00/ipw2200.c if (!q->txb) {
q 3778 drivers/net/wireless/intel/ipw2x00/ipw2200.c q->bd =
q 3779 drivers/net/wireless/intel/ipw2x00/ipw2200.c pci_alloc_consistent(dev, sizeof(q->bd[0]) * count, &q->q.dma_addr);
q 3780 drivers/net/wireless/intel/ipw2x00/ipw2200.c if (!q->bd) {
q 3782 drivers/net/wireless/intel/ipw2x00/ipw2200.c sizeof(q->bd[0]) * count);
q 3783 drivers/net/wireless/intel/ipw2x00/ipw2200.c kfree(q->txb);
q 3784 drivers/net/wireless/intel/ipw2x00/ipw2200.c q->txb = NULL;
q 3788 drivers/net/wireless/intel/ipw2x00/ipw2200.c ipw_queue_init(priv, &q->q, count, read, write, base, size);
q 3802 drivers/net/wireless/intel/ipw2x00/ipw2200.c struct tfd_frame *bd = &txq->bd[txq->q.last_used];
q 3824 drivers/net/wireless/intel/ipw2x00/ipw2200.c if (txq->txb[txq->q.last_used]) {
q 3825 drivers/net/wireless/intel/ipw2x00/ipw2200.c libipw_txb_free(txq->txb[txq->q.last_used]);
q 3826 drivers/net/wireless/intel/ipw2x00/ipw2200.c txq->txb[txq->q.last_used] = NULL;
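The ipw_tx_queue_space() hits above (ipw2200.c lines 3711-3715) show the classic circular-ring free-space computation: distance from first_empty back around to last_used, wrapped by n_bd. A sketch; the guard-slot subtraction between the visible hits is an assumption (rings of this style keep a slot or two reserved so full and empty are distinguishable):

/* Sketch of the TX ring free-space calculation. */
static int tx_queue_space(const struct clx2_queue *q)
{
	int s = q->last_used - q->first_empty;

	if (s <= 0)
		s += q->n_bd;	/* producer has wrapped past the end */
	s -= 2;			/* assumed guard slots */
	if (s < 0)
		s = 0;
	return s;
}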
q 3842 drivers/net/wireless/intel/ipw2x00/ipw2200.c struct clx2_queue *q = &txq->q;
q 3845 drivers/net/wireless/intel/ipw2x00/ipw2200.c if (q->n_bd == 0)
q 3849 drivers/net/wireless/intel/ipw2x00/ipw2200.c for (; q->first_empty != q->last_used;
q 3850 drivers/net/wireless/intel/ipw2x00/ipw2200.c q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) {
q 3855 drivers/net/wireless/intel/ipw2x00/ipw2200.c pci_free_consistent(dev, sizeof(txq->bd[0]) * q->n_bd, txq->bd,
q 3856 drivers/net/wireless/intel/ipw2x00/ipw2200.c q->dma_addr);
q 5011 drivers/net/wireless/intel/ipw2x00/ipw2200.c struct clx2_queue *q = &txq->q;
q 5013 drivers/net/wireless/intel/ipw2x00/ipw2200.c hw_tail = ipw_read32(priv, q->reg_r);
q 5014 drivers/net/wireless/intel/ipw2x00/ipw2200.c if (hw_tail >= q->n_bd) {
q 5017 drivers/net/wireless/intel/ipw2x00/ipw2200.c hw_tail, q->n_bd);
q 5020 drivers/net/wireless/intel/ipw2x00/ipw2200.c for (; q->last_used != hw_tail;
q 5021 drivers/net/wireless/intel/ipw2x00/ipw2200.c q->last_used = ipw_queue_inc_wrap(q->last_used, q->n_bd)) {
q 5026 drivers/net/wireless/intel/ipw2x00/ipw2200.c if ((ipw_tx_queue_space(q) > q->low_mark) &&
q 5029 drivers/net/wireless/intel/ipw2x00/ipw2200.c used = q->first_empty - q->last_used;
q 5031 drivers/net/wireless/intel/ipw2x00/ipw2200.c used += q->n_bd;
q 5040 drivers/net/wireless/intel/ipw2x00/ipw2200.c struct clx2_queue *q = &txq->q;
q 5043 drivers/net/wireless/intel/ipw2x00/ipw2200.c if (ipw_tx_queue_space(q) < (sync ? 1 : 2)) {
q 5048 drivers/net/wireless/intel/ipw2x00/ipw2200.c tfd = &txq->bd[q->first_empty];
q 5049 drivers/net/wireless/intel/ipw2x00/ipw2200.c txq->txb[q->first_empty] = NULL;
q 5058 drivers/net/wireless/intel/ipw2x00/ipw2200.c q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
q 5059 drivers/net/wireless/intel/ipw2x00/ipw2200.c ipw_write32(priv, q->reg_w, q->first_empty);
q 10105 drivers/net/wireless/intel/ipw2x00/ipw2200.c struct clx2_queue *q = &txq->q;
q 10135 drivers/net/wireless/intel/ipw2x00/ipw2200.c tfd = &txq->bd[q->first_empty];
q 10136 drivers/net/wireless/intel/ipw2x00/ipw2200.c txq->txb[q->first_empty] = txb;
q 10272 drivers/net/wireless/intel/ipw2x00/ipw2200.c q->first_empty = ipw_queue_inc_wrap(q->first_empty, q->n_bd);
q 10273 drivers/net/wireless/intel/ipw2x00/ipw2200.c ipw_write32(priv, q->reg_w, q->first_empty);
q 10275 drivers/net/wireless/intel/ipw2x00/ipw2200.c if (ipw_tx_queue_space(q) < q->high_mark)
q 10296 drivers/net/wireless/intel/ipw2x00/ipw2200.c if (ipw_tx_queue_space(&txq->q) < txq->q.high_mark)
q 11778 drivers/net/wireless/intel/ipw2x00/ipw2200.c struct list_head *p, *q;
q 11832 drivers/net/wireless/intel/ipw2x00/ipw2200.c list_for_each_safe(p, q, &priv->ibss_mac_hash[i]) {
q 519 drivers/net/wireless/intel/ipw2x00/ipw2200.h struct clx2_queue q;
q 455 drivers/net/wireless/intel/iwlegacy/3945-mac.c struct il_queue *q = NULL;
q 514 drivers/net/wireless/intel/iwlegacy/3945-mac.c q = &txq->q;
q 516 drivers/net/wireless/intel/iwlegacy/3945-mac.c if ((il_queue_space(q) < q->high_mark))
q 521 drivers/net/wireless/intel/iwlegacy/3945-mac.c idx = il_get_cmd_idx(q, q->write_ptr, 0);
q 523 drivers/net/wireless/intel/iwlegacy/3945-mac.c txq->skbs[q->write_ptr] = skb;
q 541 drivers/net/wireless/intel/iwlegacy/3945-mac.c (QUEUE_TO_SEQ(txq_id) | IDX_TO_SEQ(q->write_ptr)));
q 618 drivers/net/wireless/intel/iwlegacy/3945-mac.c q->write_ptr = il_queue_inc_wrap(q->write_ptr, q->n_bd);
q 622 drivers/net/wireless/intel/iwlegacy/3945-mac.c if (il_queue_space(q) < q->high_mark && il->mac80211_registered) {
q 275 drivers/net/wireless/intel/iwlegacy/3945.c struct il_queue *q = &txq->q;
q 280 drivers/net/wireless/intel/iwlegacy/3945.c for (idx = il_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
q 281 drivers/net/wireless/intel/iwlegacy/3945.c q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd)) {
q 283 drivers/net/wireless/intel/iwlegacy/3945.c skb = txq->skbs[txq->q.read_ptr];
q 285 drivers/net/wireless/intel/iwlegacy/3945.c txq->skbs[txq->q.read_ptr] = NULL;
q 289 drivers/net/wireless/intel/iwlegacy/3945.c if (il_queue_space(q) > q->low_mark && txq_id >= 0 &&
q 311 drivers/net/wireless/intel/iwlegacy/3945.c if (idx >= txq->q.n_bd || il_queue_used(&txq->q, idx) == 0) {
q 314 drivers/net/wireless/intel/iwlegacy/3945.c txq->q.n_bd, txq->q.write_ptr, txq->q.read_ptr);
q 332 drivers/net/wireless/intel/iwlegacy/3945.c info = IEEE80211_SKB_CB(txq->skbs[txq->q.read_ptr]);
q 601 drivers/net/wireless/intel/iwlegacy/3945.c struct il_queue *q;
q 604 drivers/net/wireless/intel/iwlegacy/3945.c q = &txq->q;
q 606 drivers/net/wireless/intel/iwlegacy/3945.c tfd = &tfd_tmp[q->write_ptr];
q 639 drivers/net/wireless/intel/iwlegacy/3945.c int idx = txq->q.read_ptr;
q 668 drivers/net/wireless/intel/iwlegacy/3945.c struct sk_buff *skb = txq->skbs[txq->q.read_ptr];
q 673 drivers/net/wireless/intel/iwlegacy/3945.c txq->skbs[txq->q.read_ptr] = NULL;
q 2199 drivers/net/wireless/intel/iwlegacy/3945.c int txq_id = txq->q.id;
q 2203 drivers/net/wireless/intel/iwlegacy/3945.c shared_data->tx_base_ptr[txq_id] = cpu_to_le32((u32) txq->q.dma_addr);
q 1649 drivers/net/wireless/intel/iwlegacy/4965-mac.c struct il_queue *q;
q 1750 drivers/net/wireless/intel/iwlegacy/4965-mac.c q = &txq->q;
q 1752 drivers/net/wireless/intel/iwlegacy/4965-mac.c if (unlikely(il_queue_space(q) < q->high_mark)) {
q 1765 drivers/net/wireless/intel/iwlegacy/4965-mac.c txq->skbs[q->write_ptr] = skb;
q 1768 drivers/net/wireless/intel/iwlegacy/4965-mac.c out_cmd = txq->cmd[q->write_ptr];
q 1769 drivers/net/wireless/intel/iwlegacy/4965-mac.c out_meta = &txq->meta[q->write_ptr];
q 1783 drivers/net/wireless/intel/iwlegacy/4965-mac.c (QUEUE_TO_SEQ(txq_id) | IDX_TO_SEQ(q->write_ptr)));
q 1875 drivers/net/wireless/intel/iwlegacy/4965-mac.c q->write_ptr = il_queue_inc_wrap(q->write_ptr, q->n_bd);
q 1896 drivers/net/wireless/intel/iwlegacy/4965-mac.c if (il_queue_space(q) < q->high_mark && il->mac80211_registered) {
q 2187 drivers/net/wireless/intel/iwlegacy/4965-mac.c il->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
q 2188 drivers/net/wireless/intel/iwlegacy/4965-mac.c il->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
q 2299 drivers/net/wireless/intel/iwlegacy/4965-mac.c il->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
q 2300 drivers/net/wireless/intel/iwlegacy/4965-mac.c il->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
q 2354 drivers/net/wireless/intel/iwlegacy/4965-mac.c write_ptr = il->txq[txq_id].q.write_ptr;
q 2355 drivers/net/wireless/intel/iwlegacy/4965-mac.c read_ptr = il->txq[txq_id].q.read_ptr;
q 2392 drivers/net/wireless/intel/iwlegacy/4965-mac.c struct il_queue *q = &il->txq[txq_id].q;
q 2403 drivers/net/wireless/intel/iwlegacy/4965-mac.c q->read_ptr == q->write_ptr) {
q 2458 drivers/net/wireless/intel/iwlegacy/4965-mac.c struct il_queue *q = &txq->q;
q 2463 drivers/net/wireless/intel/iwlegacy/4965-mac.c if (idx >= q->n_bd || il_queue_used(q, idx) == 0) {
q 2465 drivers/net/wireless/intel/iwlegacy/4965-mac.c "is out of range [0-%d] %d %d.\n", txq_id, idx, q->n_bd,
q 2466 drivers/net/wireless/intel/iwlegacy/4965-mac.c q->write_ptr, q->read_ptr);
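The reclaim loops above step read_ptr forward with il_queue_inc_wrap() until it meets the target index. The helper bodies are not visible in the hits; a sketch matching their names, keeping indexes within [0, n_bd):

/* Sketch of the wrap helpers assumed by the reclaim loops. */
static inline int queue_inc_wrap(int index, int n_bd)
{
	return (index + 1) % n_bd;
}

static inline int queue_dec_wrap(int index, int n_bd)
{
	return (index + n_bd - 1) % n_bd;
}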
q 2470 drivers/net/wireless/intel/iwlegacy/4965-mac.c for (idx = il_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
q 2471 drivers/net/wireless/intel/iwlegacy/4965-mac.c q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd)) {
q 2473 drivers/net/wireless/intel/iwlegacy/4965-mac.c skb = txq->skbs[txq->q.read_ptr];
q 2484 drivers/net/wireless/intel/iwlegacy/4965-mac.c txq->skbs[txq->q.read_ptr] = NULL;
q 2777 drivers/net/wireless/intel/iwlegacy/4965-mac.c if (idx >= txq->q.n_bd || il_queue_used(&txq->q, idx) == 0) {
q 2780 drivers/net/wireless/intel/iwlegacy/4965-mac.c txq->q.n_bd, txq->q.write_ptr, txq->q.read_ptr);
q 2786 drivers/net/wireless/intel/iwlegacy/4965-mac.c skb = txq->skbs[txq->q.read_ptr];
q 2830 drivers/net/wireless/intel/iwlegacy/4965-mac.c if (txq->q.read_ptr != (scd_ssn & 0xff)) {
q 2831 drivers/net/wireless/intel/iwlegacy/4965-mac.c idx = il_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd);
q 2840 drivers/net/wireless/intel/iwlegacy/4965-mac.c il_queue_space(&txq->q) > txq->q.low_mark &&
q 2864 drivers/net/wireless/intel/iwlegacy/4965-mac.c il_queue_space(&txq->q) > txq->q.low_mark)
q 2946 drivers/net/wireless/intel/iwlegacy/4965-mac.c idx = il_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);
q 2966 drivers/net/wireless/intel/iwlegacy/4965-mac.c if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {
q 2971 drivers/net/wireless/intel/iwlegacy/4965-mac.c if (il_queue_space(&txq->q) > txq->q.low_mark &&
q 3920 drivers/net/wireless/intel/iwlegacy/4965-mac.c int idx = txq->q.read_ptr;
q 3949 drivers/net/wireless/intel/iwlegacy/4965-mac.c struct sk_buff *skb = txq->skbs[txq->q.read_ptr];
q 3954 drivers/net/wireless/intel/iwlegacy/4965-mac.c txq->skbs[txq->q.read_ptr] = NULL;
q 3963 drivers/net/wireless/intel/iwlegacy/4965-mac.c struct il_queue *q;
q 3967 drivers/net/wireless/intel/iwlegacy/4965-mac.c q = &txq->q;
q 3969 drivers/net/wireless/intel/iwlegacy/4965-mac.c tfd = &tfd_tmp[q->write_ptr];
q 4002 drivers/net/wireless/intel/iwlegacy/4965-mac.c int txq_id = txq->q.id;
q 4005 drivers/net/wireless/intel/iwlegacy/4965-mac.c il_wr(il, FH49_MEM_CBBC_QUEUE(txq_id), txq->q.dma_addr >> 8);
q 6294 drivers/net/wireless/intel/iwlegacy/4965-mac.c int txq_id = txq->q.id;
q 1539 drivers/net/wireless/intel/iwlegacy/4965.c int txq_id = txq->q.id;
q 1540 drivers/net/wireless/intel/iwlegacy/4965.c int write_ptr = txq->q.write_ptr;
q 166 drivers/net/wireless/intel/iwlegacy/commands.h #define QUEUE_TO_SEQ(q) (((q) & 0x1f) << 8)
q 2538 drivers/net/wireless/intel/iwlegacy/common.c il_rx_queue_space(const struct il_rx_queue *q)
q 2540 drivers/net/wireless/intel/iwlegacy/common.c int s = q->read - q->write;
q 2555 drivers/net/wireless/intel/iwlegacy/common.c il_rx_queue_update_write_ptr(struct il_priv *il, struct il_rx_queue *q)
q 2561 drivers/net/wireless/intel/iwlegacy/common.c spin_lock_irqsave(&q->lock, flags);
q 2563 drivers/net/wireless/intel/iwlegacy/common.c if (q->need_update == 0)
q 2578 drivers/net/wireless/intel/iwlegacy/common.c q->write_actual = (q->write & ~0x7);
q 2579 drivers/net/wireless/intel/iwlegacy/common.c il_wr(il, rx_wrt_ptr_reg, q->write_actual);
q 2584 drivers/net/wireless/intel/iwlegacy/common.c q->write_actual = (q->write & ~0x7);
q 2585 drivers/net/wireless/intel/iwlegacy/common.c il_wr(il, rx_wrt_ptr_reg, q->write_actual);
q 2588 drivers/net/wireless/intel/iwlegacy/common.c q->need_update = 0;
q 2591 drivers/net/wireless/intel/iwlegacy/common.c spin_unlock_irqrestore(&q->lock, flags);
q 2713 drivers/net/wireless/intel/iwlegacy/common.c int txq_id = txq->q.id;
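The il_rx_queue_update_write_ptr() hits (common.c lines 2555-2591) show that the hardware is only told about the RX write pointer in 8-descriptor granularity: write_actual is write rounded down with & ~0x7. A sketch of that publication step (locking and the surrounding power-state branches are elided):

/* Sketch: publish the RX write pointer, aligned down to 8 entries. */
static void rx_queue_publish(struct il_priv *il, struct il_rx_queue *q,
			     u32 rx_wrt_ptr_reg)
{
	q->write_actual = q->write & ~0x7;	/* trails write by <= 7 */
	il_wr(il, rx_wrt_ptr_reg, q->write_actual);
	q->need_update = 0;
}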
q 2733 drivers/net/wireless/intel/iwlegacy/common.c il_wr(il, HBUS_TARG_WRPTR, txq->q.write_ptr | (txq_id << 8));
q 2741 drivers/net/wireless/intel/iwlegacy/common.c _il_wr(il, HBUS_TARG_WRPTR, txq->q.write_ptr | (txq_id << 8));
q 2753 drivers/net/wireless/intel/iwlegacy/common.c struct il_queue *q = &txq->q;
q 2755 drivers/net/wireless/intel/iwlegacy/common.c if (q->n_bd == 0)
q 2758 drivers/net/wireless/intel/iwlegacy/common.c while (q->write_ptr != q->read_ptr) {
q 2760 drivers/net/wireless/intel/iwlegacy/common.c q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd);
q 2789 drivers/net/wireless/intel/iwlegacy/common.c if (txq->q.n_bd)
q 2790 drivers/net/wireless/intel/iwlegacy/common.c dma_free_coherent(dev, il->hw_params.tfd_size * txq->q.n_bd,
q 2791 drivers/net/wireless/intel/iwlegacy/common.c txq->tfds, txq->q.dma_addr);
q 2815 drivers/net/wireless/intel/iwlegacy/common.c struct il_queue *q = &txq->q;
q 2818 drivers/net/wireless/intel/iwlegacy/common.c if (q->n_bd == 0)
q 2821 drivers/net/wireless/intel/iwlegacy/common.c while (q->read_ptr != q->write_ptr) {
q 2822 drivers/net/wireless/intel/iwlegacy/common.c i = il_get_cmd_idx(q, q->read_ptr, 0);
q 2832 drivers/net/wireless/intel/iwlegacy/common.c q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd);
q 2835 drivers/net/wireless/intel/iwlegacy/common.c i = q->n_win;
q 2870 drivers/net/wireless/intel/iwlegacy/common.c if (txq->q.n_bd)
q 2871 drivers/net/wireless/intel/iwlegacy/common.c dma_free_coherent(dev, il->hw_params.tfd_size * txq->q.n_bd,
q 2872 drivers/net/wireless/intel/iwlegacy/common.c txq->tfds, txq->q.dma_addr);
q 2909 drivers/net/wireless/intel/iwlegacy/common.c il_queue_space(const struct il_queue *q)
q 2911 drivers/net/wireless/intel/iwlegacy/common.c int s = q->read_ptr - q->write_ptr;
q 2913 drivers/net/wireless/intel/iwlegacy/common.c if (q->read_ptr > q->write_ptr)
q 2914 drivers/net/wireless/intel/iwlegacy/common.c s -= q->n_bd;
q 2917 drivers/net/wireless/intel/iwlegacy/common.c s += q->n_win;
q 2931 drivers/net/wireless/intel/iwlegacy/common.c il_queue_init(struct il_priv *il, struct il_queue *q, int slots, u32 id)
q 2939 drivers/net/wireless/intel/iwlegacy/common.c q->n_bd = TFD_QUEUE_SIZE_MAX;
q 2941 drivers/net/wireless/intel/iwlegacy/common.c q->n_win = slots;
q 2942 drivers/net/wireless/intel/iwlegacy/common.c q->id = id;
q 2948 drivers/net/wireless/intel/iwlegacy/common.c q->low_mark = q->n_win / 4;
q 2949 drivers/net/wireless/intel/iwlegacy/common.c if (q->low_mark < 4)
q 2950 drivers/net/wireless/intel/iwlegacy/common.c q->low_mark = 4;
q 2952 drivers/net/wireless/intel/iwlegacy/common.c q->high_mark = q->n_win / 8;
q 2953 drivers/net/wireless/intel/iwlegacy/common.c if (q->high_mark < 2)
q 2954 drivers/net/wireless/intel/iwlegacy/common.c q->high_mark = 2;
q 2956 drivers/net/wireless/intel/iwlegacy/common.c q->write_ptr = q->read_ptr = 0;
q 2986 drivers/net/wireless/intel/iwlegacy/common.c dma_alloc_coherent(dev, tfd_sz, &txq->q.dma_addr, GFP_KERNEL);
q 2990 drivers/net/wireless/intel/iwlegacy/common.c txq->q.id = id;
q 3062 drivers/net/wireless/intel/iwlegacy/common.c il_queue_init(il, &txq->q, slots, txq_id);
q 3099 drivers/net/wireless/intel/iwlegacy/common.c il_queue_init(il, &txq->q, slots, txq_id);
q 3121 drivers/net/wireless/intel/iwlegacy/common.c struct il_queue *q = &txq->q;
q 3150 drivers/net/wireless/intel/iwlegacy/common.c if (il_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
q 3158 drivers/net/wireless/intel/iwlegacy/common.c idx = il_get_cmd_idx(q, q->write_ptr, cmd->flags & CMD_SIZE_HUGE);
q 3182 drivers/net/wireless/intel/iwlegacy/common.c cpu_to_le16(QUEUE_TO_SEQ(il->cmd_queue) | IDX_TO_SEQ(q->write_ptr));
q 3197 drivers/net/wireless/intel/iwlegacy/common.c q->write_ptr, idx, il->cmd_queue);
q 3203 drivers/net/wireless/intel/iwlegacy/common.c le16_to_cpu(out_cmd->hdr.sequence), fix_size, q->write_ptr,
q 3228 drivers/net/wireless/intel/iwlegacy/common.c q->write_ptr = il_queue_inc_wrap(q->write_ptr, q->n_bd);
q 3247 drivers/net/wireless/intel/iwlegacy/common.c struct il_queue *q = &txq->q;
q 3250 drivers/net/wireless/intel/iwlegacy/common.c if (idx >= q->n_bd || il_queue_used(q, idx) == 0) {
q 3252 drivers/net/wireless/intel/iwlegacy/common.c "is out of range [0-%d] %d %d.\n", txq_id, idx, q->n_bd,
q 3253 drivers/net/wireless/intel/iwlegacy/common.c q->write_ptr, q->read_ptr);
q 3257 drivers/net/wireless/intel/iwlegacy/common.c for (idx = il_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
q 3258 drivers/net/wireless/intel/iwlegacy/common.c q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd)) {
q 3262 drivers/net/wireless/intel/iwlegacy/common.c q->write_ptr, q->read_ptr);
q 3297 drivers/net/wireless/intel/iwlegacy/common.c txq_id, il->cmd_queue, sequence, il->txq[il->cmd_queue].q.read_ptr,
q 3298 drivers/net/wireless/intel/iwlegacy/common.c il->txq[il->cmd_queue].q.write_ptr)) {
q 3303 drivers/net/wireless/intel/iwlegacy/common.c cmd_idx = il_get_cmd_idx(&txq->q, idx, huge);
q 4497 drivers/net/wireless/intel/iwlegacy/common.c int q;
q 4511 drivers/net/wireless/intel/iwlegacy/common.c q = AC_NUM - 1 - queue;
q 4515 drivers/net/wireless/intel/iwlegacy/common.c il->qos_data.def_qos_parm.ac[q].cw_min =
q 4517 drivers/net/wireless/intel/iwlegacy/common.c il->qos_data.def_qos_parm.ac[q].cw_max =
q 4519 drivers/net/wireless/intel/iwlegacy/common.c il->qos_data.def_qos_parm.ac[q].aifsn = params->aifs;
q 4520 drivers/net/wireless/intel/iwlegacy/common.c il->qos_data.def_qos_parm.ac[q].edca_txop =
q 4523 drivers/net/wireless/intel/iwlegacy/common.c il->qos_data.def_qos_parm.ac[q].reserved1 = 0;
q 4767 drivers/net/wireless/intel/iwlegacy/common.c struct il_queue *q;
q 4772 drivers/net/wireless/intel/iwlegacy/common.c q = &il->txq[i].q;
q 4773 drivers/net/wireless/intel/iwlegacy/common.c if (q->read_ptr == q->write_ptr)
q 4777 drivers/net/wireless/intel/iwlegacy/common.c IL_ERR("Failed to flush queue %d\n", q->id);
q 4797 drivers/net/wireless/intel/iwlegacy/common.c struct il_queue *q = &txq->q;
q 4802 drivers/net/wireless/intel/iwlegacy/common.c if (q->read_ptr == q->write_ptr) {
q 4812 drivers/net/wireless/intel/iwlegacy/common.c IL_ERR("Queue %d stuck for %u ms.\n", q->id,
q 151 drivers/net/wireless/intel/iwlegacy/common.h struct il_queue q;
q 849 drivers/net/wireless/intel/iwlegacy/common.h int il_queue_space(const struct il_queue *q);
q 851 drivers/net/wireless/intel/iwlegacy/common.h il_queue_used(const struct il_queue *q, int i)
q 853 drivers/net/wireless/intel/iwlegacy/common.h return q->write_ptr >= q->read_ptr ? (i >= q->read_ptr &&
q 854 drivers/net/wireless/intel/iwlegacy/common.h i < q->write_ptr) : !(i <
q 855 drivers/net/wireless/intel/iwlegacy/common.h q->read_ptr
q 857 drivers/net/wireless/intel/iwlegacy/common.h q->
q 862 drivers/net/wireless/intel/iwlegacy/common.h il_get_cmd_idx(struct il_queue *q, u32 idx, int is_huge)
q 870 drivers/net/wireless/intel/iwlegacy/common.h return q->n_win; /* must be power of 2 */
q 873 drivers/net/wireless/intel/iwlegacy/common.h return idx & (q->n_win - 1);
q 1740 drivers/net/wireless/intel/iwlegacy/common.h void il_rx_queue_update_write_ptr(struct il_priv *il, struct il_rx_queue *q);
q 1741 drivers/net/wireless/intel/iwlegacy/common.h int il_rx_queue_space(const struct il_rx_queue *q);
q 818 drivers/net/wireless/intel/iwlegacy/debug.c struct il_queue *q;
q 836 drivers/net/wireless/intel/iwlegacy/debug.c q = &txq->q;
q 841 drivers/net/wireless/intel/iwlegacy/debug.c q->read_ptr, q->write_ptr,
q 1166 drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c int q;
q 1183 drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c q = AC_NUM - 1 - queue;
q 1187 drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c ctx->qos_data.def_qos_parm.ac[q].cw_min =
q 1189 drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c ctx->qos_data.def_qos_parm.ac[q].cw_max =
q 1191 drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c ctx->qos_data.def_qos_parm.ac[q].aifsn = params->aifs;
q 1192 drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c ctx->qos_data.def_qos_parm.ac[q].edca_txop =
q 1195 drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c ctx->qos_data.def_qos_parm.ac[q].reserved1 = 0;
q 467 drivers/net/wireless/intel/iwlwifi/dvm/tx.c int q;
q 469 drivers/net/wireless/intel/iwlwifi/dvm/tx.c for (q = IWLAGN_FIRST_AMPDU_QUEUE;
q 470 drivers/net/wireless/intel/iwlwifi/dvm/tx.c q < priv->trans->trans_cfg->base_params->num_of_queues; q++) {
q 471 drivers/net/wireless/intel/iwlwifi/dvm/tx.c if (!test_and_set_bit(q, priv->agg_q_alloc)) {
q 472 drivers/net/wireless/intel/iwlwifi/dvm/tx.c priv->queue_to_mac80211[q] = mq;
q 473 drivers/net/wireless/intel/iwlwifi/dvm/tx.c return q;
q 480 drivers/net/wireless/intel/iwlwifi/dvm/tx.c static void iwlagn_dealloc_agg_txq(struct iwl_priv *priv, int q)
q 482 drivers/net/wireless/intel/iwlwifi/dvm/tx.c clear_bit(q, priv->agg_q_alloc);
q 483 drivers/net/wireless/intel/iwlwifi/dvm/tx.c priv->queue_to_mac80211[q] = IWL_INVALID_MAC80211_QUEUE;
q 686 drivers/net/wireless/intel/iwlwifi/dvm/tx.c int q, fifo;
q 693 drivers/net/wireless/intel/iwlwifi/dvm/tx.c q = priv->tid_data[sta_priv->sta_id][tid].agg.txq_id;
q 699 drivers/net/wireless/intel/iwlwifi/dvm/tx.c iwl_trans_txq_enable(priv->trans, q, fifo, sta_priv->sta_id, tid,
q 74 drivers/net/wireless/intel/iwlwifi/fw/api/cmdhdr.h #define QUEUE_TO_SEQ(q) (((q) & 0x1f) << 8)
q 586 drivers/net/wireless/intel/iwlwifi/iwl-csr.h #define MSIX_FH_INT_CAUSES_Q(q) (q)
q 388 drivers/net/wireless/intel/iwlwifi/iwl-fh.h #define RFH_Q_FRBDCB_BA_LSB(q) (RFH_Q0_FRBDCB_BA_LSB + (q) * 8)
q 391 drivers/net/wireless/intel/iwlwifi/iwl-fh.h #define RFH_Q_FRBDCB_WIDX(q) (RFH_Q0_FRBDCB_WIDX + (q) * 4)
q 394 drivers/net/wireless/intel/iwlwifi/iwl-fh.h #define RFH_Q_FRBDCB_WIDX_TRG(q) (RFH_Q0_FRBDCB_WIDX_TRG + (q) * 4)
q 397 drivers/net/wireless/intel/iwlwifi/iwl-fh.h #define RFH_Q_FRBDCB_RIDX(q) (RFH_Q0_FRBDCB_RIDX + (q) * 4)
q 400 drivers/net/wireless/intel/iwlwifi/iwl-fh.h #define RFH_Q_URBDCB_BA_LSB(q) (RFH_Q0_URBDCB_BA_LSB + (q) * 8)
q 403 drivers/net/wireless/intel/iwlwifi/iwl-fh.h #define RFH_Q_URBDCB_WIDX(q) (RFH_Q0_URBDCB_WIDX + (q) * 4)
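The il_queue_used() fragments above (common.h lines 851-857) reassemble into a window test: an index is "used" when it lies between read_ptr (inclusive) and write_ptr (exclusive), with the complemented form handling wrap-around; only the short connective on the elided line 856 is inferred. Together with the power-of-two slot masking from il_get_cmd_idx() (line 873):

/* Reassembled sketch of the used-window predicate and slot mask. */
static inline int queue_used(const struct il_queue *q, int i)
{
	return q->write_ptr >= q->read_ptr ?
		(i >= q->read_ptr && i < q->write_ptr) :
		!(i < q->read_ptr && i >= q->write_ptr);
}

static inline int get_cmd_idx(const struct il_queue *q, u32 idx)
{
	return idx & (q->n_win - 1);	/* n_win must be a power of 2 */
}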
q 405 drivers/net/wireless/intel/iwlwifi/iwl-fh.h #define RFH_Q_URBDCB_VAID(q) (RFH_Q0_URBDCB_VAID + (q) * 4)
q 408 drivers/net/wireless/intel/iwlwifi/iwl-fh.h #define RFH_Q_URBD_STTS_WPTR_LSB(q) (RFH_Q0_URBD_STTS_WPTR_LSB + (q) * 8)
q 411 drivers/net/wireless/intel/iwlwifi/iwl-fh.h #define RFH_Q_ORB_WPTR_LSB(q) (RFH_Q0_ORB_WPTR_LSB + (q) * 8)
q 413 drivers/net/wireless/intel/iwlwifi/iwl-fh.h #define RFH_RBDBUF_RBD_LSB(q) (RFH_RBDBUF_RBD0_LSB + (q) * 8)
q 322 drivers/net/wireless/intel/iwlwifi/iwl-io.c #define IWL_CMD_MQ(arg, reg, q) { if (arg == reg(q)) return #reg; }
q 352 drivers/net/wireless/intel/iwlwifi/iwl-io.c int i, q;
q 395 drivers/net/wireless/intel/iwlwifi/iwl-io.c for (q = 0; q < num_q; q++) {
q 398 drivers/net/wireless/intel/iwlwifi/iwl-io.c addr += q * (rfh_mq_tbl[i].is64 ? 8 : 4);
q 401 drivers/net/wireless/intel/iwlwifi/iwl-io.c get_rfh_string(addr), q,
q 416 drivers/net/wireless/intel/iwlwifi/iwl-io.c for (q = 0; q < num_q; q++) {
q 419 drivers/net/wireless/intel/iwlwifi/iwl-io.c addr += q * (rfh_mq_tbl[i].is64 ? 8 : 4);
q 421 drivers/net/wireless/intel/iwlwifi/iwl-io.c get_rfh_string(addr), q,
q 152 drivers/net/wireless/intel/iwlwifi/mvm/d3.c const u8 *tmp = ptk_pn->q[i].pn[tid];
q 1301 drivers/net/wireless/intel/iwlwifi/mvm/d3.c memcpy(ptk_pn->q[i].pn[tid],
q 3435 drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c int tid, q;
q 3439 drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c ptk_pn = kzalloc(struct_size(ptk_pn, q,
q 3449 drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c for (q = 0; q < mvm->trans->num_rx_queues; q++)
q 3450 drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c memcpy(ptk_pn->q[q].pn[tid],
q 159 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c res = memcmp(pn, ptk_pn->q[queue].pn[tid], IEEE80211_CCMP_PN_LEN);
q 165 drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c memcpy(ptk_pn->q[queue].pn[tid], pn, IEEE80211_CCMP_PN_LEN);
q 1657 drivers/net/wireless/intel/iwlwifi/mvm/sta.c int q;
q 1672 drivers/net/wireless/intel/iwlwifi/mvm/sta.c for (q = 0; q < mvm->trans->num_rx_queues; q++)
q 1673 drivers/net/wireless/intel/iwlwifi/mvm/sta.c memset(dup_data[q].last_seq, 0xff,
q 1674 drivers/net/wireless/intel/iwlwifi/mvm/sta.c sizeof(dup_data[q].last_seq));
q 339 drivers/net/wireless/intel/iwlwifi/mvm/sta.h } ____cacheline_aligned_in_smp q[];
q 908 drivers/net/wireless/intel/iwlwifi/pcie/internal.h static inline u16 iwl_pcie_get_cmd_index(const struct iwl_txq *q, u32 index)
q 910 drivers/net/wireless/intel/iwlwifi/pcie/internal.h return index & (q->n_window - 1);
q 999 drivers/net/wireless/intel/iwlwifi/pcie/internal.h static inline bool iwl_queue_used(const struct iwl_txq *q, int i)
q 1001 drivers/net/wireless/intel/iwlwifi/pcie/internal.h int index = iwl_pcie_get_cmd_index(q, i);
q 1002 drivers/net/wireless/intel/iwlwifi/pcie/internal.h int r = iwl_pcie_get_cmd_index(q, q->read_ptr);
q 1003 drivers/net/wireless/intel/iwlwifi/pcie/internal.h int w = iwl_pcie_get_cmd_index(q, q->write_ptr);
q 1076 drivers/net/wireless/intel/iwlwifi/pcie/internal.h int iwl_queue_space(struct iwl_trans *trans, const struct iwl_txq *q);
q 105 drivers/net/wireless/intel/iwlwifi/pcie/tx.c int iwl_queue_space(struct iwl_trans *trans, const struct iwl_txq *q)
q 116 drivers/net/wireless/intel/iwlwifi/pcie/tx.c if (q->n_window < trans->trans_cfg->base_params->max_tfd_queue_size)
q 117 drivers/net/wireless/intel/iwlwifi/pcie/tx.c max = q->n_window;
q 125 drivers/net/wireless/intel/iwlwifi/pcie/tx.c used = (q->write_ptr - q->read_ptr) &
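The iwl_queue_space() hits above (pcie/tx.c lines 105-125) show a power-of-two ring: occupancy is a masked pointer difference, and the usable window may be clamped below the TFD ring size. A sketch; the final return and overflow guard are assumptions since line 125 is cut:

/* Sketch of the PCIe TX queue free-space computation. */
static int queue_space(int write_ptr, int read_ptr,
		       int tfd_queue_size, int n_window)
{
	int max = tfd_queue_size - 1;
	int used;

	if (n_window < tfd_queue_size)
		max = n_window;		/* smaller usable window */

	/* tfd_queue_size is a power of two, so the mask wraps for us */
	used = (write_ptr - read_ptr) & (tfd_queue_size - 1);
	return used > max ? 0 : max - used;	/* assumed guard */
}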
q 137 drivers/net/wireless/intel/iwlwifi/pcie/tx.c static int iwl_queue_init(struct iwl_txq *q, int slots_num)
q 139 drivers/net/wireless/intel/iwlwifi/pcie/tx.c q->n_window = slots_num;
q 146 drivers/net/wireless/intel/iwlwifi/pcie/tx.c q->low_mark = q->n_window / 4;
q 147 drivers/net/wireless/intel/iwlwifi/pcie/tx.c if (q->low_mark < 4)
q 148 drivers/net/wireless/intel/iwlwifi/pcie/tx.c q->low_mark = 4;
q 150 drivers/net/wireless/intel/iwlwifi/pcie/tx.c q->high_mark = q->n_window / 8;
q 151 drivers/net/wireless/intel/iwlwifi/pcie/tx.c if (q->high_mark < 2)
q 152 drivers/net/wireless/intel/iwlwifi/pcie/tx.c q->high_mark = 2;
q 154 drivers/net/wireless/intel/iwlwifi/pcie/tx.c q->write_ptr = 0;
q 155 drivers/net/wireless/intel/iwlwifi/pcie/tx.c q->read_ptr = 0;
q 3161 drivers/net/wireless/intersil/hostap/hostap_hw.c #define HOSTAP_TASKLET_INIT(q, f, d) \
q 3162 drivers/net/wireless/intersil/hostap/hostap_hw.c do { memset((q), 0, sizeof(*(q))); (q)->func = (f); (q)->data = (d); } \
q 5376 drivers/net/wireless/marvell/mwl8k.c int q = MWL8K_TX_WMM_QUEUES - 1 - queue;
q 5377 drivers/net/wireless/marvell/mwl8k.c rc = mwl8k_cmd_set_edca_params(hw, q,
q 35 drivers/net/wireless/mediatek/mt76/debugfs.c struct mt76_sw_queue *q = &dev->q_tx[i];
q 37 drivers/net/wireless/mediatek/mt76/debugfs.c if (!q->q)
q 42 drivers/net/wireless/mediatek/mt76/debugfs.c i, q->q->queued, q->q->head, q->q->tail,
q 43 drivers/net/wireless/mediatek/mt76/debugfs.c q->swq_queued);
q 11 drivers/net/wireless/mediatek/mt76/dma.c mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
q 18 drivers/net/wireless/mediatek/mt76/dma.c spin_lock_init(&q->lock);
q 20 drivers/net/wireless/mediatek/mt76/dma.c q->regs = dev->mmio.regs + ring_base + idx * MT_RING_SIZE;
q 21 drivers/net/wireless/mediatek/mt76/dma.c q->ndesc = n_desc;
q 22 drivers/net/wireless/mediatek/mt76/dma.c q->buf_size = bufsize;
q 23 drivers/net/wireless/mediatek/mt76/dma.c q->hw_idx = idx;
q 25 drivers/net/wireless/mediatek/mt76/dma.c size = q->ndesc * sizeof(struct mt76_desc);
q 26 drivers/net/wireless/mediatek/mt76/dma.c q->desc = dmam_alloc_coherent(dev->dev, size, &q->desc_dma, GFP_KERNEL);
q 27 drivers/net/wireless/mediatek/mt76/dma.c if (!q->desc)
q 30 drivers/net/wireless/mediatek/mt76/dma.c size = q->ndesc * sizeof(*q->entry);
q 31 drivers/net/wireless/mediatek/mt76/dma.c q->entry = devm_kzalloc(dev->dev, size, GFP_KERNEL);
q 32 drivers/net/wireless/mediatek/mt76/dma.c if (!q->entry)
q 36 drivers/net/wireless/mediatek/mt76/dma.c for (i = 0; i < q->ndesc; i++)
q 37 drivers/net/wireless/mediatek/mt76/dma.c q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);
q 39 drivers/net/wireless/mediatek/mt76/dma.c writel(q->desc_dma, &q->regs->desc_base);
q 40 drivers/net/wireless/mediatek/mt76/dma.c writel(0, &q->regs->cpu_idx);
q 41 drivers/net/wireless/mediatek/mt76/dma.c writel(0, &q->regs->dma_idx);
q 42 drivers/net/wireless/mediatek/mt76/dma.c writel(q->ndesc, &q->regs->ring_size);
q 48 drivers/net/wireless/mediatek/mt76/dma.c mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
q 57 drivers/net/wireless/mediatek/mt76/dma.c q->entry[q->head].txwi = DMA_DUMMY_DATA;
q 58 drivers/net/wireless/mediatek/mt76/dma.c q->entry[q->head].skip_buf0 = true;
q 75 drivers/net/wireless/mediatek/mt76/dma.c idx = q->head;
q 76 drivers/net/wireless/mediatek/mt76/dma.c q->head = (q->head + 1) % q->ndesc;
q 78 drivers/net/wireless/mediatek/mt76/dma.c desc = &q->desc[idx];
q 85 drivers/net/wireless/mediatek/mt76/dma.c q->queued++;
q 88 drivers/net/wireless/mediatek/mt76/dma.c q->entry[idx].txwi = txwi;
q 89 drivers/net/wireless/mediatek/mt76/dma.c q->entry[idx].skb = skb;
q 95 drivers/net/wireless/mediatek/mt76/dma.c mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx,
q 98 drivers/net/wireless/mediatek/mt76/dma.c struct mt76_queue_entry *e = &q->entry[idx];
q 99 drivers/net/wireless/mediatek/mt76/dma.c __le32 __ctrl = READ_ONCE(q->desc[idx].ctrl);
q 103 drivers/net/wireless/mediatek/mt76/dma.c __le32 addr = READ_ONCE(q->desc[idx].buf0);
q 111 drivers/net/wireless/mediatek/mt76/dma.c __le32 addr = READ_ONCE(q->desc[idx].buf1);
q 129 drivers/net/wireless/mediatek/mt76/dma.c mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
q 131 drivers/net/wireless/mediatek/mt76/dma.c writel(q->desc_dma, &q->regs->desc_base);
q 132 drivers/net/wireless/mediatek/mt76/dma.c writel(q->ndesc, &q->regs->ring_size);
q 133 drivers/net/wireless/mediatek/mt76/dma.c q->head = readl(&q->regs->dma_idx);
q 134 drivers/net/wireless/mediatek/mt76/dma.c q->tail = q->head;
q 135 drivers/net/wireless/mediatek/mt76/dma.c writel(q->head, &q->regs->cpu_idx);
q 142 drivers/net/wireless/mediatek/mt76/dma.c struct mt76_queue *q = sq->q;
q 149 drivers/net/wireless/mediatek/mt76/dma.c if (!q)
q 155 drivers/net/wireless/mediatek/mt76/dma.c last = readl(&q->regs->dma_idx);
q 157 drivers/net/wireless/mediatek/mt76/dma.c while ((q->queued > n_queued) && q->tail != last) {
q 158 drivers/net/wireless/mediatek/mt76/dma.c mt76_dma_tx_cleanup_idx(dev, q, q->tail, &entry);
q 162 drivers/net/wireless/mediatek/mt76/dma.c q->tail = (q->tail + 1) % q->ndesc;
q 174 drivers/net/wireless/mediatek/mt76/dma.c if (!flush && q->tail == last)
q 175 drivers/net/wireless/mediatek/mt76/dma.c last = readl(&q->regs->dma_idx);
q 178 drivers/net/wireless/mediatek/mt76/dma.c spin_lock_bh(&q->lock);
q 180 drivers/net/wireless/mediatek/mt76/dma.c q->queued -= n_queued;
q 189 drivers/net/wireless/mediatek/mt76/dma.c mt76_dma_sync_idx(dev, q);
q 191 drivers/net/wireless/mediatek/mt76/dma.c wake = wake && q->stopped &&
q 192 drivers/net/wireless/mediatek/mt76/dma.c qid < IEEE80211_NUM_ACS && q->queued < q->ndesc - 8;
q 194 drivers/net/wireless/mediatek/mt76/dma.c q->stopped = false;
q 196 drivers/net/wireless/mediatek/mt76/dma.c if (!q->queued)
q 199 drivers/net/wireless/mediatek/mt76/dma.c spin_unlock_bh(&q->lock);
q 206 drivers/net/wireless/mediatek/mt76/dma.c mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
q 209 drivers/net/wireless/mediatek/mt76/dma.c struct mt76_queue_entry *e = &q->entry[idx];
q 210 drivers/net/wireless/mediatek/mt76/dma.c struct mt76_desc *desc = &q->desc[idx];
q 213 drivers/net/wireless/mediatek/mt76/dma.c int buf_len = SKB_WITH_OVERHEAD(q->buf_size);
q 232 drivers/net/wireless/mediatek/mt76/dma.c mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
q 235 drivers/net/wireless/mediatek/mt76/dma.c int idx = q->tail;
q 238 drivers/net/wireless/mediatek/mt76/dma.c if (!q->queued)
q 241 drivers/net/wireless/mediatek/mt76/dma.c if (!flush && !(q->desc[idx].ctrl & cpu_to_le32(MT_DMA_CTL_DMA_DONE)))
q 244 drivers/net/wireless/mediatek/mt76/dma.c q->tail = (q->tail + 1) % q->ndesc;
q 245 drivers/net/wireless/mediatek/mt76/dma.c q->queued--;
q 247 drivers/net/wireless/mediatek/mt76/dma.c return mt76_dma_get_buf(dev, q, idx, len, info, more);
q 251 drivers/net/wireless/mediatek/mt76/dma.c mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q)
q 253 drivers/net/wireless/mediatek/mt76/dma.c writel(q->head, &q->regs->cpu_idx);
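The mt76 dma.c hits above (lines 75-89 and 251-253) show the producer side of the descriptor ring: software advances head modulo the ring size, then "kicks" the engine by publishing head as the new CPU index. A sketch of those two steps:

/* Sketch of the mt76 ring producer step and kick, per the hits above. */
static int ring_push(struct mt76_queue *q)
{
	int idx = q->head;

	q->head = (q->head + 1) % q->ndesc;	/* advance, wrapping */
	q->queued++;
	return idx;		/* slot for desc/entry bookkeeping */
}

static void ring_kick(struct mt76_queue *q)
{
	writel(q->head, &q->regs->cpu_idx);	/* hardware may now fetch */
}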
q 260 drivers/net/wireless/mediatek/mt76/dma.c struct mt76_queue *q = dev->q_tx[qid].q;
q 272 drivers/net/wireless/mediatek/mt76/dma.c spin_lock_bh(&q->lock);
q 273 drivers/net/wireless/mediatek/mt76/dma.c mt76_dma_add_buf(dev, q, &buf, 1, tx_info, skb, NULL);
q 274 drivers/net/wireless/mediatek/mt76/dma.c mt76_dma_kick_queue(dev, q);
q 275 drivers/net/wireless/mediatek/mt76/dma.c spin_unlock_bh(&q->lock);
q 285 drivers/net/wireless/mediatek/mt76/dma.c struct mt76_queue *q = dev->q_tx[qid].q;
q 339 drivers/net/wireless/mediatek/mt76/dma.c if (q->queued + (tx_info.nbuf + 1) / 2 >= q->ndesc - 1) {
q 344 drivers/net/wireless/mediatek/mt76/dma.c return mt76_dma_add_buf(dev, q, tx_info.buf, tx_info.nbuf,
q 361 drivers/net/wireless/mediatek/mt76/dma.c mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
q 366 drivers/net/wireless/mediatek/mt76/dma.c int len = SKB_WITH_OVERHEAD(q->buf_size);
q 367 drivers/net/wireless/mediatek/mt76/dma.c int offset = q->buf_offset;
q 370 drivers/net/wireless/mediatek/mt76/dma.c spin_lock_bh(&q->lock);
q 372 drivers/net/wireless/mediatek/mt76/dma.c while (q->queued < q->ndesc - 1) {
q 375 drivers/net/wireless/mediatek/mt76/dma.c buf = page_frag_alloc(&q->rx_page, q->buf_size, GFP_ATOMIC);
q 387 drivers/net/wireless/mediatek/mt76/dma.c idx = mt76_dma_add_buf(dev, q, &qbuf, 1, 0, buf, NULL);
q 392 drivers/net/wireless/mediatek/mt76/dma.c mt76_dma_kick_queue(dev, q);
q 394 drivers/net/wireless/mediatek/mt76/dma.c spin_unlock_bh(&q->lock);
q 400 drivers/net/wireless/mediatek/mt76/dma.c mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
q 406 drivers/net/wireless/mediatek/mt76/dma.c spin_lock_bh(&q->lock);
q 408 drivers/net/wireless/mediatek/mt76/dma.c buf = mt76_dma_dequeue(dev, q, true, NULL, NULL, &more);
q 414 drivers/net/wireless/mediatek/mt76/dma.c spin_unlock_bh(&q->lock);
q 416 drivers/net/wireless/mediatek/mt76/dma.c if (!q->rx_page.va)
q 419 drivers/net/wireless/mediatek/mt76/dma.c page = virt_to_page(q->rx_page.va);
q 420 drivers/net/wireless/mediatek/mt76/dma.c __page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
q 421 drivers/net/wireless/mediatek/mt76/dma.c memset(&q->rx_page, 0, sizeof(q->rx_page));
q 427 drivers/net/wireless/mediatek/mt76/dma.c struct mt76_queue *q = &dev->q_rx[qid];
q 430 drivers/net/wireless/mediatek/mt76/dma.c for (i = 0; i < q->ndesc; i++)
q 431 drivers/net/wireless/mediatek/mt76/dma.c q->desc[i].ctrl &= ~cpu_to_le32(MT_DMA_CTL_DMA_DONE);
q 433 drivers/net/wireless/mediatek/mt76/dma.c mt76_dma_rx_cleanup(dev, q);
q 434 drivers/net/wireless/mediatek/mt76/dma.c mt76_dma_sync_idx(dev, q);
q 435 drivers/net/wireless/mediatek/mt76/dma.c mt76_dma_rx_fill(dev, q);
q 437 drivers/net/wireless/mediatek/mt76/dma.c if (!q->rx_head)
q 440 drivers/net/wireless/mediatek/mt76/dma.c dev_kfree_skb(q->rx_head);
q 441 drivers/net/wireless/mediatek/mt76/dma.c q->rx_head = NULL;
q 445 drivers/net/wireless/mediatek/mt76/dma.c mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
q 450 drivers/net/wireless/mediatek/mt76/dma.c struct sk_buff *skb = q->rx_head;
q 454 drivers/net/wireless/mediatek/mt76/dma.c offset += q->buf_offset;
q 456 drivers/net/wireless/mediatek/mt76/dma.c q->buf_size);
q 462 drivers/net/wireless/mediatek/mt76/dma.c q->rx_head = NULL;
q 463 drivers/net/wireless/mediatek/mt76/dma.c dev->drv->rx_skb(dev, q - dev->q_rx, skb);
q 467 drivers/net/wireless/mediatek/mt76/dma.c mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
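The mt76_dma_rx_fill() hits above (dma.c lines 361-394) top the RX ring up to ndesc - 1 buffers from a per-queue page-fragment cache, then kick the hardware once. A sketch under those assumptions; rx_ring_add() is a hypothetical stand-in for the DMA mapping and descriptor write-out that mt76_dma_add_buf() performs:

/* Hypothetical helper standing in for mt76_dma_add_buf()'s RX path. */
static int rx_ring_add(struct mt76_dev *dev, struct mt76_queue *q, void *buf);

/* Sketch of the RX refill loop, one kick per batch. */
static void rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
{
	spin_lock_bh(&q->lock);
	while (q->queued < q->ndesc - 1) {	/* one slot stays free */
		void *buf = page_frag_alloc(&q->rx_page, q->buf_size,
					    GFP_ATOMIC);
		if (!buf)
			break;
		rx_ring_add(dev, q, buf);
	}
	mt76_dma_kick_queue(dev, q);		/* publish new head once */
	spin_unlock_bh(&q->lock);
}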
q 477 drivers/net/wireless/mediatek/mt76/dma.c data = mt76_dma_dequeue(dev, q, false, &len, &info, &more);
q 481 drivers/net/wireless/mediatek/mt76/dma.c if (q->rx_head)
q 482 drivers/net/wireless/mediatek/mt76/dma.c data_len = q->buf_size;
q 484 drivers/net/wireless/mediatek/mt76/dma.c data_len = SKB_WITH_OVERHEAD(q->buf_size);
q 486 drivers/net/wireless/mediatek/mt76/dma.c if (data_len < len + q->buf_offset) {
q 487 drivers/net/wireless/mediatek/mt76/dma.c dev_kfree_skb(q->rx_head);
q 488 drivers/net/wireless/mediatek/mt76/dma.c q->rx_head = NULL;
q 494 drivers/net/wireless/mediatek/mt76/dma.c if (q->rx_head) {
q 495 drivers/net/wireless/mediatek/mt76/dma.c mt76_add_fragment(dev, q, data, len, more);
q 499 drivers/net/wireless/mediatek/mt76/dma.c skb = build_skb(data, q->buf_size);
q 504 drivers/net/wireless/mediatek/mt76/dma.c skb_reserve(skb, q->buf_offset);
q 506 drivers/net/wireless/mediatek/mt76/dma.c if (q == &dev->q_rx[MT_RXQ_MCU]) {
q 515 drivers/net/wireless/mediatek/mt76/dma.c q->rx_head = skb;
q 519 drivers/net/wireless/mediatek/mt76/dma.c dev->drv->rx_skb(dev, q - dev->q_rx, skb);
q 522 drivers/net/wireless/mediatek/mt76/dma.c mt76_dma_rx_fill(dev, q);
q 375 drivers/net/wireless/mediatek/mt76/mac80211.c void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb)
q 382 drivers/net/wireless/mediatek/mt76/mac80211.c __skb_queue_tail(&dev->rx_skb[q], skb);
q 388 drivers/net/wireless/mediatek/mt76/mac80211.c struct mt76_queue *q;
q 392 drivers/net/wireless/mediatek/mt76/mac80211.c q = dev->q_tx[i].q;
q 393 drivers/net/wireless/mediatek/mt76/mac80211.c if (q && q->queued)
q 644 drivers/net/wireless/mediatek/mt76/mac80211.c void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
q 652 drivers/net/wireless/mediatek/mt76/mac80211.c while ((skb = __skb_dequeue(&dev->rx_skb[q])) != NULL) {
q 132 drivers/net/wireless/mediatek/mt76/mt76.h struct mt76_queue *q;
q 151 drivers/net/wireless/mediatek/mt76/mt76.h int (*alloc)(struct mt76_dev *dev, struct mt76_queue *q,
q 155 drivers/net/wireless/mediatek/mt76/mt76.h int (*add_buf)(struct mt76_dev *dev, struct mt76_queue *q,
q 166 drivers/net/wireless/mediatek/mt76/mt76.h void *(*dequeue)(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
q 174 drivers/net/wireless/mediatek/mt76/mt76.h void (*kick)(struct mt76_dev *dev, struct mt76_queue *q);
q 304 drivers/net/wireless/mediatek/mt76/mt76.h void (*rx_skb)(struct mt76_dev *dev, enum mt76_rxq_id q,
q 307 drivers/net/wireless/mediatek/mt76/mt76.h void (*rx_poll_complete)(struct mt76_dev *dev, enum mt76_rxq_id q);
q 703 drivers/net/wireless/mediatek/mt76/mt76.h void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb);
q 778 drivers/net/wireless/mediatek/mt76/mt76.h void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
q 7 drivers/net/wireless/mediatek/mt76/mt7603/beacon.c struct sk_buff_head q;
q 32 drivers/net/wireless/mediatek/mt76/mt7603/beacon.c dev->mt76.q_tx[MT_TXQ_CAB].q->hw_idx) |
q 62 drivers/net/wireless/mediatek/mt76/mt7603/beacon.c __skb_queue_tail(&data->q, skb);
q 70 drivers/net/wireless/mediatek/mt76/mt7603/beacon.c struct mt76_queue *q;
q 79 drivers/net/wireless/mediatek/mt76/mt7603/beacon.c __skb_queue_head_init(&data.q);
q 81 drivers/net/wireless/mediatek/mt76/mt7603/beacon.c q = dev->mt76.q_tx[MT_TXQ_BEACON].q;
q 82 drivers/net/wireless/mediatek/mt76/mt7603/beacon.c spin_lock_bh(&q->lock);
q 86 drivers/net/wireless/mediatek/mt76/mt7603/beacon.c mt76_queue_kick(dev, q);
q 87 drivers/net/wireless/mediatek/mt76/mt7603/beacon.c spin_unlock_bh(&q->lock);
q 100 drivers/net/wireless/mediatek/mt76/mt7603/beacon.c nframes = skb_queue_len(&data.q);
q 104 drivers/net/wireless/mediatek/mt76/mt7603/beacon.c } while (nframes != skb_queue_len(&data.q) &&
q 105 drivers/net/wireless/mediatek/mt76/mt7603/beacon.c skb_queue_len(&data.q) < 8);
q 107 drivers/net/wireless/mediatek/mt76/mt7603/beacon.c if (skb_queue_empty(&data.q))
q 117 drivers/net/wireless/mediatek/mt76/mt7603/beacon.c spin_lock_bh(&q->lock);
q 118 drivers/net/wireless/mediatek/mt76/mt7603/beacon.c while ((skb = __skb_dequeue(&data.q)) != NULL) {
q 125 drivers/net/wireless/mediatek/mt76/mt7603/beacon.c mt76_queue_kick(dev, q);
q 126 drivers/net/wireless/mediatek/mt76/mt7603/beacon.c spin_unlock_bh(&q->lock);
q 139 drivers/net/wireless/mediatek/mt76/mt7603/beacon.c if (dev->mt76.q_tx[MT_TXQ_BEACON].q->queued >
q 5 drivers/net/wireless/mediatek/mt76/mt7603/core.c void mt7603_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q)
q 9 drivers/net/wireless/mediatek/mt76/mt7603/core.c mt7603_irq_enable(dev, MT_INT_RX_DONE(q));
q 8 drivers/net/wireless/mediatek/mt76/mt7603/dma.c mt7603_init_tx_queue(struct mt7603_dev *dev, struct mt76_sw_queue *q,
q 22 drivers/net/wireless/mediatek/mt76/mt7603/dma.c INIT_LIST_HEAD(&q->swq);
q 23 drivers/net/wireless/mediatek/mt76/mt7603/dma.c q->q = hwq;
q 83 drivers/net/wireless/mediatek/mt76/mt7603/dma.c void mt7603_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
q 93 drivers/net/wireless/mediatek/mt76/mt7603/dma.c if (q == MT_RXQ_MCU) {
q 112 drivers/net/wireless/mediatek/mt76/mt7603/dma.c mt76_rx(&dev->mt76, q, skb);
q 123 drivers/net/wireless/mediatek/mt76/mt7603/dma.c mt7603_init_rx_queue(struct mt7603_dev *dev, struct mt76_queue *q,
q 128 drivers/net/wireless/mediatek/mt76/mt7603/dma.c err = mt76_queue_alloc(dev, q, idx, n_desc, bufsize,
q 788 drivers/net/wireless/mediatek/mt76/mt7603/mac.c struct mt76_queue *q = dev->mt76.q_tx[qid].q;
q 822 drivers/net/wireless/mediatek/mt76/mt7603/mac.c FIELD_PREP(MT_TXD0_Q_IDX, q->hw_idx);
q 1393 drivers/net/wireless/mediatek/mt76/mt7603/mac.c struct mt76_queue *q;
q 1398 drivers/net/wireless/mediatek/mt76/mt7603/mac.c q = dev->mt76.q_tx[i].q;
q 1400 drivers/net/wireless/mediatek/mt76/mt7603/mac.c if (!q->queued)
q 1404 drivers/net/wireless/mediatek/mt76/mt7603/mac.c dma_idx = readl(&q->regs->dma_idx);
q 1408 drivers/net/wireless/mediatek/mt76/mt7603/mac.c dma_idx != readl(&q->regs->cpu_idx))
q 503 drivers/net/wireless/mediatek/mt76/mt7603/main.c queue = dev->mt76.q_tx[queue].q->hw_idx;
q 235 drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h void mt7603_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
q 237 drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h void mt7603_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q);
q 17 drivers/net/wireless/mediatek/mt76/mt7615/dma.c struct mt76_sw_queue *q;
q 30 drivers/net/wireless/mediatek/mt76/mt7615/dma.c q = &dev->mt76.q_tx[i];
q 31 drivers/net/wireless/mediatek/mt76/mt7615/dma.c INIT_LIST_HEAD(&q->swq);
q 32 drivers/net/wireless/mediatek/mt76/mt7615/dma.c q->q = hwq;
q 39 drivers/net/wireless/mediatek/mt76/mt7615/dma.c mt7615_init_mcu_queue(struct mt7615_dev *dev, struct mt76_sw_queue *q,
q 53 drivers/net/wireless/mediatek/mt76/mt7615/dma.c INIT_LIST_HEAD(&q->swq);
q 54 drivers/net/wireless/mediatek/mt76/mt7615/dma.c q->q = hwq;
q 59 drivers/net/wireless/mediatek/mt76/mt7615/dma.c void mt7615_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
q 83 drivers/net/wireless/mediatek/mt76/mt7615/dma.c mt76_rx(&dev->mt76, q, skb);
q 63 drivers/net/wireless/mediatek/mt76/mt7615/mcu.h #define MCU_PQ_ID(p, q) (((p) << 15) | ((q) << 10))
q 261 drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h void mt7615_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
q 31 drivers/net/wireless/mediatek/mt76/mt7615/pci.c mt7615_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q)
q 35 drivers/net/wireless/mediatek/mt76/mt7615/pci.c mt7615_irq_enable(dev, MT_INT_RX_DONE(q));
q 170 drivers/net/wireless/mediatek/mt76/mt76x02.h void mt76x02_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
q 172 drivers/net/wireless/mediatek/mt76/mt76x02.h void mt76x02_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q);
q 189 drivers/net/wireless/mediatek/mt76/mt76x02.h struct sk_buff_head q;
q 212 drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c __skb_queue_tail(&data->q, skb);
q 224 drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c __skb_queue_head_init(&data->q);
q 227 drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c nframes = skb_queue_len(&data->q);
q 231 drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c } while (nframes != skb_queue_len(&data->q) &&
q 232 drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c skb_queue_len(&data->q) < max_nframes);
q 234 drivers/net/wireless/mediatek/mt76/mt76x02_beacon.c if (!skb_queue_len(&data->q))
q 17 drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c struct mt76_queue *q = dev->mt76.q_tx[MT_TXQ_PSD].q;
q 38 drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c if (!skb_queue_len(&data.q))
q 48 drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c spin_lock_bh(&q->lock);
q 49 drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c while ((skb = __skb_dequeue(&data.q)) != NULL) {
q 57 drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c spin_unlock_bh(&q->lock);
q 100 drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c mt76x02_init_tx_queue(struct mt76x02_dev *dev, struct mt76_sw_queue *q,
q 114 drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c INIT_LIST_HEAD(&q->swq);
q 115 drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c q->q = hwq;
q 123 drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c mt76x02_init_rx_queue(struct mt76x02_dev *dev, struct mt76_queue *q,
q 128 drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c err = mt76_queue_alloc(dev, q, idx, n_desc, bufsize,
q 183 drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c struct mt76_queue *q;
q 228 drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c q = &dev->mt76.q_rx[MT_RXQ_MAIN];
q 229 drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c q->buf_offset = MT_RX_HEADROOM - sizeof(struct mt76x02_rxwi);
q 230 drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c ret = mt76x02_init_rx_queue(dev, q, 0, MT76X02_RX_RING_SIZE,
q 247 drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c void mt76x02_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q)
q 252 drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c mt76x02_irq_enable(dev, MT_INT_RX_DONE(q));
q 289 drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c mt76_queue_kick(dev, dev->mt76.q_tx[MT_TXQ_PSD].q);
q 360 drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c struct mt76_queue *q;
q 364 drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c q = dev->mt76.q_tx[i].q;
q 366 drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c if (!q->queued)
q 370 drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c dma_idx = readl(&q->regs->dma_idx);
q 35 drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c void mt76x02_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
q 41 drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c if (q == MT_RXQ_MCU) {
q 53 drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c mt76_rx(mdev, q, skb);
q 69 drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c int pid, len = tx_info->skb->len, ep = q2ep(mdev->q_tx[qid].q->hw_idx);
q 190 drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c skb = __skb_dequeue(&data.q);
q 492 drivers/net/wireless/mediatek/mt76/mt76x02_util.c qid = dev->mt76.q_tx[queue].q->hw_idx;
q 253 drivers/net/wireless/mediatek/mt76/tx.c struct mt76_queue *q;
q 278 drivers/net/wireless/mediatek/mt76/tx.c q = dev->q_tx[qid].q;
q 280 drivers/net/wireless/mediatek/mt76/tx.c spin_lock_bh(&q->lock);
q 282 drivers/net/wireless/mediatek/mt76/tx.c dev->queue_ops->kick(dev, q);
q 284 drivers/net/wireless/mediatek/mt76/tx.c if (q->queued > q->ndesc - 8 && !q->stopped) {
q 286 drivers/net/wireless/mediatek/mt76/tx.c q->stopped = true;
q 289 drivers/net/wireless/mediatek/mt76/tx.c spin_unlock_bh(&q->lock);
q 340 drivers/net/wireless/mediatek/mt76/tx.c struct mt76_queue *hwq = dev->q_tx[MT_TXQ_PSD].q;
q 386 drivers/net/wireless/mediatek/mt76/tx.c struct mt76_queue *hwq = sq->q;
q 476 drivers/net/wireless/mediatek/mt76/tx.c struct mt76_queue *hwq = sq->q;
q 579 drivers/net/wireless/mediatek/mt76/tx.c hwq = mtxq->swq->q;
q 275 drivers/net/wireless/mediatek/mt76/usb.c mt76u_fill_rx_sg(struct mt76_dev *dev, struct mt76_queue *q, struct urb *urb,
q 285 drivers/net/wireless/mediatek/mt76/usb.c data = page_frag_alloc(&q->rx_page, q->buf_size, gfp);
q 291 drivers/net/wireless/mediatek/mt76/usb.c sg_set_page(&urb->sg[i], page, q->buf_size, offset);
q 303 drivers/net/wireless/mediatek/mt76/usb.c urb->transfer_buffer_length = urb->num_sgs * q->buf_size;
q 312 drivers/net/wireless/mediatek/mt76/usb.c struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
q 315 drivers/net/wireless/mediatek/mt76/usb.c return mt76u_fill_rx_sg(dev, q, urb, nsgs, gfp);
q 317 drivers/net/wireless/mediatek/mt76/usb.c urb->transfer_buffer_length = q->buf_size;
q 318 drivers/net/wireless/mediatek/mt76/usb.c urb->transfer_buffer = page_frag_alloc(&q->rx_page, q->buf_size, gfp);
q 393 drivers/net/wireless/mediatek/mt76/usb.c struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
q 397 drivers/net/wireless/mediatek/mt76/usb.c spin_lock_irqsave(&q->lock, flags);
q 398 drivers/net/wireless/mediatek/mt76/usb.c if (q->queued > 0) {
q 399 drivers/net/wireless/mediatek/mt76/usb.c urb = q->entry[q->head].urb;
q 400 drivers/net/wireless/mediatek/mt76/usb.c q->head = (q->head + 1) % q->ndesc;
q 401 drivers/net/wireless/mediatek/mt76/usb.c q->queued--;
q 403 drivers/net/wireless/mediatek/mt76/usb.c spin_unlock_irqrestore(&q->lock, flags);
q 462 drivers/net/wireless/mediatek/mt76/usb.c struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
q 476 drivers/net/wireless/mediatek/mt76/usb.c skb = mt76u_build_rx_skb(data, data_len, q->buf_size);
q 486 drivers/net/wireless/mediatek/mt76/usb.c data_len, q->buf_size);
q 498 drivers/net/wireless/mediatek/mt76/usb.c struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
q 516 drivers/net/wireless/mediatek/mt76/usb.c spin_lock_irqsave(&q->lock, flags);
q 517 drivers/net/wireless/mediatek/mt76/usb.c if (WARN_ONCE(q->entry[q->tail].urb != urb, "rx urb mismatch"))
q 520 drivers/net/wireless/mediatek/mt76/usb.c q->tail = (q->tail + 1) % q->ndesc;
q 521 drivers/net/wireless/mediatek/mt76/usb.c q->queued++;
q 524 drivers/net/wireless/mediatek/mt76/usb.c spin_unlock_irqrestore(&q->lock, flags);
q 565 drivers/net/wireless/mediatek/mt76/usb.c struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
q 569 drivers/net/wireless/mediatek/mt76/usb.c spin_lock_irqsave(&q->lock, flags);
q 570 drivers/net/wireless/mediatek/mt76/usb.c for (i = 0; i < q->ndesc; i++) {
q 571 drivers/net/wireless/mediatek/mt76/usb.c err = mt76u_submit_rx_buf(dev, q->entry[i].urb);
q 575 drivers/net/wireless/mediatek/mt76/usb.c q->head = q->tail = 0;
q 576 drivers/net/wireless/mediatek/mt76/usb.c q->queued = 0;
q 577 drivers/net/wireless/mediatek/mt76/usb.c spin_unlock_irqrestore(&q->lock, flags);
q 585 drivers/net/wireless/mediatek/mt76/usb.c struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
q 592 drivers/net/wireless/mediatek/mt76/usb.c spin_lock_init(&q->lock);
q 593 drivers/net/wireless/mediatek/mt76/usb.c q->entry = devm_kcalloc(dev->dev,
q 594 drivers/net/wireless/mediatek/mt76/usb.c MT_NUM_RX_ENTRIES, sizeof(*q->entry),
q 596 drivers/net/wireless/mediatek/mt76/usb.c if (!q->entry)
q 599 drivers/net/wireless/mediatek/mt76/usb.c q->ndesc = MT_NUM_RX_ENTRIES;
q 600 drivers/net/wireless/mediatek/mt76/usb.c q->buf_size = PAGE_SIZE;
q 602 drivers/net/wireless/mediatek/mt76/usb.c for (i = 0; i < q->ndesc; i++) {
q 603 drivers/net/wireless/mediatek/mt76/usb.c err = mt76u_rx_urb_alloc(dev, &q->entry[i]);
q 613 drivers/net/wireless/mediatek/mt76/usb.c struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
q 617 drivers/net/wireless/mediatek/mt76/usb.c for (i = 0; i < q->ndesc; i++)
q 618 drivers/net/wireless/mediatek/mt76/usb.c mt76u_urb_free(q->entry[i].urb);
q 620 drivers/net/wireless/mediatek/mt76/usb.c if (!q->rx_page.va)
q 623 drivers/net/wireless/mediatek/mt76/usb.c page = virt_to_page(q->rx_page.va);
q 624 drivers/net/wireless/mediatek/mt76/usb.c __page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
q 625 drivers/net/wireless/mediatek/mt76/usb.c memset(&q->rx_page, 0, sizeof(q->rx_page));
q 630 drivers/net/wireless/mediatek/mt76/usb.c struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
q 633 drivers/net/wireless/mediatek/mt76/usb.c for (i = 0; i < q->ndesc; i++)
q 634 drivers/net/wireless/mediatek/mt76/usb.c usb_poison_urb(q->entry[i].urb);
q 642 drivers/net/wireless/mediatek/mt76/usb.c struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN];
q 645 drivers/net/wireless/mediatek/mt76/usb.c for (i = 0; i < q->ndesc; i++)
q 646 drivers/net/wireless/mediatek/mt76/usb.c usb_unpoison_urb(q->entry[i].urb);
q 657 drivers/net/wireless/mediatek/mt76/usb.c struct mt76_queue *q;
q 665 drivers/net/wireless/mediatek/mt76/usb.c q = sq->q;
q 667 drivers/net/wireless/mediatek/mt76/usb.c while (q->queued > n_dequeued) {
q 668 drivers/net/wireless/mediatek/mt76/usb.c if (!q->entry[q->head].done)
q 671 drivers/net/wireless/mediatek/mt76/usb.c if (q->entry[q->head].schedule) {
q 672 drivers/net/wireless/mediatek/mt76/usb.c q->entry[q->head].schedule = false;
q 676 drivers/net/wireless/mediatek/mt76/usb.c entry = q->entry[q->head];
q 677 drivers/net/wireless/mediatek/mt76/usb.c q->entry[q->head].done = false;
q 678 drivers/net/wireless/mediatek/mt76/usb.c q->head = (q->head + 1) % q->ndesc;
q 684 drivers/net/wireless/mediatek/mt76/usb.c spin_lock_bh(&q->lock);
q 687 drivers/net/wireless/mediatek/mt76/usb.c q->queued -= n_dequeued;
q 689 drivers/net/wireless/mediatek/mt76/usb.c wake = q->stopped && q->queued < q->ndesc - 8;
q 691 drivers/net/wireless/mediatek/mt76/usb.c q->stopped = false;
q 693 drivers/net/wireless/mediatek/mt76/usb.c if (!q->queued)
q 696 drivers/net/wireless/mediatek/mt76/usb.c spin_unlock_bh(&q->lock);
q 772 drivers/net/wireless/mediatek/mt76/usb.c struct mt76_queue *q = dev->q_tx[qid].q;
q 776 drivers/net/wireless/mediatek/mt76/usb.c u16 idx = q->tail;
q 779 drivers/net/wireless/mediatek/mt76/usb.c if (q->queued == q->ndesc)
q 787 drivers/net/wireless/mediatek/mt76/usb.c err = mt76u_tx_setup_buffers(dev, tx_info.skb, q->entry[idx].urb);
q 791 drivers/net/wireless/mediatek/mt76/usb.c mt76u_fill_bulk_urb(dev, USB_DIR_OUT, q2ep(q->hw_idx),
q 792 drivers/net/wireless/mediatek/mt76/usb.c q->entry[idx].urb, mt76u_complete_tx,
q 793 drivers/net/wireless/mediatek/mt76/usb.c &q->entry[idx]);
q 795 drivers/net/wireless/mediatek/mt76/usb.c q->tail = (q->tail + 1) % q->ndesc;
q 796 drivers/net/wireless/mediatek/mt76/usb.c q->entry[idx].skb = tx_info.skb;
q 797 drivers/net/wireless/mediatek/mt76/usb.c q->queued++;
q 802 drivers/net/wireless/mediatek/mt76/usb.c static void mt76u_tx_kick(struct mt76_dev *dev, struct mt76_queue *q)
q 807 drivers/net/wireless/mediatek/mt76/usb.c while (q->first != q->tail) {
q 808 drivers/net/wireless/mediatek/mt76/usb.c urb = q->entry[q->first].urb;
q 820 drivers/net/wireless/mediatek/mt76/usb.c q->first = (q->first + 1) % q->ndesc;
q 826 drivers/net/wireless/mediatek/mt76/usb.c struct mt76_queue *q;
q 833 drivers/net/wireless/mediatek/mt76/usb.c dev->q_tx[i].q = dev->q_tx[0].q;
q 837 drivers/net/wireless/mediatek/mt76/usb.c q = devm_kzalloc(dev->dev, sizeof(*q), GFP_KERNEL);
q 838 drivers/net/wireless/mediatek/mt76/usb.c if (!q)
q 841 drivers/net/wireless/mediatek/mt76/usb.c spin_lock_init(&q->lock);
q 842 drivers/net/wireless/mediatek/mt76/usb.c q->hw_idx = mt76_ac_to_hwq(i);
q 843 drivers/net/wireless/mediatek/mt76/usb.c dev->q_tx[i].q = q;
q 845 drivers/net/wireless/mediatek/mt76/usb.c q->entry = devm_kcalloc(dev->dev,
q 846 drivers/net/wireless/mediatek/mt76/usb.c MT_NUM_TX_ENTRIES, sizeof(*q->entry),
q 848 drivers/net/wireless/mediatek/mt76/usb.c if (!q->entry)
q 851 drivers/net/wireless/mediatek/mt76/usb.c q->ndesc = MT_NUM_TX_ENTRIES;
q 852 drivers/net/wireless/mediatek/mt76/usb.c for (j = 0; j < q->ndesc; j++) {
q 853 drivers/net/wireless/mediatek/mt76/usb.c err = mt76u_urb_alloc(dev, &q->entry[j],
q 864 drivers/net/wireless/mediatek/mt76/usb.c struct mt76_queue *q;
q 868 drivers/net/wireless/mediatek/mt76/usb.c q = dev->q_tx[i].q;
q 869 drivers/net/wireless/mediatek/mt76/usb.c for (j = 0; j < q->ndesc; j++)
q 870 drivers/net/wireless/mediatek/mt76/usb.c usb_free_urb(q->entry[j].urb);
q 877 drivers/net/wireless/mediatek/mt76/usb.c struct mt76_queue *q;
q 886 drivers/net/wireless/mediatek/mt76/usb.c q = dev->q_tx[i].q;
q 887 drivers/net/wireless/mediatek/mt76/usb.c for (j = 0; j < q->ndesc; j++)
q 888 drivers/net/wireless/mediatek/mt76/usb.c usb_kill_urb(q->entry[j].urb);
q 897 drivers/net/wireless/mediatek/mt76/usb.c q = dev->q_tx[i].q;
q 900 drivers/net/wireless/mediatek/mt76/usb.c spin_lock_bh(&q->lock);
q 901 drivers/net/wireless/mediatek/mt76/usb.c while (q->queued) {
q 902 drivers/net/wireless/mediatek/mt76/usb.c entry = q->entry[q->head];
q 903 drivers/net/wireless/mediatek/mt76/usb.c q->head = (q->head + 1) % q->ndesc;
q 904 drivers/net/wireless/mediatek/mt76/usb.c q->queued--;
q 908 drivers/net/wireless/mediatek/mt76/usb.c spin_unlock_bh(&q->lock);
q 164 drivers/net/wireless/mediatek/mt7601u/dma.c struct mt7601u_rx_queue *q = &dev->rx_q;
q 170 drivers/net/wireless/mediatek/mt7601u/dma.c if (!q->pending)
q 173 drivers/net/wireless/mediatek/mt7601u/dma.c buf = &q->e[q->start];
q 174 drivers/net/wireless/mediatek/mt7601u/dma.c q->pending--;
q 175 drivers/net/wireless/mediatek/mt7601u/dma.c q->start = (q->start + 1) % q->entries;
q 185 drivers/net/wireless/mediatek/mt7601u/dma.c struct mt7601u_rx_queue *q = &dev->rx_q;
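The mt76 tx.c and usb.c entries above (head, tail, queued, ndesc, stopped) trace a classic producer/consumer ring with a high/low watermark: submission is stopped once fewer than 8 slots remain (tx.c line 284) and resumed when completions drain back below the same threshold (usb.c line 689). A minimal userspace sketch of that arithmetic follows; the names (txq, txq_enqueue, txq_complete) are illustrative only, not the driver's API, and the driver additionally holds q->lock around these updates.

    #include <stdbool.h>
    #include <stddef.h>

    #define NDESC 16 /* ring size; mt76 uses MT_NUM_TX_ENTRIES */

    struct txq {
            void *entry[NDESC];
            unsigned int head;   /* next slot to complete */
            unsigned int tail;   /* next slot to fill */
            unsigned int queued; /* occupied slots */
            bool stopped;        /* mirrors q->stopped */
    };

    /* Enqueue one buffer; returns false when the ring is full, like the
     * "if (q->queued == q->ndesc)" check in the entries above. */
    static bool txq_enqueue(struct txq *q, void *buf)
    {
            if (q->queued == NDESC)
                    return false;
            q->entry[q->tail] = buf;
            q->tail = (q->tail + 1) % NDESC;
            q->queued++;
            /* Stop the submitter a few slots early ("queued > ndesc - 8"). */
            if (q->queued > NDESC - 8)
                    q->stopped = true;
            return true;
    }

    /* Complete one buffer from the head; wake the submitter once enough
     * slots have drained ("queued < ndesc - 8"). */
    static void *txq_complete(struct txq *q)
    {
            void *buf;

            if (!q->queued)
                    return NULL;
            buf = q->entry[q->head];
            q->head = (q->head + 1) % NDESC;
            q->queued--;
            if (q->stopped && q->queued < NDESC - 8)
                    q->stopped = false;
            return buf;
    }

The gap between the two thresholds keeps the queue from thrashing between stopped and running when it hovers near full.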
q 205 drivers/net/wireless/mediatek/mt7601u/dma.c if (WARN_ONCE(q->e[q->end].urb != urb, "RX urb mismatch"))
q 208 drivers/net/wireless/mediatek/mt7601u/dma.c q->end = (q->end + 1) % q->entries;
q 209 drivers/net/wireless/mediatek/mt7601u/dma.c q->pending++;
q 231 drivers/net/wireless/mediatek/mt7601u/dma.c struct mt7601u_tx_queue *q = urb->context;
q 232 drivers/net/wireless/mediatek/mt7601u/dma.c struct mt7601u_dev *dev = q->dev;
q 250 drivers/net/wireless/mediatek/mt7601u/dma.c if (WARN_ONCE(q->e[q->start].urb != urb, "TX urb mismatch"))
q 253 drivers/net/wireless/mediatek/mt7601u/dma.c skb = q->e[q->start].skb;
q 254 drivers/net/wireless/mediatek/mt7601u/dma.c q->e[q->start].skb = NULL;
q 260 drivers/net/wireless/mediatek/mt7601u/dma.c if (q->used == q->entries - q->entries / 8)
q 263 drivers/net/wireless/mediatek/mt7601u/dma.c q->start = (q->start + 1) % q->entries;
q 264 drivers/net/wireless/mediatek/mt7601u/dma.c q->used--;
q 301 drivers/net/wireless/mediatek/mt7601u/dma.c struct mt7601u_tx_queue *q = &dev->tx_q[ep];
q 307 drivers/net/wireless/mediatek/mt7601u/dma.c if (WARN_ON(q->entries <= q->used)) {
q 312 drivers/net/wireless/mediatek/mt7601u/dma.c e = &q->e[q->end];
q 315 drivers/net/wireless/mediatek/mt7601u/dma.c mt7601u_complete_tx, q);
q 329 drivers/net/wireless/mediatek/mt7601u/dma.c q->end = (q->end + 1) % q->entries;
q 330 drivers/net/wireless/mediatek/mt7601u/dma.c q->used++;
q 332 drivers/net/wireless/mediatek/mt7601u/dma.c if (q->used >= q->entries)
q 450 drivers/net/wireless/mediatek/mt7601u/dma.c static void mt7601u_free_tx_queue(struct mt7601u_tx_queue *q)
q 454 drivers/net/wireless/mediatek/mt7601u/dma.c for (i = 0; i < q->entries; i++) {
q 455 drivers/net/wireless/mediatek/mt7601u/dma.c usb_poison_urb(q->e[i].urb);
q 456 drivers/net/wireless/mediatek/mt7601u/dma.c if (q->e[i].skb)
q 457 drivers/net/wireless/mediatek/mt7601u/dma.c mt7601u_tx_status(q->dev, q->e[i].skb);
q 458 drivers/net/wireless/mediatek/mt7601u/dma.c usb_free_urb(q->e[i].urb);
q 474 drivers/net/wireless/mediatek/mt7601u/dma.c struct mt7601u_tx_queue *q)
q 478 drivers/net/wireless/mediatek/mt7601u/dma.c q->dev = dev;
q 479 drivers/net/wireless/mediatek/mt7601u/dma.c q->entries = N_TX_ENTRIES;
q 482 drivers/net/wireless/mediatek/mt7601u/dma.c q->e[i].urb = usb_alloc_urb(0, GFP_KERNEL);
q 483 drivers/net/wireless/mediatek/mt7601u/dma.c if (!q->e[i].urb)
q 21 drivers/net/wireless/mediatek/mt7601u/tx.c static u8 q2hwq(u8 q)
q 23 drivers/net/wireless/mediatek/mt7601u/tx.c return q ^ 0x3;
q 70 drivers/net/wireless/st/cw1200/debug.c struct cw1200_queue *q)
q 73 drivers/net/wireless/st/cw1200/debug.c seq_printf(seq, "Queue %d:\n", q->queue_id);
q 74 drivers/net/wireless/st/cw1200/debug.c seq_printf(seq, " capacity: %zu\n", q->capacity);
q 75 drivers/net/wireless/st/cw1200/debug.c seq_printf(seq, " queued: %zu\n", q->num_queued);
q 76 drivers/net/wireless/st/cw1200/debug.c seq_printf(seq, " pending: %zu\n", q->num_pending);
q 77 drivers/net/wireless/st/cw1200/debug.c seq_printf(seq, " sent: %zu\n", q->num_sent);
q 78 drivers/net/wireless/st/cw1200/debug.c seq_printf(seq, " locked: %s\n", q->tx_locked_cnt ? "yes" : "no");
q 79 drivers/net/wireless/st/cw1200/debug.c seq_printf(seq, " overfull: %s\n", q->overfull ? "yes" : "no");
"yes" : "no"); q 81 drivers/net/wireless/st/cw1200/debug.c for (i = 0; i < q->stats->map_capacity; ++i) q 82 drivers/net/wireless/st/cw1200/debug.c seq_printf(seq, "%.2d ", q->link_map_cache[i]); q 83 drivers/net/wireless/st/cw1200/debug.c seq_printf(seq, "<-%zu\n", q->stats->map_capacity); q 727 drivers/net/wireless/ti/wl18xx/main.c wl18xx_clk_table[clk_freq].p, wl18xx_clk_table[clk_freq].q, q 760 drivers/net/wireless/ti/wl18xx/main.c wl18xx_clk_table[clk_freq].q & q 767 drivers/net/wireless/ti/wl18xx/main.c (wl18xx_clk_table[clk_freq].q >> 16) & q 168 drivers/net/wireless/ti/wl18xx/wl18xx.h u32 q; q 1211 drivers/net/wireless/ti/wlcore/main.c int q, mapping; q 1222 drivers/net/wireless/ti/wlcore/main.c q = wl1271_tx_get_queue(mapping); q 1235 drivers/net/wireless/ti/wlcore/main.c (wlcore_is_queue_stopped_locked(wl, wlvif, q) && q 1236 drivers/net/wireless/ti/wlcore/main.c !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q, q 1238 drivers/net/wireless/ti/wlcore/main.c wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", hlid, q); q 1244 drivers/net/wireless/ti/wlcore/main.c hlid, q, skb->len); q 1245 drivers/net/wireless/ti/wlcore/main.c skb_queue_tail(&wl->links[hlid].tx_queue[q], skb); q 1247 drivers/net/wireless/ti/wlcore/main.c wl->tx_queue_count[q]++; q 1248 drivers/net/wireless/ti/wlcore/main.c wlvif->tx_queue_count[q]++; q 1254 drivers/net/wireless/ti/wlcore/main.c if (wlvif->tx_queue_count[q] >= WL1271_TX_QUEUE_HIGH_WATERMARK && q 1255 drivers/net/wireless/ti/wlcore/main.c !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q, q 1257 drivers/net/wireless/ti/wlcore/main.c wl1271_debug(DEBUG_TX, "op_tx: stopping queues for q %d", q); q 1258 drivers/net/wireless/ti/wlcore/main.c wlcore_stop_queue_locked(wl, wlvif, q, q 1278 drivers/net/wireless/ti/wlcore/main.c int q; q 1284 drivers/net/wireless/ti/wlcore/main.c q = wl1271_tx_get_queue(skb_get_queue_mapping(wl->dummy_packet)); q 1288 drivers/net/wireless/ti/wlcore/main.c wl->tx_queue_count[q]++; q 485 drivers/net/wireless/ti/wlcore/tx.c int i, q = -1, ac; q 499 drivers/net/wireless/ti/wlcore/tx.c q = ac; q 500 drivers/net/wireless/ti/wlcore/tx.c min_pkts = wl->tx_allocated_pkts[q]; q 504 drivers/net/wireless/ti/wlcore/tx.c return q; q 508 drivers/net/wireless/ti/wlcore/tx.c struct wl1271_link *lnk, u8 q) q 513 drivers/net/wireless/ti/wlcore/tx.c skb = skb_dequeue(&lnk->tx_queue[q]); q 516 drivers/net/wireless/ti/wlcore/tx.c WARN_ON_ONCE(wl->tx_queue_count[q] <= 0); q 517 drivers/net/wireless/ti/wlcore/tx.c wl->tx_queue_count[q]--; q 519 drivers/net/wireless/ti/wlcore/tx.c WARN_ON_ONCE(lnk->wlvif->tx_queue_count[q] <= 0); q 520 drivers/net/wireless/ti/wlcore/tx.c lnk->wlvif->tx_queue_count[q]--; q 658 drivers/net/wireless/ti/wlcore/tx.c int q; q 662 drivers/net/wireless/ti/wlcore/tx.c q = wl1271_tx_get_queue(skb_get_queue_mapping(skb)); q 664 drivers/net/wireless/ti/wlcore/tx.c WARN_ON_ONCE(wl->tx_queue_count[q] <= 0); q 665 drivers/net/wireless/ti/wlcore/tx.c wl->tx_queue_count[q]--; q 676 drivers/net/wireless/ti/wlcore/tx.c int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb)); q 681 drivers/net/wireless/ti/wlcore/tx.c skb_queue_head(&wl->links[hlid].tx_queue[q], skb); q 689 drivers/net/wireless/ti/wlcore/tx.c wl->tx_queue_count[q]++; q 691 drivers/net/wireless/ti/wlcore/tx.c wlvif->tx_queue_count[q]++; q 490 drivers/net/wireless/zydas/zd1211rw/zd_mac.c struct sk_buff_head *q = &mac->ack_wait_queue; q 499 drivers/net/wireless/zydas/zd1211rw/zd_mac.c spin_lock_irqsave(&q->lock, flags); q 501 
q 514 drivers/net/wireless/zydas/zd1211rw/zd_mac.c skb_queue_is_first(q, skb)) {
q 546 drivers/net/wireless/zydas/zd1211rw/zd_mac.c skb = __skb_dequeue(q);
q 554 drivers/net/wireless/zydas/zd1211rw/zd_mac.c spin_unlock_irqrestore(&q->lock, flags);
q 583 drivers/net/wireless/zydas/zd1211rw/zd_mac.c struct sk_buff_head *q = &mac->ack_wait_queue;
q 585 drivers/net/wireless/zydas/zd1211rw/zd_mac.c skb_queue_tail(q, skb);
q 586 drivers/net/wireless/zydas/zd1211rw/zd_mac.c while (skb_queue_len(q) > ZD_MAC_MAX_ACK_WAITERS) {
q 587 drivers/net/wireless/zydas/zd1211rw/zd_mac.c zd_mac_tx_status(hw, skb_dequeue(q),
q 966 drivers/net/wireless/zydas/zd1211rw/zd_mac.c struct sk_buff_head *q;
q 974 drivers/net/wireless/zydas/zd1211rw/zd_mac.c q = &mac->ack_wait_queue;
q 975 drivers/net/wireless/zydas/zd1211rw/zd_mac.c spin_lock_irqsave(&q->lock, flags);
q 976 drivers/net/wireless/zydas/zd1211rw/zd_mac.c skb_queue_walk(q, skb) {
q 981 drivers/net/wireless/zydas/zd1211rw/zd_mac.c if (mac->ack_pending && skb_queue_is_first(q, skb))
q 994 drivers/net/wireless/zydas/zd1211rw/zd_mac.c skb = __skb_dequeue(q);
q 1006 drivers/net/wireless/zydas/zd1211rw/zd_mac.c skb = __skb_dequeue(q);
q 1012 drivers/net/wireless/zydas/zd1211rw/zd_mac.c spin_unlock_irqrestore(&q->lock, flags);
q 1063 drivers/net/wireless/zydas/zd1211rw/zd_usb.c struct sk_buff_head *q = &tx->submitted_skbs;
q 1069 drivers/net/wireless/zydas/zd1211rw/zd_usb.c spin_lock_irqsave(&q->lock, flags);
q 1070 drivers/net/wireless/zydas/zd1211rw/zd_usb.c skb_queue_walk_safe(q, skb, skbnext) {
q 1079 drivers/net/wireless/zydas/zd1211rw/zd_usb.c spin_unlock_irqrestore(&q->lock, flags);
q 165 drivers/nvdimm/blk.c static blk_qc_t nd_blk_make_request(struct request_queue *q, struct bio *bio)
q 179 drivers/nvdimm/blk.c nsblk = q->queuedata;
q 230 drivers/nvdimm/blk.c static void nd_blk_release_queue(void *q)
q 232 drivers/nvdimm/blk.c blk_cleanup_queue(q);
q 245 drivers/nvdimm/blk.c struct request_queue *q;
q 252 drivers/nvdimm/blk.c q = blk_alloc_queue(GFP_KERNEL);
q 253 drivers/nvdimm/blk.c if (!q)
q 255 drivers/nvdimm/blk.c if (devm_add_action_or_reset(dev, nd_blk_release_queue, q))
q 258 drivers/nvdimm/blk.c blk_queue_make_request(q, nd_blk_make_request);
q 259 drivers/nvdimm/blk.c blk_queue_max_hw_sectors(q, UINT_MAX);
q 260 drivers/nvdimm/blk.c blk_queue_logical_block_size(q, nsblk_sector_size(nsblk));
q 261 drivers/nvdimm/blk.c blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
q 262 drivers/nvdimm/blk.c q->queuedata = nsblk;
q 270 drivers/nvdimm/blk.c disk->queue = q;
q 1442 drivers/nvdimm/btt.c static blk_qc_t btt_make_request(struct request_queue *q, struct bio *bio)
q 1445 drivers/nvdimm/btt.c struct btt *btt = q->queuedata;
q 185 drivers/nvdimm/pmem.c static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
q 193 drivers/nvdimm/pmem.c struct pmem_device *pmem = q->queuedata;
q 312 drivers/nvdimm/pmem.c struct request_queue *q =
q 315 drivers/nvdimm/pmem.c blk_cleanup_queue(q);
q 325 drivers/nvdimm/pmem.c struct request_queue *q =
q 328 drivers/nvdimm/pmem.c blk_freeze_queue_start(q);
q 364 drivers/nvdimm/pmem.c struct request_queue *q;
q 401 drivers/nvdimm/pmem.c q = blk_alloc_queue_node(GFP_KERNEL, dev_to_node(dev));
q 402 drivers/nvdimm/pmem.c if (!q)
q 406 drivers/nvdimm/pmem.c pmem->pgmap.ref = &q->q_usage_counter;
q 438 drivers/nvdimm/pmem.c blk_queue_write_cache(q, true, fua);
q 439 drivers/nvdimm/pmem.c blk_queue_make_request(q, pmem_make_request);
q 440 drivers/nvdimm/pmem.c blk_queue_physical_block_size(q, PAGE_SIZE);
q 441 drivers/nvdimm/pmem.c blk_queue_logical_block_size(q, pmem_sector_size(ndns));
q 442 drivers/nvdimm/pmem.c blk_queue_max_hw_sectors(q, UINT_MAX);
q 443 drivers/nvdimm/pmem.c blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
q 445 drivers/nvdimm/pmem.c blk_queue_flag_set(QUEUE_FLAG_DAX, q);
q 446 drivers/nvdimm/pmem.c q->queuedata = pmem;
q 454 drivers/nvdimm/pmem.c disk->queue = q;
q 267 drivers/nvme/host/core.c struct nvme_ns *ns = req->q->queuedata;
q 278 drivers/nvme/host/core.c blk_mq_delay_kick_requeue_list(req->q, delay);
q 297 drivers/nvme/host/core.c if (!blk_queue_dying(req->q)) {
q 479 drivers/nvme/host/core.c struct request *nvme_alloc_request(struct request_queue *q,
q 486 drivers/nvme/host/core.c req = blk_mq_alloc_request(q, op, flags);
q 488 drivers/nvme/host/core.c req = blk_mq_alloc_request_hctx(q, op, flags,
q 597 drivers/nvme/host/core.c if (streamid < ARRAY_SIZE(req->q->write_hints))
q 598 drivers/nvme/host/core.c req->q->write_hints[streamid] += blk_rq_bytes(req) >> 9;
q 797 drivers/nvme/host/core.c static void nvme_execute_rq_polled(struct request_queue *q,
q 802 drivers/nvme/host/core.c WARN_ON_ONCE(!test_bit(QUEUE_FLAG_POLL, &q->queue_flags));
q 806 drivers/nvme/host/core.c blk_execute_rq_nowait(q, bd_disk, rq, at_head, nvme_end_sync_rq);
q 809 drivers/nvme/host/core.c blk_poll(q, request_to_qc_t(rq->mq_hctx, rq), true);
q 818 drivers/nvme/host/core.c int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
q 826 drivers/nvme/host/core.c req = nvme_alloc_request(q, cmd, flags, qid);
q 833 drivers/nvme/host/core.c ret = blk_rq_map_kern(q, req, buffer, bufflen, GFP_KERNEL);
q 839 drivers/nvme/host/core.c nvme_execute_rq_polled(req->q, NULL, req, at_head);
q 841 drivers/nvme/host/core.c blk_execute_rq(req->q, NULL, req, at_head);
q 854 drivers/nvme/host/core.c int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
q 857 drivers/nvme/host/core.c return __nvme_submit_sync_cmd(q, cmd, NULL, buffer, bufflen, 0,
q 896 drivers/nvme/host/core.c static int nvme_submit_user_cmd(struct request_queue *q,
q 902 drivers/nvme/host/core.c struct nvme_ns *ns = q->queuedata;
q 909 drivers/nvme/host/core.c req = nvme_alloc_request(q, cmd, 0, NVME_QID_ANY);
q 917 drivers/nvme/host/core.c ret = blk_rq_map_user(q, req, NULL, ubuffer, bufflen,
q 934 drivers/nvme/host/core.c blk_execute_rq(req->q, disk, req, 0);
q 991 drivers/nvme/host/core.c blk_execute_rq_nowait(rq->q, NULL, rq, 0, nvme_keep_alive_end_io);
q 2193 drivers/nvme/host/core.c struct request_queue *q)
q 2202 drivers/nvme/host/core.c blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors);
q 2203 drivers/nvme/host/core.c blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX));
q 2207 drivers/nvme/host/core.c blk_queue_chunk_sectors(q, ctrl->max_hw_sectors);
q 2208 drivers/nvme/host/core.c blk_queue_virt_boundary(q, ctrl->page_size - 1);
q 2211 drivers/nvme/host/core.c blk_queue_write_cache(q, vwc, vwc);
q 2451 drivers/nvme/host/core.c const struct nvme_core_quirk_entry *q)
q 2453 drivers/nvme/host/core.c return q->vid == le16_to_cpu(id->vid) &&
q 2454 drivers/nvme/host/core.c string_matches(id->mn, q->mn, sizeof(id->mn)) &&
q 2455 drivers/nvme/host/core.c string_matches(id->fr, q->fr, sizeof(id->fr));
q 2148 drivers/nvme/host/fc.c op->nents = blk_rq_map_sg(rq->q, rq, freq->sg_table.sgl);
q 433 drivers/nvme/host/lightnvm.c struct nvme_ns *ns = nvmdev->q->queuedata;
q 478 drivers/nvme/host/lightnvm.c struct request_queue *q = nvmdev->q;
q 480 drivers/nvme/host/lightnvm.c struct nvme_ns *ns = q->queuedata;
q 534 drivers/nvme/host/lightnvm.c struct nvme_ns *ns = nvmdev->q->queuedata;
q 560 drivers/nvme/host/lightnvm.c struct nvme_ns *ns = ndev->q->queuedata;
q 647 drivers/nvme/host/lightnvm.c static struct request *nvme_nvm_alloc_request(struct request_queue *q,
q 651 drivers/nvme/host/lightnvm.c struct nvme_ns *ns = q->queuedata;
q 656 drivers/nvme/host/lightnvm.c rq = nvme_alloc_request(q, (struct nvme_command *)cmd, 0, NVME_QID_ANY);
q 674 drivers/nvme/host/lightnvm.c struct request_queue *q = dev->q;
q 683 drivers/nvme/host/lightnvm.c rq = nvme_nvm_alloc_request(q, rqd, cmd);
q 690 drivers/nvme/host/lightnvm.c ret = blk_rq_map_kern(q, rq, buf, geo->csecs * rqd->nr_ppas,
q 698 drivers/nvme/host/lightnvm.c blk_execute_rq_nowait(q, NULL, rq, 0, nvme_nvm_end_io);
q 710 drivers/nvme/host/lightnvm.c struct nvme_ns *ns = nvmdev->q->queuedata;
q 750 drivers/nvme/host/lightnvm.c static int nvme_nvm_submit_user_cmd(struct request_queue *q,
q 770 drivers/nvme/host/lightnvm.c rq = nvme_alloc_request(q, (struct nvme_command *)vcmd, 0,
q 796 drivers/nvme/host/lightnvm.c ret = blk_rq_map_user(q, rq, NULL, ubuf, bufflen, GFP_KERNEL);
q 823 drivers/nvme/host/lightnvm.c blk_execute_rq(q, NULL, rq, 0);
q 892 drivers/nvme/host/lightnvm.c struct request_queue *q;
q 918 drivers/nvme/host/lightnvm.c q = admin ? ns->ctrl->admin_q : ns->queue;
q 920 drivers/nvme/host/lightnvm.c ret = nvme_nvm_submit_user_cmd(q, ns,
q 950 drivers/nvme/host/lightnvm.c struct request_queue *q = ns->queue;
q 967 drivers/nvme/host/lightnvm.c dev->q = q;
q 69 drivers/nvme/host/multipath.c struct nvme_ns *ns = req->q->queuedata;
q 296 drivers/nvme/host/multipath.c static blk_qc_t nvme_ns_head_make_request(struct request_queue *q,
q 299 drivers/nvme/host/multipath.c struct nvme_ns_head *head = q->queuedata;
q 311 drivers/nvme/host/multipath.c blk_queue_split(q, &bio);
q 364 drivers/nvme/host/multipath.c struct request_queue *q;
q 380 drivers/nvme/host/multipath.c q = blk_alloc_queue_node(GFP_KERNEL, ctrl->numa_node);
q 381 drivers/nvme/host/multipath.c if (!q)
q 383 drivers/nvme/host/multipath.c q->queuedata = head;
q 384 drivers/nvme/host/multipath.c blk_queue_make_request(q, nvme_ns_head_make_request);
q 385 drivers/nvme/host/multipath.c blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
q 387 drivers/nvme/host/multipath.c blk_queue_logical_block_size(q, 512);
q 388 drivers/nvme/host/multipath.c blk_set_stacking_limits(&q->limits);
q 393 drivers/nvme/host/multipath.c blk_queue_write_cache(q, vwc, vwc);
q 400 drivers/nvme/host/multipath.c head->disk->queue = q;
q 407 drivers/nvme/host/multipath.c blk_cleanup_queue(q);
q 483 drivers/nvme/host/nvme.h struct request *nvme_alloc_request(struct request_queue *q,
q 488 drivers/nvme/host/nvme.h int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
q 490 drivers/nvme/host/nvme.h int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
q 547 drivers/nvme/host/nvme.h struct nvme_ns *ns = req->q->queuedata;
q 31 drivers/nvme/host/pci.c #define SQ_SIZE(q) ((q)->q_depth << (q)->sqes)
q 32 drivers/nvme/host/pci.c #define CQ_SIZE(q) ((q)->q_depth * sizeof(struct nvme_completion))
q 822 drivers/nvme/host/pci.c iod->nents = blk_rq_map_sg(req->q, req, iod->sg);
q 1334 drivers/nvme/host/pci.c blk_execute_rq_nowait(abort_req->q, NULL, abort_req, 0, abort_endio);
q 2218 drivers/nvme/host/pci.c struct request_queue *q = nvmeq->dev->ctrl.admin_q;
q 2226 drivers/nvme/host/pci.c req = nvme_alloc_request(q, &cmd, BLK_MQ_REQ_NOWAIT, NVME_QID_ANY);
q 2234 drivers/nvme/host/pci.c blk_execute_rq_nowait(q, NULL, req, false,
q 1287 drivers/nvme/host/rdma.c req->nents = blk_rq_map_sg(rq->q, rq, req->sg_table.sgl);
q 1757 drivers/nvme/target/fc.c queue_90percent_full(struct nvmet_fc_tgt_queue *q, u32 sqhd)
q 1762 drivers/nvme/target/fc.c sqtail = atomic_read(&q->sqtail) % q->sqsize;
q 1764 drivers/nvme/target/fc.c used = (sqtail < sqhd) ? (sqtail + q->sqsize - sqhd) : (sqtail - sqhd);
q 1765 drivers/nvme/target/fc.c return ((used * 10) >= (((u32)(q->sqsize - 1) * 9)));
q 166 drivers/nvme/target/loop.c iod->req.sg_cnt = blk_rq_map_sg(req->q, req, iod->sg_table.sgl);
q 911 drivers/of/fdt.c const char *p, *q, *options = NULL;
q 928 drivers/of/fdt.c q = strchrnul(p, ':');
q 929 drivers/of/fdt.c if (*q != '\0')
q 930 drivers/of/fdt.c options = q + 1;
q 931 drivers/of/fdt.c l = q - p;
q 56 drivers/parport/probe.c char *p = txt, *q;
q 67 drivers/parport/probe.c q = strchr(p, ';');
q 68 drivers/parport/probe.c if (q) *q = 0;
q 117 drivers/parport/probe.c if (q)
q 118 drivers/parport/probe.c p = q + 1;
q 662 drivers/pcmcia/cistpl.c u_char *p, *q;
q 665 drivers/pcmcia/cistpl.c q = p + tuple->TupleDataLen;
q 691 drivers/pcmcia/cistpl.c if (++p == q)
q 695 drivers/pcmcia/cistpl.c if (++p == q)
q 702 drivers/pcmcia/cistpl.c if (++p == q)
q 711 drivers/pcmcia/cistpl.c if (++p == q)
q 760 drivers/pcmcia/cistpl.c static int parse_strings(u_char *p, u_char *q, int max,
q 765 drivers/pcmcia/cistpl.c if (p == q)
q 777 drivers/pcmcia/cistpl.c if (++p == q)
q 780 drivers/pcmcia/cistpl.c if ((*p == 0xff) || (++p == q))
q 794 drivers/pcmcia/cistpl.c u_char *p, *q;
q 797 drivers/pcmcia/cistpl.c q = p + tuple->TupleDataLen;
q 801 drivers/pcmcia/cistpl.c if (p >= q)
q 804 drivers/pcmcia/cistpl.c return parse_strings(p, q, CISTPL_VERS_1_MAX_PROD_STRINGS,
q 811 drivers/pcmcia/cistpl.c u_char *p, *q;
q 814 drivers/pcmcia/cistpl.c q = p + tuple->TupleDataLen;
q 816 drivers/pcmcia/cistpl.c return parse_strings(p, q, CISTPL_MAX_ALTSTR_STRINGS,
q 823 drivers/pcmcia/cistpl.c u_char *p, *q;
q 827 drivers/pcmcia/cistpl.c q = p + tuple->TupleDataLen;
q 830 drivers/pcmcia/cistpl.c if (p > q-2)
q 905 drivers/pcmcia/cistpl.c static u_char *parse_power(u_char *p, u_char *q, cistpl_power_t *pwr)
q 910 drivers/pcmcia/cistpl.c if (p == q)
q 917 drivers/pcmcia/cistpl.c if (p == q)
q 922 drivers/pcmcia/cistpl.c if (++p == q)
q 942 drivers/pcmcia/cistpl.c static u_char *parse_timing(u_char *p, u_char *q, cistpl_timing_t *timing)
q 946 drivers/pcmcia/cistpl.c if (p == q)
q 950 drivers/pcmcia/cistpl.c if (++p == q)
q 958 drivers/pcmcia/cistpl.c if (++p == q)
q 966 drivers/pcmcia/cistpl.c if (++p == q)
q 977 drivers/pcmcia/cistpl.c static u_char *parse_io(u_char *p, u_char *q, cistpl_io_t *io)
q 981 drivers/pcmcia/cistpl.c if (p == q)
q 992 drivers/pcmcia/cistpl.c if (++p == q)
q 1007 drivers/pcmcia/cistpl.c if (p == q)
q 1012 drivers/pcmcia/cistpl.c if (p == q)
q 1021 drivers/pcmcia/cistpl.c static u_char *parse_mem(u_char *p, u_char *q, cistpl_mem_t *mem)
q 1026 drivers/pcmcia/cistpl.c if (p == q)
q 1033 drivers/pcmcia/cistpl.c if (++p == q)
q 1039 drivers/pcmcia/cistpl.c if (p == q)
q 1044 drivers/pcmcia/cistpl.c if (p == q)
q 1050 drivers/pcmcia/cistpl.c if (p == q)
q 1062 drivers/pcmcia/cistpl.c static u_char *parse_irq(u_char *p, u_char *q, cistpl_irq_t *irq)
q 1064 drivers/pcmcia/cistpl.c if (p == q)
q 1068 drivers/pcmcia/cistpl.c if (p+2 > q)
q 1080 drivers/pcmcia/cistpl.c u_char *p, *q, features;
q 1083 drivers/pcmcia/cistpl.c q = p + tuple->TupleDataLen;
q 1089 drivers/pcmcia/cistpl.c if (++p == q)
q 1104 drivers/pcmcia/cistpl.c if (++p == q)
q 1110 drivers/pcmcia/cistpl.c p = parse_power(p, q, &entry->vcc);
q 1116 drivers/pcmcia/cistpl.c p = parse_power(p, q, &entry->vpp1);
q 1122 drivers/pcmcia/cistpl.c p = parse_power(p, q, &entry->vpp2);
q 1130 drivers/pcmcia/cistpl.c p = parse_timing(p, q, &entry->timing);
q 1141 drivers/pcmcia/cistpl.c p = parse_io(p, q, &entry->io);
q 1149 drivers/pcmcia/cistpl.c p = parse_irq(p, q, &entry->irq);
q 1165 drivers/pcmcia/cistpl.c if (p > q)
q 1174 drivers/pcmcia/cistpl.c if (p > q)
q 1178 drivers/pcmcia/cistpl.c p = parse_mem(p, q, &entry->mem);
q 1186 drivers/pcmcia/cistpl.c if (p == q)
q 1190 drivers/pcmcia/cistpl.c if (++p == q)
q 1195 drivers/pcmcia/cistpl.c entry->subtuples = q-p;
q 1203 drivers/pcmcia/cistpl.c u_char *p, *q;
q 1207 drivers/pcmcia/cistpl.c q = p + tuple->TupleDataLen;
q 1210 drivers/pcmcia/cistpl.c if (p > q-6)
q 1227 drivers/pcmcia/cistpl.c u_char *p, *q;
q 1233 drivers/pcmcia/cistpl.c q = p + tuple->TupleDataLen;
q 1242 drivers/pcmcia/cistpl.c return parse_strings(p, q, 2, v2->str, &v2->vendor, NULL);
q 1248 drivers/pcmcia/cistpl.c u_char *p, *q;
q 1252 drivers/pcmcia/cistpl.c q = p + tuple->TupleDataLen;
q 1253 drivers/pcmcia/cistpl.c if (p == q)
q 1256 drivers/pcmcia/cistpl.c if (++p == q)
q 1262 drivers/pcmcia/cistpl.c if (++p == q)
q 110 drivers/pcmcia/rsrc_nonstatic.c struct resource_map *p, *q;
q 120 drivers/pcmcia/rsrc_nonstatic.c q = kmalloc(sizeof(struct resource_map), GFP_KERNEL);
q 121 drivers/pcmcia/rsrc_nonstatic.c if (!q) {
q 125 drivers/pcmcia/rsrc_nonstatic.c q->base = base; q->num = num;
q 126 drivers/pcmcia/rsrc_nonstatic.c q->next = p->next; p->next = q;
q 134 drivers/pcmcia/rsrc_nonstatic.c struct resource_map *p, *q;
q 136 drivers/pcmcia/rsrc_nonstatic.c for (p = map; ; p = q) {
q 137 drivers/pcmcia/rsrc_nonstatic.c q = p->next;
q 138 drivers/pcmcia/rsrc_nonstatic.c if (q == map)
q 140 drivers/pcmcia/rsrc_nonstatic.c if ((q->base+q->num > base) && (base+num > q->base)) {
q 141 drivers/pcmcia/rsrc_nonstatic.c if (q->base >= base) {
q 142 drivers/pcmcia/rsrc_nonstatic.c if (q->base+q->num <= base+num) {
q 144 drivers/pcmcia/rsrc_nonstatic.c p->next = q->next;
q 145 drivers/pcmcia/rsrc_nonstatic.c kfree(q);
q 147 drivers/pcmcia/rsrc_nonstatic.c q = p;
q 150 drivers/pcmcia/rsrc_nonstatic.c q->num = q->base + q->num - base - num;
q 151 drivers/pcmcia/rsrc_nonstatic.c q->base = base + num;
q 153 drivers/pcmcia/rsrc_nonstatic.c } else if (q->base+q->num <= base+num) {
q 155 drivers/pcmcia/rsrc_nonstatic.c q->num = base - q->base;
q 165 drivers/pcmcia/rsrc_nonstatic.c p->num = q->base+q->num - p->base;
q 166 drivers/pcmcia/rsrc_nonstatic.c q->num = base - q->base;
q 167 drivers/pcmcia/rsrc_nonstatic.c p->next = q->next ; q->next = p;
q 1036 drivers/pcmcia/rsrc_nonstatic.c struct resource_map *p, *q;
q 1038 drivers/pcmcia/rsrc_nonstatic.c for (p = data->mem_db_valid.next; p != &data->mem_db_valid; p = q) {
q 1039 drivers/pcmcia/rsrc_nonstatic.c q = p->next;
q 1042 drivers/pcmcia/rsrc_nonstatic.c for (p = data->mem_db.next; p != &data->mem_db; p = q) {
q 1043 drivers/pcmcia/rsrc_nonstatic.c q = p->next;
q 1046 drivers/pcmcia/rsrc_nonstatic.c for (p = data->io_db.next; p != &data->io_db; p = q) {
q 1047 drivers/pcmcia/rsrc_nonstatic.c q = p->next;
q 108 drivers/platform/chrome/wilco_ec/event.c struct ec_event_queue *q;
q 110 drivers/platform/chrome/wilco_ec/event.c q = kzalloc(struct_size(q, entries, capacity), GFP_KERNEL);
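The cistpl.c parse_* entries above share one bounds-checking idiom: each parser receives a one-past-the-end pointer q (computed once as q = p + tuple->TupleDataLen) and tests ++p == q before consuming every byte, so no helper can read past the tuple buffer. A hedged, standalone sketch of the idiom; parse_u16 is an invented name, not part of the pcmcia code.

    #include <stddef.h>

    /* Parse a two-byte little-endian value. Returns the advanced cursor,
     * or NULL when the data runs out, mirroring how parse_power() and
     * friends bail out as soon as ++p reaches the end pointer. */
    static const unsigned char *parse_u16(const unsigned char *p,
                                          const unsigned char *q, /* end */
                                          unsigned int *out)
    {
            if (p == q)             /* no first byte left */
                    return NULL;
            *out = *p;
            if (++p == q)           /* no second byte left */
                    return NULL;
            *out |= (unsigned int)*p << 8;
            return p + 1;
    }

Threading the same end pointer through every sub-parser (parse_power, parse_timing, parse_io, ...) means the bound is established once per tuple and enforced at every single byte read.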
q 111 drivers/platform/chrome/wilco_ec/event.c if (!q)
q 114 drivers/platform/chrome/wilco_ec/event.c q->capacity = capacity;
q 116 drivers/platform/chrome/wilco_ec/event.c return q;
q 119 drivers/platform/chrome/wilco_ec/event.c static inline bool event_queue_empty(struct ec_event_queue *q)
q 122 drivers/platform/chrome/wilco_ec/event.c return q->head == q->tail && !q->entries[q->head];
q 125 drivers/platform/chrome/wilco_ec/event.c static inline bool event_queue_full(struct ec_event_queue *q)
q 128 drivers/platform/chrome/wilco_ec/event.c return q->head == q->tail && q->entries[q->head];
q 131 drivers/platform/chrome/wilco_ec/event.c static struct ec_event *event_queue_pop(struct ec_event_queue *q)
q 135 drivers/platform/chrome/wilco_ec/event.c if (event_queue_empty(q))
q 138 drivers/platform/chrome/wilco_ec/event.c ev = q->entries[q->tail];
q 139 drivers/platform/chrome/wilco_ec/event.c q->entries[q->tail] = NULL;
q 140 drivers/platform/chrome/wilco_ec/event.c q->tail = (q->tail + 1) % q->capacity;
q 149 drivers/platform/chrome/wilco_ec/event.c static struct ec_event *event_queue_push(struct ec_event_queue *q,
q 154 drivers/platform/chrome/wilco_ec/event.c if (event_queue_full(q))
q 155 drivers/platform/chrome/wilco_ec/event.c popped = event_queue_pop(q);
q 156 drivers/platform/chrome/wilco_ec/event.c q->entries[q->head] = ev;
q 157 drivers/platform/chrome/wilco_ec/event.c q->head = (q->head + 1) % q->capacity;
q 162 drivers/platform/chrome/wilco_ec/event.c static void event_queue_free(struct ec_event_queue *q)
q 166 drivers/platform/chrome/wilco_ec/event.c while ((event = event_queue_pop(q)) != NULL)
q 169 drivers/platform/chrome/wilco_ec/event.c kfree(q);
q 36 drivers/ptp/ptp_clock.c static inline int queue_free(struct timestamp_event_queue *q)
q 38 drivers/ptp/ptp_clock.c return PTP_MAX_TIMESTAMPS - queue_cnt(q) - 1;
q 58 drivers/ptp/ptp_private.h static inline int queue_cnt(struct timestamp_event_queue *q)
q 60 drivers/ptp/ptp_private.h int cnt = q->tail - q->head;
q 2794 drivers/s390/block/dasd.c blk_mq_run_hw_queues(req->q, true);
q 3165 drivers/s390/block/dasd.c struct dasd_block *block = req->q->queuedata;
q 3254 drivers/s390/block/dasd.c blk_mq_run_hw_queues(req->q, true);
q 1329 drivers/s390/block/dasd_devmap.c struct request_queue *q;
q 1341 drivers/s390/block/dasd_devmap.c q = device->block->request_queue;
q 1342 drivers/s390/block/dasd_devmap.c if (!q) {
q 1349 drivers/s390/block/dasd_devmap.c blk_queue_rq_timeout(q, device->blk_timeout * HZ);
q 624 drivers/s390/block/dasd_diag.c struct request_queue *q = block->request_queue;
q 628 drivers/s390/block/dasd_diag.c blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
q 629 drivers/s390/block/dasd_diag.c q->limits.max_dev_sectors = max;
q 630 drivers/s390/block/dasd_diag.c blk_queue_logical_block_size(q, logical_block_size);
q 631 drivers/s390/block/dasd_diag.c blk_queue_max_hw_sectors(q, max);
q 632 drivers/s390/block/dasd_diag.c blk_queue_max_segments(q, USHRT_MAX);
q 634 drivers/s390/block/dasd_diag.c blk_queue_max_segment_size(q, PAGE_SIZE);
q 635 drivers/s390/block/dasd_diag.c blk_queue_segment_boundary(q, PAGE_SIZE - 1);
q 6632 drivers/s390/block/dasd_eckd.c struct request_queue *q = block->request_queue;
q 6648 drivers/s390/block/dasd_eckd.c blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
q 6649 drivers/s390/block/dasd_eckd.c q->limits.max_dev_sectors = max;
q 6650 drivers/s390/block/dasd_eckd.c blk_queue_logical_block_size(q, logical_block_size);
q 6651 drivers/s390/block/dasd_eckd.c blk_queue_max_hw_sectors(q, max);
q 6652 drivers/s390/block/dasd_eckd.c blk_queue_max_segments(q, USHRT_MAX);
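The wilco_ec event.c entries above resolve the classic head == tail ambiguity of a ring buffer without sacrificing a slot: a popped slot is reset to NULL, so head == tail means empty when the slot under head is NULL and full when it is occupied, and pushing onto a full queue evicts the oldest event. A self-contained userspace sketch of that convention; ring, ring_push, and the other names are illustrative, not the driver's API.

    #include <stddef.h>

    struct ring {
            size_t head, tail, capacity;
            void *entries[];        /* NULL marks a free slot */
    };

    static int ring_empty(const struct ring *q)
    {
            return q->head == q->tail && !q->entries[q->head];
    }

    static int ring_full(const struct ring *q)
    {
            return q->head == q->tail && q->entries[q->head];
    }

    static void *ring_pop(struct ring *q)
    {
            void *e;

            if (ring_empty(q))
                    return NULL;
            e = q->entries[q->tail];
            q->entries[q->tail] = NULL;     /* re-mark the slot free */
            q->tail = (q->tail + 1) % q->capacity;
            return e;
    }

    /* Push that overwrites the oldest element when full, returning it so
     * the caller can free it -- the behaviour event_queue_push() shows. */
    static void *ring_push(struct ring *q, void *e)
    {
            void *popped = NULL;

            if (ring_full(q))
                    popped = ring_pop(q);
            q->entries[q->head] = e;
            q->head = (q->head + 1) % q->capacity;
            return popped;
    }

Compared with the common "keep one slot empty" scheme, this trades one extra pointer test per operation for use of the ring's full capacity.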
q 6654 drivers/s390/block/dasd_eckd.c blk_queue_max_segment_size(q, PAGE_SIZE);
q 6655 drivers/s390/block/dasd_eckd.c blk_queue_segment_boundary(q, PAGE_SIZE - 1);
q 778 drivers/s390/block/dasd_fba.c struct request_queue *q = block->request_queue;
q 783 drivers/s390/block/dasd_fba.c blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
q 784 drivers/s390/block/dasd_fba.c q->limits.max_dev_sectors = max;
q 785 drivers/s390/block/dasd_fba.c blk_queue_logical_block_size(q, logical_block_size);
q 786 drivers/s390/block/dasd_fba.c blk_queue_max_hw_sectors(q, max);
q 787 drivers/s390/block/dasd_fba.c blk_queue_max_segments(q, USHRT_MAX);
q 789 drivers/s390/block/dasd_fba.c blk_queue_max_segment_size(q, PAGE_SIZE);
q 790 drivers/s390/block/dasd_fba.c blk_queue_segment_boundary(q, PAGE_SIZE - 1);
q 792 drivers/s390/block/dasd_fba.c q->limits.discard_granularity = logical_block_size;
q 793 drivers/s390/block/dasd_fba.c q->limits.discard_alignment = PAGE_SIZE;
q 800 drivers/s390/block/dasd_fba.c blk_queue_max_discard_sectors(q, max_discard_sectors);
q 801 drivers/s390/block/dasd_fba.c blk_queue_max_write_zeroes_sectors(q, max_discard_sectors);
q 802 drivers/s390/block/dasd_fba.c blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
q 34 drivers/s390/block/dcssblk.c static blk_qc_t dcssblk_make_request(struct request_queue *q,
q 855 drivers/s390/block/dcssblk.c dcssblk_make_request(struct request_queue *q, struct bio *bio)
q 865 drivers/s390/block/dcssblk.c blk_queue_split(q, &bio);
q 185 drivers/s390/block/xpram.c static blk_qc_t xpram_make_request(struct request_queue *q, struct bio *bio)
q 194 drivers/s390/block/xpram.c blk_queue_split(q, &bio);
q 311 drivers/s390/cio/qdio.h #define queue_type(q) q->irq_ptr->qib.qfmt
q 312 drivers/s390/cio/qdio.h #define SCH_NO(q) (q->irq_ptr->schid.sch_no)
q 327 drivers/s390/cio/qdio.h static inline void account_sbals_error(struct qdio_q *q, int count)
q 329 drivers/s390/cio/qdio.h q->q_stats.nr_sbal_error += count;
q 330 drivers/s390/cio/qdio.h q->q_stats.nr_sbal_total += count;
q 334 drivers/s390/cio/qdio.h static inline int multicast_outbound(struct qdio_q *q)
q 336 drivers/s390/cio/qdio.h return (q->irq_ptr->nr_output_qs > 1) &&
q 337 drivers/s390/cio/qdio.h (q->nr == q->irq_ptr->nr_output_qs - 1);
q 341 drivers/s390/cio/qdio.h #define is_qebsm(q) (q->irq_ptr->sch_token != 0)
q 343 drivers/s390/cio/qdio.h #define need_siga_in(q) (q->irq_ptr->siga_flag.input)
q 344 drivers/s390/cio/qdio.h #define need_siga_out(q) (q->irq_ptr->siga_flag.output)
q 345 drivers/s390/cio/qdio.h #define need_siga_sync(q) (unlikely(q->irq_ptr->siga_flag.sync))
q 346 drivers/s390/cio/qdio.h #define need_siga_sync_after_ai(q) \
q 347 drivers/s390/cio/qdio.h (unlikely(q->irq_ptr->siga_flag.sync_after_ai))
q 348 drivers/s390/cio/qdio.h #define need_siga_sync_out_after_pci(q) \
q 349 drivers/s390/cio/qdio.h (unlikely(q->irq_ptr->siga_flag.sync_out_after_pci))
q 351 drivers/s390/cio/qdio.h #define for_each_input_queue(irq_ptr, q, i) \
q 353 drivers/s390/cio/qdio.h ({ q = irq_ptr->input_qs[i]; 1; }); i++)
q 354 drivers/s390/cio/qdio.h #define for_each_output_queue(irq_ptr, q, i) \
q 356 drivers/s390/cio/qdio.h ({ q = irq_ptr->output_qs[i]; 1; }); i++)
q 367 drivers/s390/cio/qdio.h #define queue_irqs_enabled(q) \
q 368 drivers/s390/cio/qdio.h (test_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state) == 0)
q 369 drivers/s390/cio/qdio.h #define queue_irqs_disabled(q) \
q 370 drivers/s390/cio/qdio.h (test_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state) != 0)
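The for_each_input_queue/for_each_output_queue entries above hide the per-iteration cursor assignment inside the for-condition using a GNU C statement expression that always yields 1. A compilable sketch of the same trick, under invented names (it relies on the gcc/clang statement-expression extension, as the kernel does):

    /* Iterate over box->slots, assigning the cursor in the condition:
     * the ({ ...; 1; }) expression runs the assignment, then evaluates
     * to 1 so it never terminates the loop by itself. */
    struct item { int id; };
    struct box { int nr; struct item *slots[4]; };

    #define for_each_item(b, it, i) \
            for ((i) = 0; (i) < (b)->nr && \
                 ({ (it) = (b)->slots[(i)]; 1; }); (i)++)

    static int sum_ids(struct box *b)
    {
            struct item *it;
            int i, sum = 0;

            for_each_item(b, it, i)
                    sum += it->id;
            return sum;
    }

The payoff is that callers get a fresh, correctly typed cursor each pass without repeating the indexing expression in the loop body.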
q 380 drivers/s390/cio/qdio.h void tiqdio_inbound_processing(unsigned long q);
q 408 drivers/s390/cio/qdio.h int qdio_enable_async_operation(struct qdio_output_q *q);
q 409 drivers/s390/cio/qdio.h void qdio_disable_async_operation(struct qdio_output_q *q);
q 412 drivers/s390/cio/qdio.h int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr,
q 116 drivers/s390/cio/qdio_debug.c struct qdio_q *q = m->private;
q 119 drivers/s390/cio/qdio_debug.c if (!q)
q 123 drivers/s390/cio/qdio_debug.c q->timestamp, last_ai_time);
q 125 drivers/s390/cio/qdio_debug.c atomic_read(&q->nr_buf_used), q->first_to_check);
q 126 drivers/s390/cio/qdio_debug.c if (q->is_input_q) {
q 128 drivers/s390/cio/qdio_debug.c q->u.in.polling, q->u.in.ack_start,
q 129 drivers/s390/cio/qdio_debug.c q->u.in.ack_count);
q 131 drivers/s390/cio/qdio_debug.c *(u8 *)q->irq_ptr->dsci,
q 133 drivers/s390/cio/qdio_debug.c &q->u.in.queue_irq_state));
q 139 drivers/s390/cio/qdio_debug.c debug_get_buf_state(q, i, &state);
q 177 drivers/s390/cio/qdio_debug.c if (!q->irq_ptr->perf_stat_enabled) {
q 184 drivers/s390/cio/qdio_debug.c for (i = 0; i < ARRAY_SIZE(q->q_stats.nr_sbals); i++)
q 185 drivers/s390/cio/qdio_debug.c seq_printf(m, "%-10u ", q->q_stats.nr_sbals[i]);
q 187 drivers/s390/cio/qdio_debug.c q->q_stats.nr_sbal_error, q->q_stats.nr_sbal_nop,
q 188 drivers/s390/cio/qdio_debug.c q->q_stats.nr_sbal_total);
q 246 drivers/s390/cio/qdio_debug.c struct qdio_q *q;
q 261 drivers/s390/cio/qdio_debug.c for_each_input_queue(irq_ptr, q, i)
q 262 drivers/s390/cio/qdio_debug.c memset(&q->q_stats, 0, sizeof(q->q_stats));
q 263 drivers/s390/cio/qdio_debug.c for_each_output_queue(irq_ptr, q, i)
q 264 drivers/s390/cio/qdio_debug.c memset(&q->q_stats, 0, sizeof(q->q_stats));
q 288 drivers/s390/cio/qdio_debug.c static void setup_debugfs_entry(struct qdio_q *q)
q 293 drivers/s390/cio/qdio_debug.c q->is_input_q ? "input" : "output",
"input" : "output", q 294 drivers/s390/cio/qdio_debug.c q->nr); q 295 drivers/s390/cio/qdio_debug.c q->debugfs_q = debugfs_create_file(name, 0444, q 296 drivers/s390/cio/qdio_debug.c q->irq_ptr->debugfs_dev, q, &qstat_fops); q 297 drivers/s390/cio/qdio_debug.c if (IS_ERR(q->debugfs_q)) q 298 drivers/s390/cio/qdio_debug.c q->debugfs_q = NULL; q 303 drivers/s390/cio/qdio_debug.c struct qdio_q *q; q 318 drivers/s390/cio/qdio_debug.c for_each_input_queue(irq_ptr, q, i) q 319 drivers/s390/cio/qdio_debug.c setup_debugfs_entry(q); q 320 drivers/s390/cio/qdio_debug.c for_each_output_queue(irq_ptr, q, i) q 321 drivers/s390/cio/qdio_debug.c setup_debugfs_entry(q); q 326 drivers/s390/cio/qdio_debug.c struct qdio_q *q; q 329 drivers/s390/cio/qdio_debug.c for_each_input_queue(irq_ptr, q, i) q 330 drivers/s390/cio/qdio_debug.c debugfs_remove(q->debugfs_q); q 331 drivers/s390/cio/qdio_debug.c for_each_output_queue(irq_ptr, q, i) q 332 drivers/s390/cio/qdio_debug.c debugfs_remove(q->debugfs_q); q 112 drivers/s390/cio/qdio_main.c static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state, q 115 drivers/s390/cio/qdio_main.c int tmp_count = count, tmp_start = start, nr = q->nr; q 118 drivers/s390/cio/qdio_main.c qperf_inc(q, eqbs); q 120 drivers/s390/cio/qdio_main.c if (!q->is_input_q) q 121 drivers/s390/cio/qdio_main.c nr += q->irq_ptr->nr_input_qs; q 123 drivers/s390/cio/qdio_main.c ccq = do_eqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count, q 133 drivers/s390/cio/qdio_main.c qperf_inc(q, eqbs_partial); q 134 drivers/s390/cio/qdio_main.c DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS part:%02x", q 139 drivers/s390/cio/qdio_main.c DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS again:%2d", ccq); q 142 drivers/s390/cio/qdio_main.c DBF_ERROR("%4x ccq:%3d", SCH_NO(q), ccq); q 143 drivers/s390/cio/qdio_main.c DBF_ERROR("%4x EQBS ERROR", SCH_NO(q)); q 145 drivers/s390/cio/qdio_main.c q->handler(q->irq_ptr->cdev, QDIO_ERROR_GET_BUF_STATE, q->nr, q 146 drivers/s390/cio/qdio_main.c q->first_to_kick, count, q->irq_ptr->int_parm); q 162 drivers/s390/cio/qdio_main.c static int qdio_do_sqbs(struct qdio_q *q, unsigned char state, int start, q 167 drivers/s390/cio/qdio_main.c int nr = q->nr; q 171 drivers/s390/cio/qdio_main.c qperf_inc(q, sqbs); q 173 drivers/s390/cio/qdio_main.c if (!q->is_input_q) q 174 drivers/s390/cio/qdio_main.c nr += q->irq_ptr->nr_input_qs; q 176 drivers/s390/cio/qdio_main.c ccq = do_sqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count); q 186 drivers/s390/cio/qdio_main.c DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "SQBS again:%2d", ccq); q 187 drivers/s390/cio/qdio_main.c qperf_inc(q, sqbs_partial); q 190 drivers/s390/cio/qdio_main.c DBF_ERROR("%4x ccq:%3d", SCH_NO(q), ccq); q 191 drivers/s390/cio/qdio_main.c DBF_ERROR("%4x SQBS ERROR", SCH_NO(q)); q 193 drivers/s390/cio/qdio_main.c q->handler(q->irq_ptr->cdev, QDIO_ERROR_SET_BUF_STATE, q->nr, q 194 drivers/s390/cio/qdio_main.c q->first_to_kick, count, q->irq_ptr->int_parm); q 203 drivers/s390/cio/qdio_main.c static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr, q 210 drivers/s390/cio/qdio_main.c if (is_qebsm(q)) q 211 drivers/s390/cio/qdio_main.c return qdio_do_eqbs(q, state, bufnr, count, auto_ack); q 214 drivers/s390/cio/qdio_main.c __state = q->slsb.val[bufnr]; q 228 drivers/s390/cio/qdio_main.c q->slsb.val[bufnr] == SLSB_P_OUTPUT_PENDING && q 233 drivers/s390/cio/qdio_main.c if (q->slsb.val[bufnr] != __state) q 242 drivers/s390/cio/qdio_main.c static inline int get_buf_state(struct qdio_q *q, unsigned int bufnr, q 245 
q 249 drivers/s390/cio/qdio_main.c static inline int set_buf_states(struct qdio_q *q, int bufnr,
q 254 drivers/s390/cio/qdio_main.c if (is_qebsm(q))
q 255 drivers/s390/cio/qdio_main.c return qdio_do_sqbs(q, state, bufnr, count);
q 258 drivers/s390/cio/qdio_main.c xchg(&q->slsb.val[bufnr], state);
q 264 drivers/s390/cio/qdio_main.c static inline int set_buf_state(struct qdio_q *q, int bufnr,
q 267 drivers/s390/cio/qdio_main.c return set_buf_states(q, bufnr, state, 1);
q 273 drivers/s390/cio/qdio_main.c struct qdio_q *q;
q 276 drivers/s390/cio/qdio_main.c for_each_input_queue(irq_ptr, q, i)
q 277 drivers/s390/cio/qdio_main.c set_buf_states(q, 0, SLSB_P_INPUT_NOT_INIT,
q 279 drivers/s390/cio/qdio_main.c for_each_output_queue(irq_ptr, q, i)
q 280 drivers/s390/cio/qdio_main.c set_buf_states(q, 0, SLSB_P_OUTPUT_NOT_INIT,
q 284 drivers/s390/cio/qdio_main.c static inline int qdio_siga_sync(struct qdio_q *q, unsigned int output,
q 287 drivers/s390/cio/qdio_main.c unsigned long schid = *((u32 *) &q->irq_ptr->schid);
q 291 drivers/s390/cio/qdio_main.c DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-s:%1d", q->nr);
q 292 drivers/s390/cio/qdio_main.c qperf_inc(q, siga_sync);
q 294 drivers/s390/cio/qdio_main.c if (is_qebsm(q)) {
q 295 drivers/s390/cio/qdio_main.c schid = q->irq_ptr->sch_token;
q 301 drivers/s390/cio/qdio_main.c DBF_ERROR("%4x SIGA-S:%2d", SCH_NO(q), cc);
q 305 drivers/s390/cio/qdio_main.c static inline int qdio_siga_sync_q(struct qdio_q *q)
q 307 drivers/s390/cio/qdio_main.c if (q->is_input_q)
q 308 drivers/s390/cio/qdio_main.c return qdio_siga_sync(q, 0, q->mask);
q 310 drivers/s390/cio/qdio_main.c return qdio_siga_sync(q, q->mask, 0);
q 313 drivers/s390/cio/qdio_main.c static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit,
q 316 drivers/s390/cio/qdio_main.c unsigned long schid = *((u32 *) &q->irq_ptr->schid);
q 327 drivers/s390/cio/qdio_main.c if (is_qebsm(q)) {
q 328 drivers/s390/cio/qdio_main.c schid = q->irq_ptr->sch_token;
q 332 drivers/s390/cio/qdio_main.c cc = do_siga_output(schid, q->mask, busy_bit, fc, laob);
q 346 drivers/s390/cio/qdio_main.c DBF_DEV_EVENT(DBF_WARN, q->irq_ptr,
q 347 drivers/s390/cio/qdio_main.c "%4x cc2 BB1:%1d", SCH_NO(q), q->nr);
q 348 drivers/s390/cio/qdio_main.c DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "count:%u", retries);
q 353 drivers/s390/cio/qdio_main.c static inline int qdio_siga_input(struct qdio_q *q)
q 355 drivers/s390/cio/qdio_main.c unsigned long schid = *((u32 *) &q->irq_ptr->schid);
q 359 drivers/s390/cio/qdio_main.c DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-r:%1d", q->nr);
q 360 drivers/s390/cio/qdio_main.c qperf_inc(q, siga_read);
q 362 drivers/s390/cio/qdio_main.c if (is_qebsm(q)) {
q 363 drivers/s390/cio/qdio_main.c schid = q->irq_ptr->sch_token;
q 367 drivers/s390/cio/qdio_main.c cc = do_siga_input(schid, q->mask, fc);
q 369 drivers/s390/cio/qdio_main.c DBF_ERROR("%4x SIGA-R:%2d", SCH_NO(q), cc);
q 373 drivers/s390/cio/qdio_main.c #define qdio_siga_sync_out(q) qdio_siga_sync(q, ~0U, 0)
q 374 drivers/s390/cio/qdio_main.c #define qdio_siga_sync_all(q) qdio_siga_sync(q, ~0U, ~0U)
q 376 drivers/s390/cio/qdio_main.c static inline void qdio_sync_queues(struct qdio_q *q)
q 379 drivers/s390/cio/qdio_main.c if (pci_out_supported(q->irq_ptr))
q 380 drivers/s390/cio/qdio_main.c qdio_siga_sync_all(q);
q 382 drivers/s390/cio/qdio_main.c qdio_siga_sync_q(q);
q 385 drivers/s390/cio/qdio_main.c int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr,
q 388 drivers/s390/cio/qdio_main.c if (need_siga_sync(q))
drivers/s390/cio/qdio_main.c if (need_siga_sync(q)) q 389 drivers/s390/cio/qdio_main.c qdio_siga_sync_q(q); q 390 drivers/s390/cio/qdio_main.c return get_buf_state(q, bufnr, state, 0); q 393 drivers/s390/cio/qdio_main.c static inline void qdio_stop_polling(struct qdio_q *q) q 395 drivers/s390/cio/qdio_main.c if (!q->u.in.polling) q 398 drivers/s390/cio/qdio_main.c q->u.in.polling = 0; q 399 drivers/s390/cio/qdio_main.c qperf_inc(q, stop_polling); q 402 drivers/s390/cio/qdio_main.c if (is_qebsm(q)) { q 403 drivers/s390/cio/qdio_main.c set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT, q 404 drivers/s390/cio/qdio_main.c q->u.in.ack_count); q 405 drivers/s390/cio/qdio_main.c q->u.in.ack_count = 0; q 407 drivers/s390/cio/qdio_main.c set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT); q 410 drivers/s390/cio/qdio_main.c static inline void account_sbals(struct qdio_q *q, unsigned int count) q 414 drivers/s390/cio/qdio_main.c q->q_stats.nr_sbal_total += count; q 416 drivers/s390/cio/qdio_main.c q->q_stats.nr_sbals[7]++; q 420 drivers/s390/cio/qdio_main.c q->q_stats.nr_sbals[pos]++; q 423 drivers/s390/cio/qdio_main.c static void process_buffer_error(struct qdio_q *q, unsigned int start, q 426 drivers/s390/cio/qdio_main.c unsigned char state = (q->is_input_q) ? SLSB_P_INPUT_NOT_INIT : q 429 drivers/s390/cio/qdio_main.c q->qdio_error = QDIO_ERROR_SLSB_STATE; q 432 drivers/s390/cio/qdio_main.c if (queue_type(q) == QDIO_IQDIO_QFMT && !q->is_input_q && q 433 drivers/s390/cio/qdio_main.c q->sbal[start]->element[15].sflags == 0x10) { q 434 drivers/s390/cio/qdio_main.c qperf_inc(q, target_full); q 435 drivers/s390/cio/qdio_main.c DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x", start); q 439 drivers/s390/cio/qdio_main.c DBF_ERROR("%4x BUF ERROR", SCH_NO(q)); q 440 drivers/s390/cio/qdio_main.c DBF_ERROR((q->is_input_q) ? 
"IN:%2d" : "OUT:%2d", q->nr); q 443 drivers/s390/cio/qdio_main.c q->sbal[start]->element[14].sflags, q 444 drivers/s390/cio/qdio_main.c q->sbal[start]->element[15].sflags); q 451 drivers/s390/cio/qdio_main.c set_buf_states(q, start, state, count); q 454 drivers/s390/cio/qdio_main.c static inline void inbound_primed(struct qdio_q *q, unsigned int start, q 459 drivers/s390/cio/qdio_main.c DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in prim:%1d %02x", q->nr, count); q 462 drivers/s390/cio/qdio_main.c if (is_qebsm(q)) { q 463 drivers/s390/cio/qdio_main.c if (!q->u.in.polling) { q 464 drivers/s390/cio/qdio_main.c q->u.in.polling = 1; q 465 drivers/s390/cio/qdio_main.c q->u.in.ack_count = count; q 466 drivers/s390/cio/qdio_main.c q->u.in.ack_start = start; q 471 drivers/s390/cio/qdio_main.c set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT, q 472 drivers/s390/cio/qdio_main.c q->u.in.ack_count); q 473 drivers/s390/cio/qdio_main.c q->u.in.ack_count = count; q 474 drivers/s390/cio/qdio_main.c q->u.in.ack_start = start; q 483 drivers/s390/cio/qdio_main.c if (q->u.in.polling) { q 485 drivers/s390/cio/qdio_main.c set_buf_state(q, new, SLSB_P_INPUT_ACK); q 486 drivers/s390/cio/qdio_main.c set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT); q 488 drivers/s390/cio/qdio_main.c q->u.in.polling = 1; q 489 drivers/s390/cio/qdio_main.c set_buf_state(q, new, SLSB_P_INPUT_ACK); q 492 drivers/s390/cio/qdio_main.c q->u.in.ack_start = new; q 497 drivers/s390/cio/qdio_main.c set_buf_states(q, start, SLSB_P_INPUT_NOT_INIT, count); q 500 drivers/s390/cio/qdio_main.c static int get_inbound_buffer_frontier(struct qdio_q *q, unsigned int start) q 505 drivers/s390/cio/qdio_main.c q->timestamp = get_tod_clock_fast(); q 511 drivers/s390/cio/qdio_main.c count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK); q 519 drivers/s390/cio/qdio_main.c count = get_buf_states(q, start, &state, count, 1, 0); q 525 drivers/s390/cio/qdio_main.c inbound_primed(q, start, count); q 526 drivers/s390/cio/qdio_main.c if (atomic_sub_return(count, &q->nr_buf_used) == 0) q 527 drivers/s390/cio/qdio_main.c qperf_inc(q, inbound_queue_full); q 528 drivers/s390/cio/qdio_main.c if (q->irq_ptr->perf_stat_enabled) q 529 drivers/s390/cio/qdio_main.c account_sbals(q, count); q 532 drivers/s390/cio/qdio_main.c process_buffer_error(q, start, count); q 533 drivers/s390/cio/qdio_main.c if (atomic_sub_return(count, &q->nr_buf_used) == 0) q 534 drivers/s390/cio/qdio_main.c qperf_inc(q, inbound_queue_full); q 535 drivers/s390/cio/qdio_main.c if (q->irq_ptr->perf_stat_enabled) q 536 drivers/s390/cio/qdio_main.c account_sbals_error(q, count); q 541 drivers/s390/cio/qdio_main.c if (q->irq_ptr->perf_stat_enabled) q 542 drivers/s390/cio/qdio_main.c q->q_stats.nr_sbal_nop++; q 543 drivers/s390/cio/qdio_main.c DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in nop:%1d %#02x", q 544 drivers/s390/cio/qdio_main.c q->nr, start); q 552 drivers/s390/cio/qdio_main.c static int qdio_inbound_q_moved(struct qdio_q *q, unsigned int start) q 556 drivers/s390/cio/qdio_main.c count = get_inbound_buffer_frontier(q, start); q 558 drivers/s390/cio/qdio_main.c if (count && !is_thinint_irq(q->irq_ptr) && MACHINE_IS_LPAR) q 559 drivers/s390/cio/qdio_main.c q->u.in.timestamp = get_tod_clock(); q 564 drivers/s390/cio/qdio_main.c static inline int qdio_inbound_q_done(struct qdio_q *q, unsigned int start) q 568 drivers/s390/cio/qdio_main.c if (!atomic_read(&q->nr_buf_used)) q 571 drivers/s390/cio/qdio_main.c if (need_siga_sync(q)) q 572 drivers/s390/cio/qdio_main.c 
qdio_siga_sync_q(q); q 573 drivers/s390/cio/qdio_main.c get_buf_state(q, start, &state, 0); q 579 drivers/s390/cio/qdio_main.c if (is_thinint_irq(q->irq_ptr)) q 590 drivers/s390/cio/qdio_main.c if (get_tod_clock_fast() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) { q 591 drivers/s390/cio/qdio_main.c DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%02x", start); q 597 drivers/s390/cio/qdio_main.c static inline void qdio_handle_aobs(struct qdio_q *q, int start, int count) q 603 drivers/s390/cio/qdio_main.c get_buf_state(q, b, &state, 0); q 605 drivers/s390/cio/qdio_main.c struct qaob *aob = q->u.out.aobs[b]; q 609 drivers/s390/cio/qdio_main.c q->u.out.sbal_state[b].flags |= q 611 drivers/s390/cio/qdio_main.c q->u.out.aobs[b] = NULL; q 617 drivers/s390/cio/qdio_main.c static inline unsigned long qdio_aob_for_buffer(struct qdio_output_q *q, q 622 drivers/s390/cio/qdio_main.c if (!q->aobs[bufnr]) { q 624 drivers/s390/cio/qdio_main.c q->aobs[bufnr] = aob; q 626 drivers/s390/cio/qdio_main.c if (q->aobs[bufnr]) { q 627 drivers/s390/cio/qdio_main.c q->aobs[bufnr]->user1 = (u64) q->sbal_state[bufnr].user; q 628 drivers/s390/cio/qdio_main.c phys_aob = virt_to_phys(q->aobs[bufnr]); q 632 drivers/s390/cio/qdio_main.c q->sbal_state[bufnr].flags = 0; q 636 drivers/s390/cio/qdio_main.c static void qdio_kick_handler(struct qdio_q *q, unsigned int count) q 638 drivers/s390/cio/qdio_main.c int start = q->first_to_kick; q 640 drivers/s390/cio/qdio_main.c if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)) q 643 drivers/s390/cio/qdio_main.c if (q->is_input_q) { q 644 drivers/s390/cio/qdio_main.c qperf_inc(q, inbound_handler); q 645 drivers/s390/cio/qdio_main.c DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "kih s:%02x c:%02x", start, count); q 647 drivers/s390/cio/qdio_main.c qperf_inc(q, outbound_handler); q 648 drivers/s390/cio/qdio_main.c DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "koh: s:%02x c:%02x", q 652 drivers/s390/cio/qdio_main.c q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr, start, count, q 653 drivers/s390/cio/qdio_main.c q->irq_ptr->int_parm); q 656 drivers/s390/cio/qdio_main.c q->first_to_kick = add_buf(start, count); q 657 drivers/s390/cio/qdio_main.c q->qdio_error = 0; q 660 drivers/s390/cio/qdio_main.c static inline int qdio_tasklet_schedule(struct qdio_q *q) q 662 drivers/s390/cio/qdio_main.c if (likely(q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE)) { q 663 drivers/s390/cio/qdio_main.c tasklet_schedule(&q->tasklet); q 669 drivers/s390/cio/qdio_main.c static void __qdio_inbound_processing(struct qdio_q *q) q 671 drivers/s390/cio/qdio_main.c unsigned int start = q->first_to_check; q 674 drivers/s390/cio/qdio_main.c qperf_inc(q, tasklet_inbound); q 676 drivers/s390/cio/qdio_main.c count = qdio_inbound_q_moved(q, start); q 681 drivers/s390/cio/qdio_main.c q->first_to_check = start; q 682 drivers/s390/cio/qdio_main.c qdio_kick_handler(q, count); q 684 drivers/s390/cio/qdio_main.c if (!qdio_inbound_q_done(q, start)) { q 686 drivers/s390/cio/qdio_main.c qperf_inc(q, tasklet_inbound_resched); q 687 drivers/s390/cio/qdio_main.c if (!qdio_tasklet_schedule(q)) q 691 drivers/s390/cio/qdio_main.c qdio_stop_polling(q); q 696 drivers/s390/cio/qdio_main.c if (!qdio_inbound_q_done(q, start)) { q 697 drivers/s390/cio/qdio_main.c qperf_inc(q, tasklet_inbound_resched2); q 698 drivers/s390/cio/qdio_main.c qdio_tasklet_schedule(q); q 704 drivers/s390/cio/qdio_main.c struct qdio_q *q = (struct qdio_q *)data; q 705 drivers/s390/cio/qdio_main.c __qdio_inbound_processing(q); q 708 drivers/s390/cio/qdio_main.c static int 
get_outbound_buffer_frontier(struct qdio_q *q, unsigned int start) q 713 drivers/s390/cio/qdio_main.c q->timestamp = get_tod_clock_fast(); q 715 drivers/s390/cio/qdio_main.c if (need_siga_sync(q)) q 716 drivers/s390/cio/qdio_main.c if (((queue_type(q) != QDIO_IQDIO_QFMT) && q 717 drivers/s390/cio/qdio_main.c !pci_out_supported(q->irq_ptr)) || q 718 drivers/s390/cio/qdio_main.c (queue_type(q) == QDIO_IQDIO_QFMT && q 719 drivers/s390/cio/qdio_main.c multicast_outbound(q))) q 720 drivers/s390/cio/qdio_main.c qdio_siga_sync_q(q); q 722 drivers/s390/cio/qdio_main.c count = atomic_read(&q->nr_buf_used); q 726 drivers/s390/cio/qdio_main.c count = get_buf_states(q, start, &state, count, 0, q->u.out.use_cq); q 734 drivers/s390/cio/qdio_main.c DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, q 735 drivers/s390/cio/qdio_main.c "out empty:%1d %02x", q->nr, count); q 737 drivers/s390/cio/qdio_main.c atomic_sub(count, &q->nr_buf_used); q 738 drivers/s390/cio/qdio_main.c if (q->irq_ptr->perf_stat_enabled) q 739 drivers/s390/cio/qdio_main.c account_sbals(q, count); q 742 drivers/s390/cio/qdio_main.c process_buffer_error(q, start, count); q 743 drivers/s390/cio/qdio_main.c atomic_sub(count, &q->nr_buf_used); q 744 drivers/s390/cio/qdio_main.c if (q->irq_ptr->perf_stat_enabled) q 745 drivers/s390/cio/qdio_main.c account_sbals_error(q, count); q 749 drivers/s390/cio/qdio_main.c if (q->irq_ptr->perf_stat_enabled) q 750 drivers/s390/cio/qdio_main.c q->q_stats.nr_sbal_nop++; q 751 drivers/s390/cio/qdio_main.c DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out primed:%1d", q 752 drivers/s390/cio/qdio_main.c q->nr); q 764 drivers/s390/cio/qdio_main.c static inline int qdio_outbound_q_done(struct qdio_q *q) q 766 drivers/s390/cio/qdio_main.c return atomic_read(&q->nr_buf_used) == 0; q 769 drivers/s390/cio/qdio_main.c static inline int qdio_outbound_q_moved(struct qdio_q *q, unsigned int start) q 773 drivers/s390/cio/qdio_main.c count = get_outbound_buffer_frontier(q, start); q 776 drivers/s390/cio/qdio_main.c DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out moved:%1d", q->nr); q 777 drivers/s390/cio/qdio_main.c if (q->u.out.use_cq) q 778 drivers/s390/cio/qdio_main.c qdio_handle_aobs(q, start, count); q 784 drivers/s390/cio/qdio_main.c static int qdio_kick_outbound_q(struct qdio_q *q, unsigned long aob) q 789 drivers/s390/cio/qdio_main.c if (!need_siga_out(q)) q 792 drivers/s390/cio/qdio_main.c DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w:%1d", q->nr); q 794 drivers/s390/cio/qdio_main.c qperf_inc(q, siga_write); q 796 drivers/s390/cio/qdio_main.c cc = qdio_siga_output(q, &busy_bit, aob); q 806 drivers/s390/cio/qdio_main.c DBF_ERROR("%4x cc2 BBC:%1d", SCH_NO(q), q->nr); q 809 drivers/s390/cio/qdio_main.c DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w cc2:%1d", q->nr); q 815 drivers/s390/cio/qdio_main.c DBF_ERROR("%4x SIGA-W:%1d", SCH_NO(q), cc); q 820 drivers/s390/cio/qdio_main.c DBF_ERROR("%4x cc2 BB2:%1d", SCH_NO(q), q->nr); q 826 drivers/s390/cio/qdio_main.c static void __qdio_outbound_processing(struct qdio_q *q) q 828 drivers/s390/cio/qdio_main.c unsigned int start = q->first_to_check; q 831 drivers/s390/cio/qdio_main.c qperf_inc(q, tasklet_outbound); q 832 drivers/s390/cio/qdio_main.c WARN_ON_ONCE(atomic_read(&q->nr_buf_used) < 0); q 834 drivers/s390/cio/qdio_main.c count = qdio_outbound_q_moved(q, start); q 836 drivers/s390/cio/qdio_main.c q->first_to_check = add_buf(start, count); q 837 drivers/s390/cio/qdio_main.c qdio_kick_handler(q, count); q 840 drivers/s390/cio/qdio_main.c if (queue_type(q) == QDIO_ZFCP_QFMT && 
!pci_out_supported(q->irq_ptr) && q 841 drivers/s390/cio/qdio_main.c !qdio_outbound_q_done(q)) q 844 drivers/s390/cio/qdio_main.c if (q->u.out.pci_out_enabled) q 852 drivers/s390/cio/qdio_main.c if (qdio_outbound_q_done(q)) q 853 drivers/s390/cio/qdio_main.c del_timer_sync(&q->u.out.timer); q 855 drivers/s390/cio/qdio_main.c if (!timer_pending(&q->u.out.timer) && q 856 drivers/s390/cio/qdio_main.c likely(q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE)) q 857 drivers/s390/cio/qdio_main.c mod_timer(&q->u.out.timer, jiffies + 10 * HZ); q 861 drivers/s390/cio/qdio_main.c qdio_tasklet_schedule(q); q 867 drivers/s390/cio/qdio_main.c struct qdio_q *q = (struct qdio_q *)data; q 868 drivers/s390/cio/qdio_main.c __qdio_outbound_processing(q); q 873 drivers/s390/cio/qdio_main.c struct qdio_q *q = from_timer(q, t, u.out.timer); q 875 drivers/s390/cio/qdio_main.c qdio_tasklet_schedule(q); q 891 drivers/s390/cio/qdio_main.c static void __tiqdio_inbound_processing(struct qdio_q *q) q 893 drivers/s390/cio/qdio_main.c unsigned int start = q->first_to_check; q 896 drivers/s390/cio/qdio_main.c qperf_inc(q, tasklet_inbound); q 897 drivers/s390/cio/qdio_main.c if (need_siga_sync(q) && need_siga_sync_after_ai(q)) q 898 drivers/s390/cio/qdio_main.c qdio_sync_queues(q); q 901 drivers/s390/cio/qdio_main.c qdio_check_outbound_pci_queues(q->irq_ptr); q 903 drivers/s390/cio/qdio_main.c count = qdio_inbound_q_moved(q, start); q 908 drivers/s390/cio/qdio_main.c q->first_to_check = start; q 909 drivers/s390/cio/qdio_main.c qdio_kick_handler(q, count); q 911 drivers/s390/cio/qdio_main.c if (!qdio_inbound_q_done(q, start)) { q 912 drivers/s390/cio/qdio_main.c qperf_inc(q, tasklet_inbound_resched); q 913 drivers/s390/cio/qdio_main.c if (!qdio_tasklet_schedule(q)) q 917 drivers/s390/cio/qdio_main.c qdio_stop_polling(q); q 922 drivers/s390/cio/qdio_main.c if (!qdio_inbound_q_done(q, start)) { q 923 drivers/s390/cio/qdio_main.c qperf_inc(q, tasklet_inbound_resched2); q 924 drivers/s390/cio/qdio_main.c qdio_tasklet_schedule(q); q 930 drivers/s390/cio/qdio_main.c struct qdio_q *q = (struct qdio_q *)data; q 931 drivers/s390/cio/qdio_main.c __tiqdio_inbound_processing(q); q 956 drivers/s390/cio/qdio_main.c struct qdio_q *q; q 961 drivers/s390/cio/qdio_main.c for_each_input_queue(irq_ptr, q, i) { q 962 drivers/s390/cio/qdio_main.c if (q->u.in.queue_start_poll) { q 965 drivers/s390/cio/qdio_main.c &q->u.in.queue_irq_state)) { q 966 drivers/s390/cio/qdio_main.c qperf_inc(q, int_discarded); q 969 drivers/s390/cio/qdio_main.c q->u.in.queue_start_poll(q->irq_ptr->cdev, q->nr, q 970 drivers/s390/cio/qdio_main.c q->irq_ptr->int_parm); q 972 drivers/s390/cio/qdio_main.c tasklet_schedule(&q->tasklet); q 979 drivers/s390/cio/qdio_main.c for_each_output_queue(irq_ptr, q, i) { q 980 drivers/s390/cio/qdio_main.c if (qdio_outbound_q_done(q)) q 982 drivers/s390/cio/qdio_main.c if (need_siga_sync(q) && need_siga_sync_out_after_pci(q)) q 983 drivers/s390/cio/qdio_main.c qdio_siga_sync_q(q); q 984 drivers/s390/cio/qdio_main.c qdio_tasklet_schedule(q); q 992 drivers/s390/cio/qdio_main.c struct qdio_q *q; q 1000 drivers/s390/cio/qdio_main.c q = irq_ptr->input_qs[0]; q 1002 drivers/s390/cio/qdio_main.c q = irq_ptr->output_qs[0]; q 1008 drivers/s390/cio/qdio_main.c count = sub_buf(q->first_to_check, q->first_to_kick); q 1009 drivers/s390/cio/qdio_main.c q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE, q 1010 drivers/s390/cio/qdio_main.c q->nr, q->first_to_kick, count, irq_ptr->int_parm); q 1119 drivers/s390/cio/qdio_main.c struct qdio_q *q; q 1122 
drivers/s390/cio/qdio_main.c for_each_input_queue(irq_ptr, q, i) q 1123 drivers/s390/cio/qdio_main.c tasklet_kill(&q->tasklet); q 1125 drivers/s390/cio/qdio_main.c for_each_output_queue(irq_ptr, q, i) { q 1126 drivers/s390/cio/qdio_main.c del_timer_sync(&q->u.out.timer); q 1127 drivers/s390/cio/qdio_main.c tasklet_kill(&q->tasklet); q 1299 drivers/s390/cio/qdio_main.c struct qdio_q *q = irq_ptr->input_qs[0]; q 1302 drivers/s390/cio/qdio_main.c if (irq_ptr->nr_input_qs > 1 && queue_type(q) == QDIO_IQDIO_QFMT) q 1305 drivers/s390/cio/qdio_main.c for_each_output_queue(irq_ptr, q, i) { q 1307 drivers/s390/cio/qdio_main.c if (multicast_outbound(q)) q 1309 drivers/s390/cio/qdio_main.c if (qdio_enable_async_operation(&q->u.out) < 0) { q 1314 drivers/s390/cio/qdio_main.c qdio_disable_async_operation(&q->u.out); q 1477 drivers/s390/cio/qdio_main.c static int handle_inbound(struct qdio_q *q, unsigned int callflags, q 1482 drivers/s390/cio/qdio_main.c qperf_inc(q, inbound_call); q 1484 drivers/s390/cio/qdio_main.c if (!q->u.in.polling) q 1490 drivers/s390/cio/qdio_main.c q->u.in.polling = 0; q 1491 drivers/s390/cio/qdio_main.c q->u.in.ack_count = 0; q 1493 drivers/s390/cio/qdio_main.c } else if (buf_in_between(q->u.in.ack_start, bufnr, count)) { q 1494 drivers/s390/cio/qdio_main.c if (is_qebsm(q)) { q 1497 drivers/s390/cio/qdio_main.c diff = sub_buf(diff, q->u.in.ack_start); q 1498 drivers/s390/cio/qdio_main.c q->u.in.ack_count -= diff; q 1499 drivers/s390/cio/qdio_main.c if (q->u.in.ack_count <= 0) { q 1500 drivers/s390/cio/qdio_main.c q->u.in.polling = 0; q 1501 drivers/s390/cio/qdio_main.c q->u.in.ack_count = 0; q 1504 drivers/s390/cio/qdio_main.c q->u.in.ack_start = add_buf(q->u.in.ack_start, diff); q 1508 drivers/s390/cio/qdio_main.c q->u.in.polling = 0; q 1512 drivers/s390/cio/qdio_main.c count = set_buf_states(q, bufnr, SLSB_CU_INPUT_EMPTY, count); q 1513 drivers/s390/cio/qdio_main.c atomic_add(count, &q->nr_buf_used); q 1515 drivers/s390/cio/qdio_main.c if (need_siga_in(q)) q 1516 drivers/s390/cio/qdio_main.c return qdio_siga_input(q); q 1528 drivers/s390/cio/qdio_main.c static int handle_outbound(struct qdio_q *q, unsigned int callflags, q 1531 drivers/s390/cio/qdio_main.c const unsigned int scan_threshold = q->irq_ptr->scan_threshold; q 1535 drivers/s390/cio/qdio_main.c qperf_inc(q, outbound_call); q 1537 drivers/s390/cio/qdio_main.c count = set_buf_states(q, bufnr, SLSB_CU_OUTPUT_PRIMED, count); q 1538 drivers/s390/cio/qdio_main.c used = atomic_add_return(count, &q->nr_buf_used); q 1541 drivers/s390/cio/qdio_main.c qperf_inc(q, outbound_queue_full); q 1544 drivers/s390/cio/qdio_main.c q->u.out.pci_out_enabled = 1; q 1545 drivers/s390/cio/qdio_main.c qperf_inc(q, pci_request_int); q 1547 drivers/s390/cio/qdio_main.c q->u.out.pci_out_enabled = 0; q 1549 drivers/s390/cio/qdio_main.c if (queue_type(q) == QDIO_IQDIO_QFMT) { q 1553 drivers/s390/cio/qdio_main.c WARN_ON_ONCE(count > 1 && !multicast_outbound(q)); q 1555 drivers/s390/cio/qdio_main.c if (q->u.out.use_cq) q 1556 drivers/s390/cio/qdio_main.c phys_aob = qdio_aob_for_buffer(&q->u.out, bufnr); q 1558 drivers/s390/cio/qdio_main.c rc = qdio_kick_outbound_q(q, phys_aob); q 1559 drivers/s390/cio/qdio_main.c } else if (need_siga_sync(q)) { q 1560 drivers/s390/cio/qdio_main.c rc = qdio_siga_sync_q(q); q 1562 drivers/s390/cio/qdio_main.c get_buf_state(q, prev_buf(bufnr), &state, 0) > 0 && q 1565 drivers/s390/cio/qdio_main.c qperf_inc(q, fast_requeue); q 1567 drivers/s390/cio/qdio_main.c rc = qdio_kick_outbound_q(q, 0); q 1576 
drivers/s390/cio/qdio_main.c qdio_tasklet_schedule(q); q 1579 drivers/s390/cio/qdio_main.c if (!timer_pending(&q->u.out.timer) && q 1580 drivers/s390/cio/qdio_main.c likely(q->irq_ptr->state == QDIO_IRQ_STATE_ACTIVE)) q 1581 drivers/s390/cio/qdio_main.c mod_timer(&q->u.out.timer, jiffies + HZ); q 1633 drivers/s390/cio/qdio_main.c struct qdio_q *q; q 1638 drivers/s390/cio/qdio_main.c q = irq_ptr->input_qs[nr]; q 1641 drivers/s390/cio/qdio_main.c qdio_stop_polling(q); q 1642 drivers/s390/cio/qdio_main.c clear_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state); q 1650 drivers/s390/cio/qdio_main.c if (!qdio_inbound_q_done(q, q->first_to_check)) q 1656 drivers/s390/cio/qdio_main.c &q->u.in.queue_irq_state)) q 1664 drivers/s390/cio/qdio_main.c static int __qdio_inspect_queue(struct qdio_q *q, unsigned int *bufnr, q 1667 drivers/s390/cio/qdio_main.c unsigned int start = q->first_to_check; q 1670 drivers/s390/cio/qdio_main.c count = q->is_input_q ? qdio_inbound_q_moved(q, start) : q 1671 drivers/s390/cio/qdio_main.c qdio_outbound_q_moved(q, start); q 1676 drivers/s390/cio/qdio_main.c *error = q->qdio_error; q 1679 drivers/s390/cio/qdio_main.c q->first_to_check = add_buf(start, count); q 1680 drivers/s390/cio/qdio_main.c q->qdio_error = 0; q 1689 drivers/s390/cio/qdio_main.c struct qdio_q *q; q 1693 drivers/s390/cio/qdio_main.c q = is_input ? irq_ptr->input_qs[nr] : irq_ptr->output_qs[nr]; q 1695 drivers/s390/cio/qdio_main.c if (need_siga_sync(q)) q 1696 drivers/s390/cio/qdio_main.c qdio_siga_sync_q(q); q 1698 drivers/s390/cio/qdio_main.c return __qdio_inspect_queue(q, bufnr, error); q 1717 drivers/s390/cio/qdio_main.c struct qdio_q *q; q 1722 drivers/s390/cio/qdio_main.c q = irq_ptr->input_qs[nr]; q 1728 drivers/s390/cio/qdio_main.c if (need_siga_sync(q)) q 1729 drivers/s390/cio/qdio_main.c qdio_sync_queues(q); q 1734 drivers/s390/cio/qdio_main.c if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)) q 1737 drivers/s390/cio/qdio_main.c return __qdio_inspect_queue(q, bufnr, error); q 1752 drivers/s390/cio/qdio_main.c struct qdio_q *q; q 1757 drivers/s390/cio/qdio_main.c q = irq_ptr->input_qs[nr]; q 1760 drivers/s390/cio/qdio_main.c &q->u.in.queue_irq_state)) q 108 drivers/s390/cio/qdio_setup.c struct qdio_q *q; q 122 drivers/s390/cio/qdio_setup.c for_each_input_queue(irq_ptr, q, i) { q 124 drivers/s390/cio/qdio_setup.c q->slib->slibe[j].parms = q 131 drivers/s390/cio/qdio_setup.c for_each_output_queue(irq_ptr, q, i) { q 133 drivers/s390/cio/qdio_setup.c q->slib->slibe[j].parms = q 140 drivers/s390/cio/qdio_setup.c struct qdio_q *q; q 144 drivers/s390/cio/qdio_setup.c q = kmem_cache_zalloc(qdio_q_cache, GFP_KERNEL); q 145 drivers/s390/cio/qdio_setup.c if (!q) q 148 drivers/s390/cio/qdio_setup.c q->slib = (struct slib *) __get_free_page(GFP_KERNEL); q 149 drivers/s390/cio/qdio_setup.c if (!q->slib) { q 150 drivers/s390/cio/qdio_setup.c kmem_cache_free(qdio_q_cache, q); q 153 drivers/s390/cio/qdio_setup.c irq_ptr_qs[i] = q; q 154 drivers/s390/cio/qdio_setup.c INIT_LIST_HEAD(&q->entry); q 170 drivers/s390/cio/qdio_setup.c static void setup_queues_misc(struct qdio_q *q, struct qdio_irq *irq_ptr, q 173 drivers/s390/cio/qdio_setup.c struct slib *slib = q->slib; q 176 drivers/s390/cio/qdio_setup.c memset(q, 0, sizeof(*q)); q 178 drivers/s390/cio/qdio_setup.c q->slib = slib; q 179 drivers/s390/cio/qdio_setup.c q->irq_ptr = irq_ptr; q 180 drivers/s390/cio/qdio_setup.c q->mask = 1 << (31 - i); q 181 drivers/s390/cio/qdio_setup.c q->nr = i; q 182 drivers/s390/cio/qdio_setup.c q->handler = handler; q 
183 drivers/s390/cio/qdio_setup.c INIT_LIST_HEAD(&q->entry); q 186 drivers/s390/cio/qdio_setup.c static void setup_storage_lists(struct qdio_q *q, struct qdio_irq *irq_ptr, q 192 drivers/s390/cio/qdio_setup.c DBF_HEX(&q, sizeof(void *)); q 193 drivers/s390/cio/qdio_setup.c q->sl = (struct sl *)((char *)q->slib + PAGE_SIZE / 2); q 197 drivers/s390/cio/qdio_setup.c q->sbal[j] = *sbals_array++; q 201 drivers/s390/cio/qdio_setup.c prev = (q->is_input_q) ? irq_ptr->input_qs[i - 1] q 203 drivers/s390/cio/qdio_setup.c prev->slib->nsliba = (unsigned long)q->slib; q 206 drivers/s390/cio/qdio_setup.c q->slib->sla = (unsigned long)q->sl; q 207 drivers/s390/cio/qdio_setup.c q->slib->slsba = (unsigned long)&q->slsb.val[0]; q 211 drivers/s390/cio/qdio_setup.c q->sl->element[j].sbal = virt_to_phys(q->sbal[j]); q 217 drivers/s390/cio/qdio_setup.c struct qdio_q *q; q 224 drivers/s390/cio/qdio_setup.c for_each_input_queue(irq_ptr, q, i) { q 226 drivers/s390/cio/qdio_setup.c setup_queues_misc(q, irq_ptr, qdio_init->input_handler, i); q 228 drivers/s390/cio/qdio_setup.c q->is_input_q = 1; q 229 drivers/s390/cio/qdio_setup.c q->u.in.queue_start_poll = qdio_init->queue_start_poll_array ? q 232 drivers/s390/cio/qdio_setup.c setup_storage_lists(q, irq_ptr, input_sbal_array, i); q 236 drivers/s390/cio/qdio_setup.c tasklet_init(&q->tasklet, tiqdio_inbound_processing, q 237 drivers/s390/cio/qdio_setup.c (unsigned long) q); q 239 drivers/s390/cio/qdio_setup.c tasklet_init(&q->tasklet, qdio_inbound_processing, q 240 drivers/s390/cio/qdio_setup.c (unsigned long) q); q 244 drivers/s390/cio/qdio_setup.c for_each_output_queue(irq_ptr, q, i) { q 246 drivers/s390/cio/qdio_setup.c setup_queues_misc(q, irq_ptr, qdio_init->output_handler, i); q 248 drivers/s390/cio/qdio_setup.c q->u.out.sbal_state = output_sbal_state_array; q 251 drivers/s390/cio/qdio_setup.c q->is_input_q = 0; q 252 drivers/s390/cio/qdio_setup.c setup_storage_lists(q, irq_ptr, output_sbal_array, i); q 255 drivers/s390/cio/qdio_setup.c tasklet_init(&q->tasklet, qdio_outbound_processing, q 256 drivers/s390/cio/qdio_setup.c (unsigned long) q); q 257 drivers/s390/cio/qdio_setup.c timer_setup(&q->u.out.timer, qdio_outbound_timer, 0); q 358 drivers/s390/cio/qdio_setup.c struct qdio_q *q; q 366 drivers/s390/cio/qdio_setup.c q = irq_ptr->input_qs[i]; q 367 drivers/s390/cio/qdio_setup.c if (q) { q 368 drivers/s390/cio/qdio_setup.c free_page((unsigned long) q->slib); q 369 drivers/s390/cio/qdio_setup.c kmem_cache_free(qdio_q_cache, q); q 373 drivers/s390/cio/qdio_setup.c q = irq_ptr->output_qs[i]; q 374 drivers/s390/cio/qdio_setup.c if (q) { q 375 drivers/s390/cio/qdio_setup.c if (q->u.out.use_cq) { q 379 drivers/s390/cio/qdio_setup.c struct qaob *aob = q->u.out.aobs[n]; q 382 drivers/s390/cio/qdio_setup.c q->u.out.aobs[n] = NULL; q 386 drivers/s390/cio/qdio_setup.c qdio_disable_async_operation(&q->u.out); q 388 drivers/s390/cio/qdio_setup.c free_page((unsigned long) q->slib); q 389 drivers/s390/cio/qdio_setup.c kmem_cache_free(qdio_q_cache, q); q 551 drivers/s390/cio/qdio_setup.c void qdio_disable_async_operation(struct qdio_output_q *q) q 553 drivers/s390/cio/qdio_setup.c kfree(q->aobs); q 554 drivers/s390/cio/qdio_setup.c q->aobs = NULL; q 555 drivers/s390/cio/qdio_setup.c q->use_cq = 0; q 86 drivers/s390/cio/qdio_thinint.c struct qdio_q *q; q 88 drivers/s390/cio/qdio_thinint.c q = irq_ptr->input_qs[0]; q 89 drivers/s390/cio/qdio_thinint.c if (!q) q 93 drivers/s390/cio/qdio_thinint.c list_del_rcu(&q->entry); q 96 drivers/s390/cio/qdio_thinint.c 
INIT_LIST_HEAD(&q->entry); q 145 drivers/s390/cio/qdio_thinint.c struct qdio_q *q; q 152 drivers/s390/cio/qdio_thinint.c for_each_input_queue(irq, q, i) { q 153 drivers/s390/cio/qdio_thinint.c if (q->u.in.queue_start_poll) { q 156 drivers/s390/cio/qdio_thinint.c &q->u.in.queue_irq_state)) { q 157 drivers/s390/cio/qdio_thinint.c qperf_inc(q, int_discarded); q 162 drivers/s390/cio/qdio_thinint.c q->u.in.queue_start_poll(irq->cdev, q->nr, q 172 drivers/s390/cio/qdio_thinint.c tasklet_schedule(&q->tasklet); q 185 drivers/s390/cio/qdio_thinint.c struct qdio_q *q; q 194 drivers/s390/cio/qdio_thinint.c list_for_each_entry_rcu(q, &tiq_list, entry) { q 198 drivers/s390/cio/qdio_thinint.c irq = q->irq_ptr; q 207 drivers/s390/cio/qdio_thinint.c qperf_inc(q, adapter_int); q 54 drivers/s390/crypto/vfio_ap_drv.c struct vfio_ap_queue *q; q 56 drivers/s390/crypto/vfio_ap_drv.c q = kzalloc(sizeof(*q), GFP_KERNEL); q 57 drivers/s390/crypto/vfio_ap_drv.c if (!q) q 59 drivers/s390/crypto/vfio_ap_drv.c dev_set_drvdata(&apdev->device, q); q 60 drivers/s390/crypto/vfio_ap_drv.c q->apqn = to_ap_queue(&apdev->device)->qid; q 61 drivers/s390/crypto/vfio_ap_drv.c q->saved_isc = VFIO_AP_ISC_INVALID; q 73 drivers/s390/crypto/vfio_ap_drv.c struct vfio_ap_queue *q; q 77 drivers/s390/crypto/vfio_ap_drv.c q = dev_get_drvdata(&apdev->device); q 79 drivers/s390/crypto/vfio_ap_drv.c apid = AP_QID_CARD(q->apqn); q 80 drivers/s390/crypto/vfio_ap_drv.c apqi = AP_QID_QUEUE(q->apqn); q 82 drivers/s390/crypto/vfio_ap_drv.c vfio_ap_irq_disable(q); q 83 drivers/s390/crypto/vfio_ap_drv.c kfree(q); q 31 drivers/s390/crypto/vfio_ap_ops.c struct vfio_ap_queue *q = dev_get_drvdata(dev); q 33 drivers/s390/crypto/vfio_ap_ops.c return (q->apqn == *(int *)(data)) ? 1 : 0; q 51 drivers/s390/crypto/vfio_ap_ops.c struct vfio_ap_queue *q; q 63 drivers/s390/crypto/vfio_ap_ops.c q = dev_get_drvdata(dev); q 64 drivers/s390/crypto/vfio_ap_ops.c q->matrix_mdev = matrix_mdev; q 67 drivers/s390/crypto/vfio_ap_ops.c return q; q 120 drivers/s390/crypto/vfio_ap_ops.c static void vfio_ap_free_aqic_resources(struct vfio_ap_queue *q) q 122 drivers/s390/crypto/vfio_ap_ops.c if (q->saved_isc != VFIO_AP_ISC_INVALID && q->matrix_mdev) q 123 drivers/s390/crypto/vfio_ap_ops.c kvm_s390_gisc_unregister(q->matrix_mdev->kvm, q->saved_isc); q 124 drivers/s390/crypto/vfio_ap_ops.c if (q->saved_pfn && q->matrix_mdev) q 125 drivers/s390/crypto/vfio_ap_ops.c vfio_unpin_pages(mdev_dev(q->matrix_mdev->mdev), q 126 drivers/s390/crypto/vfio_ap_ops.c &q->saved_pfn, 1); q 127 drivers/s390/crypto/vfio_ap_ops.c q->saved_pfn = 0; q 128 drivers/s390/crypto/vfio_ap_ops.c q->saved_isc = VFIO_AP_ISC_INVALID; q 147 drivers/s390/crypto/vfio_ap_ops.c struct ap_queue_status vfio_ap_irq_disable(struct vfio_ap_queue *q) q 154 drivers/s390/crypto/vfio_ap_ops.c status = ap_aqic(q->apqn, aqic_gisa, NULL); q 158 drivers/s390/crypto/vfio_ap_ops.c vfio_ap_wait_for_irqclear(q->apqn); q 179 drivers/s390/crypto/vfio_ap_ops.c vfio_ap_free_aqic_resources(q); q 180 drivers/s390/crypto/vfio_ap_ops.c q->matrix_mdev = NULL; q 200 drivers/s390/crypto/vfio_ap_ops.c static struct ap_queue_status vfio_ap_irq_enable(struct vfio_ap_queue *q, q 212 drivers/s390/crypto/vfio_ap_ops.c ret = vfio_pin_pages(mdev_dev(q->matrix_mdev->mdev), &g_pfn, 1, q 222 drivers/s390/crypto/vfio_ap_ops.c kvm = q->matrix_mdev->kvm; q 231 drivers/s390/crypto/vfio_ap_ops.c status = ap_aqic(q->apqn, aqic_gisa, (void *)h_nib); q 235 drivers/s390/crypto/vfio_ap_ops.c vfio_ap_free_aqic_resources(q); q 236 drivers/s390/crypto/vfio_ap_ops.c 
q->saved_pfn = g_pfn; q 237 drivers/s390/crypto/vfio_ap_ops.c q->saved_isc = isc; q 241 drivers/s390/crypto/vfio_ap_ops.c vfio_unpin_pages(mdev_dev(q->matrix_mdev->mdev), &g_pfn, 1); q 245 drivers/s390/crypto/vfio_ap_ops.c pr_warn("%s: apqn %04x: response: %02x\n", __func__, q->apqn, q 247 drivers/s390/crypto/vfio_ap_ops.c vfio_ap_irq_disable(q); q 279 drivers/s390/crypto/vfio_ap_ops.c struct vfio_ap_queue *q; q 296 drivers/s390/crypto/vfio_ap_ops.c q = vfio_ap_get_queue(matrix_mdev, apqn); q 297 drivers/s390/crypto/vfio_ap_ops.c if (!q) q 304 drivers/s390/crypto/vfio_ap_ops.c qstatus = vfio_ap_irq_enable(q, status & 0x07, q 307 drivers/s390/crypto/vfio_ap_ops.c qstatus = vfio_ap_irq_disable(q); q 1120 drivers/s390/crypto/vfio_ap_ops.c struct vfio_ap_queue *q; q 1125 drivers/s390/crypto/vfio_ap_ops.c q = dev_get_drvdata(dev); q 1126 drivers/s390/crypto/vfio_ap_ops.c vfio_ap_irq_disable(q); q 103 drivers/s390/crypto/vfio_ap_private.h struct ap_queue_status vfio_ap_irq_disable(struct vfio_ap_queue *q); q 78 drivers/s390/crypto/zcrypt_cex2a.h unsigned char q[64]; q 91 drivers/s390/crypto/zcrypt_cex2a.h unsigned char q[128]; q 104 drivers/s390/crypto/zcrypt_cex2a.h unsigned char q[256]; q 106 drivers/s390/crypto/zcrypt_msgtype50.c unsigned char q[64]; q 119 drivers/s390/crypto/zcrypt_msgtype50.c unsigned char q[128]; q 132 drivers/s390/crypto/zcrypt_msgtype50.c unsigned char q[256]; q 266 drivers/s390/crypto/zcrypt_msgtype50.c unsigned char *p, *q, *dp, *dq, *u, *inp; q 286 drivers/s390/crypto/zcrypt_msgtype50.c q = crb1->q + sizeof(crb1->q) - short_len; q 300 drivers/s390/crypto/zcrypt_msgtype50.c q = crb2->q + sizeof(crb2->q) - short_len; q 315 drivers/s390/crypto/zcrypt_msgtype50.c q = crb3->q + sizeof(crb3->q) - short_len; q 328 drivers/s390/crypto/zcrypt_msgtype50.c copy_from_user(q, crt->nq_prime, short_len) || q 214 drivers/s390/net/ctcm_fsms.c void ctcm_purge_skb_queue(struct sk_buff_head *q) q 220 drivers/s390/net/ctcm_fsms.c while ((skb = skb_dequeue(q))) { q 161 drivers/s390/net/ctcm_fsms.h void ctcm_purge_skb_queue(struct sk_buff_head *q); q 942 drivers/s390/net/netiucv.c static void netiucv_purge_skb_queue(struct sk_buff_head *q) q 946 drivers/s390/net/netiucv.c while ((skb = skb_dequeue(q))) { q 455 drivers/s390/net/qeth_core.h struct qeth_qdio_out_q *q; q 537 drivers/s390/net/qeth_core.h #define qeth_for_each_output_queue(card, q, i) \ q 539 drivers/s390/net/qeth_core.h (q = card->qdio.out_qs[i]); i++) q 262 drivers/s390/net/qeth_core_main.c static void qeth_free_qdio_queue(struct qeth_qdio_q *q) q 264 drivers/s390/net/qeth_core_main.c if (!q) q 267 drivers/s390/net/qeth_core_main.c qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q); q 268 drivers/s390/net/qeth_core_main.c kfree(q); q 273 drivers/s390/net/qeth_core_main.c struct qeth_qdio_q *q = kzalloc(sizeof(*q), GFP_KERNEL); q 276 drivers/s390/net/qeth_core_main.c if (!q) q 279 drivers/s390/net/qeth_core_main.c if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q)) { q 280 drivers/s390/net/qeth_core_main.c kfree(q); q 285 drivers/s390/net/qeth_core_main.c q->bufs[i].buffer = q->qdio_bufs[i]; q 287 drivers/s390/net/qeth_core_main.c QETH_DBF_HEX(SETUP, 2, &q, sizeof(void *)); q 288 drivers/s390/net/qeth_core_main.c return q; q 395 drivers/s390/net/qeth_core_main.c static void qeth_cleanup_handled_pending(struct qeth_qdio_out_q *q, int bidx, q 398 drivers/s390/net/qeth_core_main.c if (q->card->options.cq != QETH_CQ_ENABLED) q 401 drivers/s390/net/qeth_core_main.c if (q->bufs[bidx]->next_pending != NULL) { q 402 
drivers/s390/net/qeth_core_main.c struct qeth_qdio_out_buffer *head = q->bufs[bidx]; q 403 drivers/s390/net/qeth_core_main.c struct qeth_qdio_out_buffer *c = q->bufs[bidx]->next_pending; q 410 drivers/s390/net/qeth_core_main.c QETH_CARD_TEXT(f->q->card, 5, "fp"); q 411 drivers/s390/net/qeth_core_main.c QETH_CARD_TEXT_(f->q->card, 5, "%lx", (long) f); q 428 drivers/s390/net/qeth_core_main.c if (forced_cleanup && (atomic_read(&(q->bufs[bidx]->state)) == q 431 drivers/s390/net/qeth_core_main.c qeth_init_qdio_out_buf(q, bidx); q 432 drivers/s390/net/qeth_core_main.c QETH_CARD_TEXT(q->card, 2, "clprecov"); q 465 drivers/s390/net/qeth_core_main.c qeth_notify_skbs(buffer->q, buffer, notification); q 1077 drivers/s390/net/qeth_core_main.c static void qeth_notify_skbs(struct qeth_qdio_out_q *q, q 1084 drivers/s390/net/qeth_core_main.c QETH_CARD_TEXT_(q->card, 5, "skbn%d", notification); q 1085 drivers/s390/net/qeth_core_main.c QETH_CARD_TEXT_(q->card, 5, "%lx", (long) skb); q 1094 drivers/s390/net/qeth_core_main.c struct qeth_qdio_out_q *queue = buf->q; q 1159 drivers/s390/net/qeth_core_main.c static void qeth_drain_output_queue(struct qeth_qdio_out_q *q, bool free) q 1164 drivers/s390/net/qeth_core_main.c if (!q->bufs[j]) q 1166 drivers/s390/net/qeth_core_main.c qeth_cleanup_handled_pending(q, j, 1); q 1167 drivers/s390/net/qeth_core_main.c qeth_clear_output_buffer(q, q->bufs[j], true, 0); q 1169 drivers/s390/net/qeth_core_main.c kmem_cache_free(qeth_qdio_outbuf_cache, q->bufs[j]); q 1170 drivers/s390/net/qeth_core_main.c q->bufs[j] = NULL; q 2278 drivers/s390/net/qeth_core_main.c static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *q, int bidx) q 2286 drivers/s390/net/qeth_core_main.c newbuf->buffer = q->qdio_bufs[bidx]; q 2289 drivers/s390/net/qeth_core_main.c newbuf->q = q; q 2290 drivers/s390/net/qeth_core_main.c newbuf->next_pending = q->bufs[bidx]; q 2292 drivers/s390/net/qeth_core_main.c q->bufs[bidx] = newbuf; q 2296 drivers/s390/net/qeth_core_main.c static void qeth_free_output_queue(struct qeth_qdio_out_q *q) q 2298 drivers/s390/net/qeth_core_main.c if (!q) q 2301 drivers/s390/net/qeth_core_main.c qeth_drain_output_queue(q, true); q 2302 drivers/s390/net/qeth_core_main.c qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q); q 2303 drivers/s390/net/qeth_core_main.c kfree(q); q 2308 drivers/s390/net/qeth_core_main.c struct qeth_qdio_out_q *q = kzalloc(sizeof(*q), GFP_KERNEL); q 2310 drivers/s390/net/qeth_core_main.c if (!q) q 2313 drivers/s390/net/qeth_core_main.c if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q)) { q 2314 drivers/s390/net/qeth_core_main.c kfree(q); q 2317 drivers/s390/net/qeth_core_main.c return q; q 2099 drivers/s390/scsi/zfcp_fsf.c blk_add_driver_data(scsi->request->q, scsi->request, &blktrc, q 2689 drivers/scsi/aacraid/aacraid.h int aac_consumer_get(struct aac_dev * dev, struct aac_queue * q, struct aac_entry **entry); q 2690 drivers/scsi/aacraid/aacraid.h void aac_consumer_free(struct aac_dev * dev, struct aac_queue * q, u32 qnum); q 2714 drivers/scsi/aacraid/aacraid.h unsigned int aac_response_normal(struct aac_queue * q); q 2715 drivers/scsi/aacraid/aacraid.h unsigned int aac_command_normal(struct aac_queue * q); q 259 drivers/scsi/aacraid/comminit.c static void aac_queue_init(struct aac_dev * dev, struct aac_queue * q, u32 *mem, int qsize) q 261 drivers/scsi/aacraid/comminit.c atomic_set(&q->numpending, 0); q 262 drivers/scsi/aacraid/comminit.c q->dev = dev; q 263 drivers/scsi/aacraid/comminit.c init_waitqueue_head(&q->cmdready); q 264 
drivers/scsi/aacraid/comminit.c INIT_LIST_HEAD(&q->cmdq); q 265 drivers/scsi/aacraid/comminit.c init_waitqueue_head(&q->qfull); q 266 drivers/scsi/aacraid/comminit.c spin_lock_init(&q->lockdata); q 267 drivers/scsi/aacraid/comminit.c q->lock = &q->lockdata; q 268 drivers/scsi/aacraid/comminit.c q->headers.producer = (__le32 *)mem; q 269 drivers/scsi/aacraid/comminit.c q->headers.consumer = (__le32 *)(mem+1); q 270 drivers/scsi/aacraid/comminit.c *(q->headers.producer) = cpu_to_le32(qsize); q 271 drivers/scsi/aacraid/comminit.c *(q->headers.consumer) = cpu_to_le32(qsize); q 272 drivers/scsi/aacraid/comminit.c q->entries = qsize; q 359 drivers/scsi/aacraid/commsup.c struct aac_queue * q; q 369 drivers/scsi/aacraid/commsup.c q = &dev->queues->queue[qid]; q 371 drivers/scsi/aacraid/commsup.c idx = *index = le32_to_cpu(*(q->headers.producer)); q 373 drivers/scsi/aacraid/commsup.c if (idx != le32_to_cpu(*(q->headers.consumer))) { q 380 drivers/scsi/aacraid/commsup.c if (idx != le32_to_cpu(*(q->headers.consumer))) q 393 drivers/scsi/aacraid/commsup.c if ((*index + 1) == le32_to_cpu(*(q->headers.consumer))) { q 395 drivers/scsi/aacraid/commsup.c qid, atomic_read(&q->numpending)); q 398 drivers/scsi/aacraid/commsup.c *entry = q->base + *index; q 650 drivers/scsi/aacraid/commsup.c struct aac_queue * q = &dev->queues->queue[AdapNormCmdQueue]; q 651 drivers/scsi/aacraid/commsup.c atomic_dec(&q->numpending); q 798 drivers/scsi/aacraid/commsup.c int aac_consumer_get(struct aac_dev * dev, struct aac_queue * q, struct aac_entry **entry) q 802 drivers/scsi/aacraid/commsup.c if (le32_to_cpu(*q->headers.producer) == le32_to_cpu(*q->headers.consumer)) { q 810 drivers/scsi/aacraid/commsup.c if (le32_to_cpu(*q->headers.consumer) >= q->entries) q 813 drivers/scsi/aacraid/commsup.c index = le32_to_cpu(*q->headers.consumer); q 814 drivers/scsi/aacraid/commsup.c *entry = q->base + index; q 830 drivers/scsi/aacraid/commsup.c void aac_consumer_free(struct aac_dev * dev, struct aac_queue *q, u32 qid) q 835 drivers/scsi/aacraid/commsup.c if ((le32_to_cpu(*q->headers.producer)+1) == le32_to_cpu(*q->headers.consumer)) q 838 drivers/scsi/aacraid/commsup.c if (le32_to_cpu(*q->headers.consumer) >= q->entries) q 839 drivers/scsi/aacraid/commsup.c *q->headers.consumer = cpu_to_le32(1); q 841 drivers/scsi/aacraid/commsup.c le32_add_cpu(q->headers.consumer, 1); q 873 drivers/scsi/aacraid/commsup.c struct aac_queue * q; q 918 drivers/scsi/aacraid/commsup.c q = &dev->queues->queue[AdapNormRespQueue]; q 919 drivers/scsi/aacraid/commsup.c spin_lock_irqsave(q->lock, qflags); q 921 drivers/scsi/aacraid/commsup.c *(q->headers.producer) = cpu_to_le32(index + 1); q 922 drivers/scsi/aacraid/commsup.c spin_unlock_irqrestore(q->lock, qflags); q 39 drivers/scsi/aacraid/dpcsup.c unsigned int aac_response_normal(struct aac_queue * q) q 41 drivers/scsi/aacraid/dpcsup.c struct aac_dev * dev = q->dev; q 48 drivers/scsi/aacraid/dpcsup.c spin_lock_irqsave(q->lock, flags); q 55 drivers/scsi/aacraid/dpcsup.c while(aac_consumer_get(dev, q, &entry)) q 63 drivers/scsi/aacraid/dpcsup.c aac_consumer_free(dev, q, HostNormRespQueue); q 75 drivers/scsi/aacraid/dpcsup.c spin_unlock_irqrestore(q->lock, flags); q 78 drivers/scsi/aacraid/dpcsup.c spin_lock_irqsave(q->lock, flags); q 81 drivers/scsi/aacraid/dpcsup.c spin_unlock_irqrestore(q->lock, flags); q 134 drivers/scsi/aacraid/dpcsup.c spin_lock_irqsave(q->lock, flags); q 142 drivers/scsi/aacraid/dpcsup.c spin_unlock_irqrestore(q->lock, flags); q 157 drivers/scsi/aacraid/dpcsup.c unsigned int 
aac_command_normal(struct aac_queue *q) q 159 drivers/scsi/aacraid/dpcsup.c struct aac_dev * dev = q->dev; q 163 drivers/scsi/aacraid/dpcsup.c spin_lock_irqsave(q->lock, flags); q 170 drivers/scsi/aacraid/dpcsup.c while(aac_consumer_get(dev, q, &entry)) q 199 drivers/scsi/aacraid/dpcsup.c list_add_tail(&fib->fiblink, &q->cmdq); q 200 drivers/scsi/aacraid/dpcsup.c aac_consumer_free(dev, q, HostNormCmdQueue); q 201 drivers/scsi/aacraid/dpcsup.c wake_up_interruptible(&q->cmdready); q 203 drivers/scsi/aacraid/dpcsup.c aac_consumer_free(dev, q, HostNormCmdQueue); q 204 drivers/scsi/aacraid/dpcsup.c spin_unlock_irqrestore(q->lock, flags); q 210 drivers/scsi/aacraid/dpcsup.c spin_lock_irqsave(q->lock, flags); q 213 drivers/scsi/aacraid/dpcsup.c spin_unlock_irqrestore(q->lock, flags); q 278 drivers/scsi/aacraid/dpcsup.c struct aac_queue *q = &dev->queues->queue[HostNormCmdQueue]; q 309 drivers/scsi/aacraid/dpcsup.c spin_lock_irqsave(q->lock, flags); q 310 drivers/scsi/aacraid/dpcsup.c list_add_tail(&fib->fiblink, &q->cmdq); q 311 drivers/scsi/aacraid/dpcsup.c wake_up_interruptible(&q->cmdready); q 312 drivers/scsi/aacraid/dpcsup.c spin_unlock_irqrestore(q->lock, flags); q 391 drivers/scsi/aacraid/rx.c struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue]; q 397 drivers/scsi/aacraid/rx.c atomic_inc(&q->numpending); q 398 drivers/scsi/aacraid/rx.c *(q->headers.producer) = cpu_to_le32(Index + 1); q 414 drivers/scsi/aacraid/rx.c struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue]; q 420 drivers/scsi/aacraid/rx.c atomic_inc(&q->numpending); q 428 drivers/scsi/aacraid/rx.c atomic_dec(&q->numpending); q 477 drivers/scsi/aacraid/src.c struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue]; q 488 drivers/scsi/aacraid/src.c atomic_inc(&q->numpending); q 2496 drivers/scsi/advansys.c static void asc_prt_asc_scsi_q(ASC_SCSI_Q *q) q 2501 drivers/scsi/advansys.c printk("ASC_SCSI_Q at addr 0x%lx\n", (ulong)q); q 2505 drivers/scsi/advansys.c q->q2.target_ix, q->q1.target_lun, q->q2.srb_tag, q 2506 drivers/scsi/advansys.c q->q2.tag_code); q 2510 drivers/scsi/advansys.c (ulong)le32_to_cpu(q->q1.data_addr), q 2511 drivers/scsi/advansys.c (ulong)le32_to_cpu(q->q1.data_cnt), q 2512 drivers/scsi/advansys.c (ulong)le32_to_cpu(q->q1.sense_addr), q->q1.sense_len); q 2515 drivers/scsi/advansys.c (ulong)q->cdbptr, q->q2.cdb_len, q 2516 drivers/scsi/advansys.c (ulong)q->sg_head, q->q1.sg_queue_cnt); q 2518 drivers/scsi/advansys.c if (q->sg_head) { q 2519 drivers/scsi/advansys.c sgp = q->sg_head; q 2535 drivers/scsi/advansys.c static void asc_prt_asc_qdone_info(ASC_QDONE_INFO *q) q 2537 drivers/scsi/advansys.c printk("ASC_QDONE_INFO at addr 0x%lx\n", (ulong)q); q 2539 drivers/scsi/advansys.c q->d2.srb_tag, q->d2.target_ix, q->d2.cdb_len, q 2540 drivers/scsi/advansys.c q->d2.tag_code); q 2543 drivers/scsi/advansys.c q->d3.done_stat, q->d3.host_stat, q->d3.scsi_stat, q->d3.scsi_msg); q 2574 drivers/scsi/advansys.c static void asc_prt_adv_scsi_req_q(ADV_SCSI_REQ_Q *q) q 2580 drivers/scsi/advansys.c printk("ADV_SCSI_REQ_Q at addr 0x%lx\n", (ulong)q); q 2583 drivers/scsi/advansys.c q->target_id, q->target_lun, q->srb_tag); q 2586 drivers/scsi/advansys.c q->cntl, (ulong)le32_to_cpu(q->data_addr)); q 2589 drivers/scsi/advansys.c (ulong)le32_to_cpu(q->data_cnt), q 2590 drivers/scsi/advansys.c (ulong)le32_to_cpu(q->sense_addr), q->sense_len); q 2594 drivers/scsi/advansys.c q->cdb_len, q->done_status, q->host_status, q->scsi_status); q 2597 drivers/scsi/advansys.c q->sg_working_ix, q->target_cmd); q 2600 
drivers/scsi/advansys.c (ulong)le32_to_cpu(q->scsiq_rptr), q 2601 drivers/scsi/advansys.c (ulong)le32_to_cpu(q->sg_real_addr), (ulong)q->sg_list_ptr); q 2604 drivers/scsi/advansys.c if (q->sg_list_ptr != NULL) { q 2605 drivers/scsi/advansys.c sgblkp = container_of(q->sg_list_ptr, adv_sgblk_t, sg_block); q 39 drivers/scsi/arm/queue.c #define SET_MAGIC(q,m) ((q)->magic = (m)) q 40 drivers/scsi/arm/queue.c #define BAD_MAGIC(q,m) ((q)->magic != (m)) q 42 drivers/scsi/arm/queue.c #define SET_MAGIC(q,m) do { } while (0) q 43 drivers/scsi/arm/queue.c #define BAD_MAGIC(q,m) (0) q 58 drivers/scsi/arm/queue.c QE_t *q; q 70 drivers/scsi/arm/queue.c queue->alloc = q = kmalloc_array(nqueues, sizeof(QE_t), GFP_KERNEL); q 71 drivers/scsi/arm/queue.c if (q) { q 72 drivers/scsi/arm/queue.c for (; nqueues; q++, nqueues--) { q 73 drivers/scsi/arm/queue.c SET_MAGIC(q, QUEUE_MAGIC_FREE); q 74 drivers/scsi/arm/queue.c q->SCpnt = NULL; q 75 drivers/scsi/arm/queue.c list_add(&q->list, &queue->free); q 107 drivers/scsi/arm/queue.c QE_t *q; q 117 drivers/scsi/arm/queue.c q = list_entry(l, QE_t, list); q 118 drivers/scsi/arm/queue.c BUG_ON(BAD_MAGIC(q, QUEUE_MAGIC_FREE)); q 120 drivers/scsi/arm/queue.c SET_MAGIC(q, QUEUE_MAGIC_USED); q 121 drivers/scsi/arm/queue.c q->SCpnt = SCpnt; q 136 drivers/scsi/arm/queue.c QE_t *q; q 142 drivers/scsi/arm/queue.c q = list_entry(ent, QE_t, list); q 143 drivers/scsi/arm/queue.c BUG_ON(BAD_MAGIC(q, QUEUE_MAGIC_USED)); q 145 drivers/scsi/arm/queue.c SET_MAGIC(q, QUEUE_MAGIC_FREE); q 148 drivers/scsi/arm/queue.c return q->SCpnt; q 166 drivers/scsi/arm/queue.c QE_t *q = list_entry(l, QE_t, list); q 167 drivers/scsi/arm/queue.c if (!test_bit(q->SCpnt->device->id * 8 + q 168 drivers/scsi/arm/queue.c (u8)(q->SCpnt->device->lun & 0x7), exclude)) { q 215 drivers/scsi/arm/queue.c QE_t *q = list_entry(l, QE_t, list); q 216 drivers/scsi/arm/queue.c if (q->SCpnt->device->id == target && q->SCpnt->device->lun == lun && q 217 drivers/scsi/arm/queue.c q->SCpnt->tag == tag) { q 241 drivers/scsi/arm/queue.c QE_t *q = list_entry(l, QE_t, list); q 242 drivers/scsi/arm/queue.c if (q->SCpnt->device->id == target) q 265 drivers/scsi/arm/queue.c QE_t *q = list_entry(l, QE_t, list); q 266 drivers/scsi/arm/queue.c if (q->SCpnt->device->id == target && q->SCpnt->device->lun == lun) { q 291 drivers/scsi/arm/queue.c QE_t *q = list_entry(l, QE_t, list); q 292 drivers/scsi/arm/queue.c if (q->SCpnt == SCpnt) { q 51 drivers/scsi/be2iscsi/be.h static inline void *queue_head_node(struct be_queue_info *q) q 53 drivers/scsi/be2iscsi/be.h return q->dma_mem.va + q->head * q->entry_size; q 56 drivers/scsi/be2iscsi/be.h static inline void *queue_get_wrb(struct be_queue_info *q, unsigned int wrb_num) q 58 drivers/scsi/be2iscsi/be.h return q->dma_mem.va + wrb_num * q->entry_size; q 61 drivers/scsi/be2iscsi/be.h static inline void *queue_tail_node(struct be_queue_info *q) q 63 drivers/scsi/be2iscsi/be.h return q->dma_mem.va + q->tail * q->entry_size; q 66 drivers/scsi/be2iscsi/be.h static inline void queue_head_inc(struct be_queue_info *q) q 68 drivers/scsi/be2iscsi/be.h index_inc(&q->head, q->len); q 71 drivers/scsi/be2iscsi/be.h static inline void queue_tail_inc(struct be_queue_info *q) q 73 drivers/scsi/be2iscsi/be.h index_inc(&q->tail, q->len); q 89 drivers/scsi/be2iscsi/be.h struct be_queue_info q; q 97 drivers/scsi/be2iscsi/be.h struct be_queue_info q; q 91 drivers/scsi/be2iscsi/be_cmds.c struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q; q 143 drivers/scsi/be2iscsi/be_cmds.c struct be_queue_info *mccq = 
&ctrl->mcc_obj.q; q 173 drivers/scsi/be2iscsi/be_cmds.c struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q; q 563 drivers/scsi/be2iscsi/be_cmds.c struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q; q 900 drivers/scsi/be2iscsi/be_cmds.c int beiscsi_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q, q 948 drivers/scsi/be2iscsi/be_cmds.c req->id = cpu_to_le16(q->id); q 810 drivers/scsi/be2iscsi/be_cmds.h int beiscsi_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q, q 675 drivers/scsi/be2iscsi/be_main.c eq = &pbe_eq->q; q 712 drivers/scsi/be2iscsi/be_main.c eq = &pbe_eq->q; q 751 drivers/scsi/be2iscsi/be_main.c eq = &phwi_context->be_eq[0].q; q 1838 drivers/scsi/be2iscsi/be_main.c hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1); q 2048 drivers/scsi/be2iscsi/be_main.c eq = &pbe_eq->q; q 2066 drivers/scsi/be2iscsi/be_main.c pbe_eq->q.id, ret); q 2068 drivers/scsi/be2iscsi/be_main.c hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1); q 2978 drivers/scsi/be2iscsi/be_main.c static int be_fill_queue(struct be_queue_info *q, q 2981 drivers/scsi/be2iscsi/be_main.c struct be_dma_mem *mem = &q->dma_mem; q 2983 drivers/scsi/be2iscsi/be_main.c memset(q, 0, sizeof(*q)); q 2984 drivers/scsi/be2iscsi/be_main.c q->len = len; q 2985 drivers/scsi/be2iscsi/be_main.c q->entry_size = entry_size; q 3012 drivers/scsi/be2iscsi/be_main.c eq = &phwi_context->be_eq[i].q; q 3044 drivers/scsi/be2iscsi/be_main.c phwi_context->be_eq[i].q.id); q 3050 drivers/scsi/be2iscsi/be_main.c eq = &phwi_context->be_eq[i].q; q 3076 drivers/scsi/be2iscsi/be_main.c eq = &phwi_context->be_eq[i].q; q 3306 drivers/scsi/be2iscsi/be_main.c static void be_queue_free(struct beiscsi_hba *phba, struct be_queue_info *q) q 3308 drivers/scsi/be2iscsi/be_main.c struct be_dma_mem *mem = &q->dma_mem; q 3316 drivers/scsi/be2iscsi/be_main.c static int be_queue_alloc(struct beiscsi_hba *phba, struct be_queue_info *q, q 3319 drivers/scsi/be2iscsi/be_main.c struct be_dma_mem *mem = &q->dma_mem; q 3321 drivers/scsi/be2iscsi/be_main.c memset(q, 0, sizeof(*q)); q 3322 drivers/scsi/be2iscsi/be_main.c q->len = len; q 3323 drivers/scsi/be2iscsi/be_main.c q->entry_size = entry_size; q 3450 drivers/scsi/be2iscsi/be_main.c struct be_queue_info *q; q 3453 drivers/scsi/be2iscsi/be_main.c q = &phba->ctrl.mcc_obj.q; q 3495 drivers/scsi/be2iscsi/be_main.c if (q->created) { q 3496 drivers/scsi/be2iscsi/be_main.c beiscsi_cmd_q_destroy(ctrl, q, QTYPE_MCCQ); q 3497 drivers/scsi/be2iscsi/be_main.c be_queue_free(phba, q); q 3500 drivers/scsi/be2iscsi/be_main.c q = &phba->ctrl.mcc_obj.cq; q 3501 drivers/scsi/be2iscsi/be_main.c if (q->created) { q 3502 drivers/scsi/be2iscsi/be_main.c beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ); q 3503 drivers/scsi/be2iscsi/be_main.c be_queue_free(phba, q); q 3510 drivers/scsi/be2iscsi/be_main.c struct be_queue_info *q, *cq; q 3521 drivers/scsi/be2iscsi/be_main.c &phwi_context->be_eq[phba->num_cpus].q, q 3525 drivers/scsi/be2iscsi/be_main.c if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq[0].q, q 3531 drivers/scsi/be2iscsi/be_main.c q = &phba->ctrl.mcc_obj.q; q 3532 drivers/scsi/be2iscsi/be_main.c if (be_queue_alloc(phba, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb))) q 3536 drivers/scsi/be2iscsi/be_main.c if (beiscsi_cmd_mccq_create(phba, q, cq)) q 3542 drivers/scsi/be2iscsi/be_main.c be_queue_free(phba, q); q 3602 drivers/scsi/be2iscsi/be_main.c eq = &phwi_context->be_eq[i].q; q 3620 drivers/scsi/be2iscsi/be_main.c struct be_queue_info *q; q 3643 drivers/scsi/be2iscsi/be_main.c q = &phwi_context->be_wrbq[i]; q 3644 
drivers/scsi/be2iscsi/be_main.c if (q->created) q 3645 drivers/scsi/be2iscsi/be_main.c beiscsi_cmd_q_destroy(ctrl, q, QTYPE_WRBQ); q 3653 drivers/scsi/be2iscsi/be_main.c q = &phwi_context->be_def_hdrq[ulp_num]; q 3654 drivers/scsi/be2iscsi/be_main.c if (q->created) q 3655 drivers/scsi/be2iscsi/be_main.c beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ); q 3657 drivers/scsi/be2iscsi/be_main.c q = &phwi_context->be_def_dataq[ulp_num]; q 3658 drivers/scsi/be2iscsi/be_main.c if (q->created) q 3659 drivers/scsi/be2iscsi/be_main.c beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ); q 3666 drivers/scsi/be2iscsi/be_main.c q = &phwi_context->be_cq[i]; q 3667 drivers/scsi/be2iscsi/be_main.c if (q->created) { q 3668 drivers/scsi/be2iscsi/be_main.c be_queue_free(phba, q); q 3669 drivers/scsi/be2iscsi/be_main.c beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ); q 3679 drivers/scsi/be2iscsi/be_main.c q = &phwi_context->be_eq[i].q; q 3680 drivers/scsi/be2iscsi/be_main.c if (q->created) { q 3681 drivers/scsi/be2iscsi/be_main.c be_queue_free(phba, q); q 3682 drivers/scsi/be2iscsi/be_main.c beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ); q 4120 drivers/scsi/be2iscsi/be_main.c eq = &phwi_context->be_eq[0].q; q 4127 drivers/scsi/be2iscsi/be_main.c eq = &phwi_context->be_eq[i].q; q 5236 drivers/scsi/be2iscsi/be_main.c set_eqd[num].eq_id = pbe_eq->q.id; q 1318 drivers/scsi/bfa/bfa_core.c int q; q 1320 drivers/scsi/bfa/bfa_core.c for (q = 0; q < BFI_IOC_MAX_CQS; q++) { q 1321 drivers/scsi/bfa/bfa_core.c bfa_reqq_ci(bfa, q) = 0; q 1322 drivers/scsi/bfa/bfa_core.c bfa_reqq_pi(bfa, q) = 0; q 1323 drivers/scsi/bfa/bfa_core.c bfa_rspq_ci(bfa, q) = 0; q 1324 drivers/scsi/bfa/bfa_core.c bfa_rspq_pi(bfa, q) = 0; q 1474 drivers/scsi/bfa/bfa_core.c int q, per_reqq_sz, per_rspq_sz; q 1490 drivers/scsi/bfa/bfa_core.c for (q = 0; q < cfg->fwcfg.num_cqs; q++) { q 1491 drivers/scsi/bfa/bfa_core.c bfa_mem_dma_setup(meminfo, BFA_MEM_REQQ_DMA(bfa, q), q 1493 drivers/scsi/bfa/bfa_core.c bfa_mem_dma_setup(meminfo, BFA_MEM_RSPQ_DMA(bfa, q), q 1498 drivers/scsi/bfa/bfa_core.c for (q = 0; q < cfg->fwcfg.num_cqs; q++) q 157 drivers/scsi/bfa/bfa_cs.h bfa_q_is_on_q_func(struct list_head *q, struct list_head *qe) q 161 drivers/scsi/bfa/bfa_cs.h tqe = bfa_q_next(q); q 162 drivers/scsi/bfa/bfa_cs.h while (tqe != q) { q 428 drivers/scsi/csiostor/csio_isr.c struct csio_q *q = hw->wrm.q_arr[sqset->iq_idx]; q 431 drivers/scsi/csiostor/csio_isr.c entryp[k].desc, q); q 439 drivers/scsi/csiostor/csio_isr.c entryp[k].dev_id = q; q 1159 drivers/scsi/csiostor/csio_scsi.c csio_scsi_cleanup_io_q(struct csio_scsim *scm, struct list_head *q) q 1167 drivers/scsi/csiostor/csio_scsi.c list_for_each_safe(tmp, next, q) { q 1233 drivers/scsi/csiostor/csio_scsi.c csio_scsi_abort_io_q(struct csio_scsim *scm, struct list_head *q, uint32_t tmo) q 1240 drivers/scsi/csiostor/csio_scsi.c if (list_empty(q)) q 1246 drivers/scsi/csiostor/csio_scsi.c list_for_each_safe(tmp, next, q) { q 1252 drivers/scsi/csiostor/csio_scsi.c while (!list_empty(q) && count--) { q 1259 drivers/scsi/csiostor/csio_scsi.c if (list_empty(q)) q 191 drivers/scsi/csiostor/csio_wr.c struct csio_q *q, *flq; q 234 drivers/scsi/csiostor/csio_wr.c q = wrm->q_arr[free_idx]; q 236 drivers/scsi/csiostor/csio_wr.c q->vstart = dma_alloc_coherent(&hw->pdev->dev, qsz, &q->pstart, q 238 drivers/scsi/csiostor/csio_wr.c if (!q->vstart) { q 245 drivers/scsi/csiostor/csio_wr.c q->type = type; q 246 drivers/scsi/csiostor/csio_wr.c q->owner = owner; q 247 drivers/scsi/csiostor/csio_wr.c q->pidx = q->cidx = q->inc_idx = 0; q 248 
drivers/scsi/csiostor/csio_wr.c q->size = qsz; q 249 drivers/scsi/csiostor/csio_wr.c q->wr_sz = wrsize; /* If using fixed size WRs */ q 255 drivers/scsi/csiostor/csio_wr.c q->un.iq.genbit = 1; q 261 drivers/scsi/csiostor/csio_wr.c q->credits = (qsz - q->wr_sz) / q->wr_sz; q 262 drivers/scsi/csiostor/csio_wr.c q->vwrap = (void *)((uintptr_t)(q->vstart) + qsz q 263 drivers/scsi/csiostor/csio_wr.c - q->wr_sz); q 278 drivers/scsi/csiostor/csio_wr.c q->un.iq.flq_idx = flq_idx; q 280 drivers/scsi/csiostor/csio_wr.c flq = wrm->q_arr[q->un.iq.flq_idx]; q 306 drivers/scsi/csiostor/csio_wr.c q->un.iq.flq_idx = -1; q 310 drivers/scsi/csiostor/csio_wr.c q->un.iq.iq_intx_handler = iq_intx_handler; q 315 drivers/scsi/csiostor/csio_wr.c q->credits = (qsz - csio_wr_qstat_pgsz(hw)) / CSIO_QCREDIT_SZ; q 316 drivers/scsi/csiostor/csio_wr.c q->vwrap = (void *)((uintptr_t)(q->vstart) + qsz q 320 drivers/scsi/csiostor/csio_wr.c q->credits = (qsz - csio_wr_qstat_pgsz(hw)) / sizeof(__be64); q 321 drivers/scsi/csiostor/csio_wr.c q->vwrap = (void *)((uintptr_t)(q->vstart) + qsz q 747 drivers/scsi/csiostor/csio_wr.c struct csio_q *q = csio_hw_to_wrm(hw)->q_arr[qidx]; q 748 drivers/scsi/csiostor/csio_wr.c struct csio_qstatus_page *stp = (struct csio_qstatus_page *)q->vwrap; q 765 drivers/scsi/csiostor/csio_wr.c struct csio_q *q = wrm->q_arr[qidx]; q 771 drivers/scsi/csiostor/csio_wr.c q->un.iq.genbit = 1; q 773 drivers/scsi/csiostor/csio_wr.c for (i = 0; i < q->credits; i++) { q 775 drivers/scsi/csiostor/csio_wr.c wr = (void *)((uintptr_t)q->vstart + q 776 drivers/scsi/csiostor/csio_wr.c (i * q->wr_sz)); q 779 drivers/scsi/csiostor/csio_wr.c (q->wr_sz - sizeof(*ftr))); q 789 drivers/scsi/csiostor/csio_wr.c struct csio_q *q; q 794 drivers/scsi/csiostor/csio_wr.c q = wrm->q_arr[i]; q 796 drivers/scsi/csiostor/csio_wr.c switch (q->type) { q 866 drivers/scsi/csiostor/csio_wr.c struct csio_q *q = wrm->q_arr[qidx]; q 867 drivers/scsi/csiostor/csio_wr.c void *cwr = (void *)((uintptr_t)(q->vstart) + q 868 drivers/scsi/csiostor/csio_wr.c (q->pidx * CSIO_QCREDIT_SZ)); q 869 drivers/scsi/csiostor/csio_wr.c struct csio_qstatus_page *stp = (struct csio_qstatus_page *)q->vwrap; q 870 drivers/scsi/csiostor/csio_wr.c uint16_t cidx = q->cidx = ntohs(stp->cidx); q 871 drivers/scsi/csiostor/csio_wr.c uint16_t pidx = q->pidx; q 876 drivers/scsi/csiostor/csio_wr.c CSIO_DB_ASSERT(q->owner != NULL); q 878 drivers/scsi/csiostor/csio_wr.c CSIO_DB_ASSERT(cidx <= q->credits); q 882 drivers/scsi/csiostor/csio_wr.c credits = q->credits - (pidx - cidx) - 1; q 887 drivers/scsi/csiostor/csio_wr.c credits = q->credits; q 888 drivers/scsi/csiostor/csio_wr.c CSIO_INC_STATS(q, n_qempty); q 896 drivers/scsi/csiostor/csio_wr.c CSIO_INC_STATS(q, n_qfull); q 908 drivers/scsi/csiostor/csio_wr.c if (unlikely(((uintptr_t)cwr + req_sz) > (uintptr_t)(q->vwrap))) { q 910 drivers/scsi/csiostor/csio_wr.c wrp->size1 = (uint32_t)((uintptr_t)q->vwrap - (uintptr_t)cwr); q 911 drivers/scsi/csiostor/csio_wr.c wrp->addr2 = q->vstart; q 913 drivers/scsi/csiostor/csio_wr.c q->pidx = (uint16_t)(ALIGN(wrp->size2, CSIO_QCREDIT_SZ) / q 915 drivers/scsi/csiostor/csio_wr.c CSIO_INC_STATS(q, n_qwrap); q 916 drivers/scsi/csiostor/csio_wr.c CSIO_INC_STATS(q, n_eq_wr_split); q 922 drivers/scsi/csiostor/csio_wr.c q->pidx += (uint16_t)req_credits; q 925 drivers/scsi/csiostor/csio_wr.c if (unlikely(q->pidx == q->credits)) { q 926 drivers/scsi/csiostor/csio_wr.c q->pidx = 0; q 927 drivers/scsi/csiostor/csio_wr.c CSIO_INC_STATS(q, n_qwrap); q 931 drivers/scsi/csiostor/csio_wr.c 
q->inc_idx = (uint16_t)req_credits; q 933 drivers/scsi/csiostor/csio_wr.c CSIO_INC_STATS(q, n_tot_reqs); q 984 drivers/scsi/csiostor/csio_wr.c struct csio_q *q = wrm->q_arr[qidx]; q 990 drivers/scsi/csiostor/csio_wr.c csio_wr_reg32(hw, DBPRIO_V(prio) | QID_V(q->un.eq.physeqid) | q 991 drivers/scsi/csiostor/csio_wr.c PIDX_T5_V(q->inc_idx) | DBTYPE_F, q 993 drivers/scsi/csiostor/csio_wr.c q->inc_idx = 0; q 999 drivers/scsi/csiostor/csio_wr.c csio_wr_avail_qcredits(struct csio_q *q) q 1001 drivers/scsi/csiostor/csio_wr.c if (q->pidx > q->cidx) q 1002 drivers/scsi/csiostor/csio_wr.c return q->pidx - q->cidx; q 1003 drivers/scsi/csiostor/csio_wr.c else if (q->cidx > q->pidx) q 1004 drivers/scsi/csiostor/csio_wr.c return q->credits - (q->cidx - q->pidx); q 1041 drivers/scsi/csiostor/csio_wr.c csio_wr_process_fl(struct csio_hw *hw, struct csio_q *q, q 1054 drivers/scsi/csiostor/csio_wr.c struct csio_q *flq = hw->wrm.q_arr[q->un.iq.flq_idx]; q 1092 drivers/scsi/csiostor/csio_wr.c iq_handler(hw, wr, q->wr_sz - sizeof(struct csio_iqwr_footer), q 1111 drivers/scsi/csiostor/csio_wr.c csio_is_new_iqwr(struct csio_q *q, struct csio_iqwr_footer *ftr) q 1113 drivers/scsi/csiostor/csio_wr.c return (q->un.iq.genbit == (ftr->u.type_gen >> IQWRF_GEN_SHIFT)); q 1128 drivers/scsi/csiostor/csio_wr.c csio_wr_process_iq(struct csio_hw *hw, struct csio_q *q, q 1135 drivers/scsi/csiostor/csio_wr.c void *wr = (void *)((uintptr_t)q->vstart + (q->cidx * q->wr_sz)); q 1139 drivers/scsi/csiostor/csio_wr.c struct csio_q *flq = csio_iq_has_fl(q) ? q 1140 drivers/scsi/csiostor/csio_wr.c wrm->q_arr[q->un.iq.flq_idx] : NULL; q 1145 drivers/scsi/csiostor/csio_wr.c (q->wr_sz - sizeof(*ftr))); q 1151 drivers/scsi/csiostor/csio_wr.c while (csio_is_new_iqwr(q, ftr)) { q 1153 drivers/scsi/csiostor/csio_wr.c CSIO_DB_ASSERT(((uintptr_t)wr + q->wr_sz) <= q 1154 drivers/scsi/csiostor/csio_wr.c (uintptr_t)q->vwrap); q 1161 drivers/scsi/csiostor/csio_wr.c iq_handler(hw, wr, q->wr_sz - sizeof(*ftr), NULL, priv); q 1164 drivers/scsi/csiostor/csio_wr.c csio_wr_process_fl(hw, q, wr, q 1194 drivers/scsi/csiostor/csio_wr.c CSIO_INC_STATS(q, n_rsp_unknown); q 1203 drivers/scsi/csiostor/csio_wr.c if (((uintptr_t)wr + q->wr_sz) == (uintptr_t)q->vwrap) { q 1206 drivers/scsi/csiostor/csio_wr.c q->cidx = 0; q 1207 drivers/scsi/csiostor/csio_wr.c wr = q->vstart; q 1210 drivers/scsi/csiostor/csio_wr.c q->un.iq.genbit ^= 0x1; q 1212 drivers/scsi/csiostor/csio_wr.c CSIO_INC_STATS(q, n_qwrap); q 1214 drivers/scsi/csiostor/csio_wr.c q->cidx++; q 1215 drivers/scsi/csiostor/csio_wr.c wr = (void *)((uintptr_t)(q->vstart) + q 1216 drivers/scsi/csiostor/csio_wr.c (q->cidx * q->wr_sz)); q 1220 drivers/scsi/csiostor/csio_wr.c (q->wr_sz - sizeof(*ftr))); q 1221 drivers/scsi/csiostor/csio_wr.c q->inc_idx++; q 1229 drivers/scsi/csiostor/csio_wr.c if (unlikely(!q->inc_idx)) { q 1230 drivers/scsi/csiostor/csio_wr.c CSIO_INC_STATS(q, n_stray_comp); q 1250 drivers/scsi/csiostor/csio_wr.c csio_wr_reg32(hw, CIDXINC_V(q->inc_idx) | q 1251 drivers/scsi/csiostor/csio_wr.c INGRESSQID_V(q->un.iq.physiqid) | q 1254 drivers/scsi/csiostor/csio_wr.c q->stats.n_tot_rsps += q->inc_idx; q 1256 drivers/scsi/csiostor/csio_wr.c q->inc_idx = 0; q 1690 drivers/scsi/csiostor/csio_wr.c struct csio_q *q; q 1694 drivers/scsi/csiostor/csio_wr.c q = wrm->q_arr[i]; q 1697 drivers/scsi/csiostor/csio_wr.c if (q->type == CSIO_FREELIST) { q 1698 drivers/scsi/csiostor/csio_wr.c if (!q->un.fl.bufs) q 1700 drivers/scsi/csiostor/csio_wr.c for (j = 0; j < q->credits; j++) { q 1701 
drivers/scsi/csiostor/csio_wr.c buf = &q->un.fl.bufs[j]; q 1708 drivers/scsi/csiostor/csio_wr.c kfree(q->un.fl.bufs); q 1710 drivers/scsi/csiostor/csio_wr.c dma_free_coherent(&hw->pdev->dev, q->size, q 1711 drivers/scsi/csiostor/csio_wr.c q->vstart, q->pstart); q 1713 drivers/scsi/csiostor/csio_wr.c kfree(q); q 331 drivers/scsi/esas2r/esas2r_flash.c u8 *p, *q; q 347 drivers/scsi/esas2r/esas2r_flash.c q = (u8 *)fi /* start of the whole gob */ q 357 drivers/scsi/esas2r/esas2r_flash.c if (*p++ != *q++) q 2296 drivers/scsi/fnic/fnic_scsi.c struct request_queue *q = sc->request->q; q 2299 drivers/scsi/fnic/fnic_scsi.c dummy = blk_mq_alloc_request(q, REQ_OP_WRITE, BLK_MQ_REQ_NOWAIT); q 196 drivers/scsi/gvp11.c unsigned char q, qq; q 215 drivers/scsi/gvp11.c q = *sasr_3393; /* read it */ q 216 drivers/scsi/gvp11.c if (q & 0x08) /* bit 3 should always be clear */ q 223 drivers/scsi/gvp11.c if (*sasr_3393 != q) { /* should still read the same */ q 227 drivers/scsi/gvp11.c if (*scmd_3393 != q) /* and so should the image at 0x1f */ q 237 drivers/scsi/gvp11.c q = *scmd_3393; q 239 drivers/scsi/gvp11.c *scmd_3393 = ~q; q 243 drivers/scsi/gvp11.c *scmd_3393 = q; q 244 drivers/scsi/gvp11.c if (qq != q) /* should be read only */ q 247 drivers/scsi/gvp11.c q = *scmd_3393; q 249 drivers/scsi/gvp11.c *scmd_3393 = ~q; q 253 drivers/scsi/gvp11.c *scmd_3393 = q; q 254 drivers/scsi/gvp11.c if (qq != q || qq != 0xff) /* should be read only, all 1's */ q 257 drivers/scsi/gvp11.c q = *scmd_3393; q 259 drivers/scsi/gvp11.c *scmd_3393 = ~q; q 263 drivers/scsi/gvp11.c *scmd_3393 = q; q 264 drivers/scsi/gvp11.c if (qq != (~q & 0xff)) /* should be read/write */ q 296 drivers/scsi/hpsa.c static inline u32 next_command(struct ctlr_info *h, u8 q); q 979 drivers/scsi/hpsa.c static inline u32 next_command(struct ctlr_info *h, u8 q) q 982 drivers/scsi/hpsa.c struct reply_queue_buffer *rq = &h->reply_queue[q]; q 985 drivers/scsi/hpsa.c return h->access.command_completed(h, q); q 988 drivers/scsi/hpsa.c return h->access.command_completed(h, q); q 6885 drivers/scsi/hpsa.c static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q) q 6887 drivers/scsi/hpsa.c return h->access.command_completed(h, q); q 6961 drivers/scsi/hpsa.c return container_of((queue - *queue), struct ctlr_info, q[0]); q 6967 drivers/scsi/hpsa.c u8 q = *(u8 *) queue; q 6977 drivers/scsi/hpsa.c raw_tag = get_next_completion(h, q); q 6979 drivers/scsi/hpsa.c raw_tag = next_command(h, q); q 6988 drivers/scsi/hpsa.c u8 q = *(u8 *) queue; q 6994 drivers/scsi/hpsa.c raw_tag = get_next_completion(h, q); q 6996 drivers/scsi/hpsa.c raw_tag = next_command(h, q); q 7004 drivers/scsi/hpsa.c u8 q = *(u8 *) queue; q 7010 drivers/scsi/hpsa.c raw_tag = get_next_completion(h, q); q 7013 drivers/scsi/hpsa.c raw_tag = next_command(h, q); q 7023 drivers/scsi/hpsa.c u8 q = *(u8 *) queue; q 7026 drivers/scsi/hpsa.c raw_tag = get_next_completion(h, q); q 7029 drivers/scsi/hpsa.c raw_tag = next_command(h, q); q 8038 drivers/scsi/hpsa.c &h->q[h->intr_mode]); q 8039 drivers/scsi/hpsa.c h->q[h->intr_mode] = 0; q 8044 drivers/scsi/hpsa.c free_irq(pci_irq_vector(h->pdev, i), &h->q[i]); q 8045 drivers/scsi/hpsa.c h->q[i] = 0; q 8048 drivers/scsi/hpsa.c h->q[i] = 0; q 8067 drivers/scsi/hpsa.c h->q[i] = (u8) i; q 8075 drivers/scsi/hpsa.c &h->q[i]); q 8083 drivers/scsi/hpsa.c free_irq(pci_irq_vector(h->pdev, j), &h->q[j]); q 8084 drivers/scsi/hpsa.c h->q[j] = 0; q 8087 drivers/scsi/hpsa.c h->q[j] = 0; q 8099 drivers/scsi/hpsa.c &h->q[h->intr_mode]); q 8106 drivers/scsi/hpsa.c 
&h->q[h->intr_mode]); q 34 drivers/scsi/hpsa.h unsigned long (*command_completed)(struct ctlr_info *h, u8 q); q 256 drivers/scsi/hpsa.h u8 q[MAX_REPLY_QUEUES]; q 488 drivers/scsi/hpsa.h static unsigned long SA5_performant_completed(struct ctlr_info *h, u8 q) q 490 drivers/scsi/hpsa.h struct reply_queue_buffer *rq = &h->reply_queue[q]; q 526 drivers/scsi/hpsa.h __attribute__((unused)) u8 q) q 589 drivers/scsi/hpsa.h static unsigned long SA5_ioaccel_mode1_completed(struct ctlr_info *h, u8 q) q 592 drivers/scsi/hpsa.h struct reply_queue_buffer *rq = &h->reply_queue[q]; q 594 drivers/scsi/hpsa.h BUG_ON(q >= h->nreply_queues); q 608 drivers/scsi/hpsa.h writel((q << 24) | rq->current_entry, h->vaddr + q 24 drivers/scsi/ibmvscsi_tgt/libsrp.c static int srp_iu_pool_alloc(struct srp_queue *q, size_t max, q 30 drivers/scsi/ibmvscsi_tgt/libsrp.c q->pool = kcalloc(max, sizeof(struct iu_entry *), GFP_KERNEL); q 31 drivers/scsi/ibmvscsi_tgt/libsrp.c if (!q->pool) q 33 drivers/scsi/ibmvscsi_tgt/libsrp.c q->items = kcalloc(max, sizeof(struct iu_entry), GFP_KERNEL); q 34 drivers/scsi/ibmvscsi_tgt/libsrp.c if (!q->items) q 37 drivers/scsi/ibmvscsi_tgt/libsrp.c spin_lock_init(&q->lock); q 38 drivers/scsi/ibmvscsi_tgt/libsrp.c kfifo_init(&q->queue, (void *)q->pool, max * sizeof(void *)); q 40 drivers/scsi/ibmvscsi_tgt/libsrp.c for (i = 0, iue = q->items; i < max; i++) { q 41 drivers/scsi/ibmvscsi_tgt/libsrp.c kfifo_in(&q->queue, (void *)&iue, sizeof(void *)); q 48 drivers/scsi/ibmvscsi_tgt/libsrp.c kfree(q->pool); q 52 drivers/scsi/ibmvscsi_tgt/libsrp.c static void srp_iu_pool_free(struct srp_queue *q) q 54 drivers/scsi/ibmvscsi_tgt/libsrp.c kfree(q->items); q 55 drivers/scsi/ibmvscsi_tgt/libsrp.c kfree(q->pool); q 2527 drivers/scsi/ips.c struct scsi_cmnd *q; q 2638 drivers/scsi/ips.c q = p; q 2639 drivers/scsi/ips.c SC = ips_removeq_wait(&ha->scb_waitlist, q); q 2537 drivers/scsi/libiscsi.c iscsi_pool_init(struct iscsi_pool *q, int max, void ***items, int item_size) q 2541 drivers/scsi/libiscsi.c memset(q, 0, sizeof(*q)); q 2543 drivers/scsi/libiscsi.c q->max = max; q 2549 drivers/scsi/libiscsi.c q->pool = kvcalloc(num_arrays * max, sizeof(void *), GFP_KERNEL); q 2550 drivers/scsi/libiscsi.c if (q->pool == NULL) q 2553 drivers/scsi/libiscsi.c kfifo_init(&q->queue, (void*)q->pool, max * sizeof(void*)); q 2556 drivers/scsi/libiscsi.c q->pool[i] = kzalloc(item_size, GFP_KERNEL); q 2557 drivers/scsi/libiscsi.c if (q->pool[i] == NULL) { q 2558 drivers/scsi/libiscsi.c q->max = i; q 2561 drivers/scsi/libiscsi.c kfifo_in(&q->queue, (void*)&q->pool[i], sizeof(void*)); q 2565 drivers/scsi/libiscsi.c *items = q->pool + max; q 2566 drivers/scsi/libiscsi.c memcpy(*items, q->pool, max * sizeof(void *)); q 2572 drivers/scsi/libiscsi.c iscsi_pool_free(q); q 2577 drivers/scsi/libiscsi.c void iscsi_pool_free(struct iscsi_pool *q) q 2581 drivers/scsi/libiscsi.c for (i = 0; i < q->max; i++) q 2582 drivers/scsi/libiscsi.c kfree(q->pool[i]); q 2583 drivers/scsi/libiscsi.c kvfree(q->pool); q 1176 drivers/scsi/lpfc/lpfc_attr.c lpfc_emptyq_wait(struct lpfc_hba *phba, struct list_head *q, spinlock_t *lock) q 1181 drivers/scsi/lpfc/lpfc_attr.c while (!list_empty(q)) { q 219 drivers/scsi/lpfc/lpfc_crtn.h int lpfc_sli4_poll_eq(struct lpfc_queue *q, uint8_t path); q 221 drivers/scsi/lpfc/lpfc_crtn.h void lpfc_sli4_start_polling(struct lpfc_queue *q); q 222 drivers/scsi/lpfc/lpfc_crtn.h void lpfc_sli4_stop_polling(struct lpfc_queue *q); q 3980 drivers/scsi/lpfc/lpfc_debugfs.c lpfc_idiag_que_param_check(struct lpfc_queue *q, int index, int 
count) q 3985 drivers/scsi/lpfc/lpfc_debugfs.c if (index > q->entry_count - 1) q 333 drivers/scsi/lpfc/lpfc_debugfs.h lpfc_debug_dump_qe(struct lpfc_queue *q, uint32_t idx) q 340 drivers/scsi/lpfc/lpfc_debugfs.h if (!q) q 342 drivers/scsi/lpfc/lpfc_debugfs.h if (idx >= q->entry_count) q 345 drivers/scsi/lpfc/lpfc_debugfs.h esize = q->entry_size; q 347 drivers/scsi/lpfc/lpfc_debugfs.h pword = lpfc_sli4_qe(q, idx); q 381 drivers/scsi/lpfc/lpfc_debugfs.h lpfc_debug_dump_q(struct lpfc_queue *q) q 386 drivers/scsi/lpfc/lpfc_debugfs.h if (!q) q 389 drivers/scsi/lpfc/lpfc_debugfs.h dev_printk(KERN_ERR, &(((q->phba))->pcidev)->dev, q 393 drivers/scsi/lpfc/lpfc_debugfs.h (q->phba)->brd_no, q 394 drivers/scsi/lpfc/lpfc_debugfs.h q->queue_id, q->type, q->subtype, q 395 drivers/scsi/lpfc/lpfc_debugfs.h q->entry_size, q->entry_count, q 396 drivers/scsi/lpfc/lpfc_debugfs.h q->host_index, q->hba_index); q 397 drivers/scsi/lpfc/lpfc_debugfs.h entry_count = q->entry_count; q 399 drivers/scsi/lpfc/lpfc_debugfs.h lpfc_debug_dump_qe(q, idx); q 138 drivers/scsi/lpfc/lpfc_sli.c lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe128 *wqe) q 149 drivers/scsi/lpfc/lpfc_sli.c if (unlikely(!q)) q 151 drivers/scsi/lpfc/lpfc_sli.c temp_wqe = lpfc_sli4_qe(q, q->host_index); q 154 drivers/scsi/lpfc/lpfc_sli.c idx = ((q->host_index + 1) % q->entry_count); q 155 drivers/scsi/lpfc/lpfc_sli.c if (idx == q->hba_index) { q 156 drivers/scsi/lpfc/lpfc_sli.c q->WQ_overflow++; q 159 drivers/scsi/lpfc/lpfc_sli.c q->WQ_posted++; q 161 drivers/scsi/lpfc/lpfc_sli.c if (!((q->host_index + 1) % q->notify_interval)) q 165 drivers/scsi/lpfc/lpfc_sli.c if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED) q 166 drivers/scsi/lpfc/lpfc_sli.c bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id); q 167 drivers/scsi/lpfc/lpfc_sli.c lpfc_sli4_pcimem_bcopy(wqe, temp_wqe, q->entry_size); q 168 drivers/scsi/lpfc/lpfc_sli.c if (q->dpp_enable && q->phba->cfg_enable_dpp) { q 172 drivers/scsi/lpfc/lpfc_sli.c for (i = 0; i < q->entry_size; i += sizeof(uint64_t)) q 174 drivers/scsi/lpfc/lpfc_sli.c q->dpp_regaddr + i); q 176 drivers/scsi/lpfc/lpfc_sli.c for (i = 0; i < q->entry_size; i += sizeof(uint32_t)) q 178 drivers/scsi/lpfc/lpfc_sli.c q->dpp_regaddr + i); q 185 drivers/scsi/lpfc/lpfc_sli.c host_index = q->host_index; q 187 drivers/scsi/lpfc/lpfc_sli.c q->host_index = idx; q 191 drivers/scsi/lpfc/lpfc_sli.c if (q->db_format == LPFC_DB_LIST_FORMAT) { q 192 drivers/scsi/lpfc/lpfc_sli.c if (q->dpp_enable && q->phba->cfg_enable_dpp) { q 196 drivers/scsi/lpfc/lpfc_sli.c q->dpp_id); q 198 drivers/scsi/lpfc/lpfc_sli.c q->queue_id); q 201 drivers/scsi/lpfc/lpfc_sli.c bf_set(lpfc_wq_db_list_fm_id, &doorbell, q->queue_id); q 205 drivers/scsi/lpfc/lpfc_sli.c &q->phba->sli4_hba.sli_intf); q 210 drivers/scsi/lpfc/lpfc_sli.c } else if (q->db_format == LPFC_DB_RING_FORMAT) { q 212 drivers/scsi/lpfc/lpfc_sli.c bf_set(lpfc_wq_db_ring_fm_id, &doorbell, q->queue_id); q 216 drivers/scsi/lpfc/lpfc_sli.c writel(doorbell.word0, q->db_regaddr); q 233 drivers/scsi/lpfc/lpfc_sli.c lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index) q 238 drivers/scsi/lpfc/lpfc_sli.c if (unlikely(!q)) q 241 drivers/scsi/lpfc/lpfc_sli.c if (q->hba_index == index) q 244 drivers/scsi/lpfc/lpfc_sli.c q->hba_index = ((q->hba_index + 1) % q->entry_count); q 246 drivers/scsi/lpfc/lpfc_sli.c } while (q->hba_index != index); q 263 drivers/scsi/lpfc/lpfc_sli.c lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe) q 269 drivers/scsi/lpfc/lpfc_sli.c if (unlikely(!q)) q 271 
drivers/scsi/lpfc/lpfc_sli.c temp_mqe = lpfc_sli4_qe(q, q->host_index); q 274 drivers/scsi/lpfc/lpfc_sli.c if (((q->host_index + 1) % q->entry_count) == q->hba_index) q 276 drivers/scsi/lpfc/lpfc_sli.c lpfc_sli4_pcimem_bcopy(mqe, temp_mqe, q->entry_size); q 278 drivers/scsi/lpfc/lpfc_sli.c q->phba->mbox = (MAILBOX_t *)temp_mqe; q 281 drivers/scsi/lpfc/lpfc_sli.c q->host_index = ((q->host_index + 1) % q->entry_count); q 286 drivers/scsi/lpfc/lpfc_sli.c bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id); q 287 drivers/scsi/lpfc/lpfc_sli.c writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr); q 302 drivers/scsi/lpfc/lpfc_sli.c lpfc_sli4_mq_release(struct lpfc_queue *q) q 305 drivers/scsi/lpfc/lpfc_sli.c if (unlikely(!q)) q 309 drivers/scsi/lpfc/lpfc_sli.c q->phba->mbox = NULL; q 310 drivers/scsi/lpfc/lpfc_sli.c q->hba_index = ((q->hba_index + 1) % q->entry_count); q 324 drivers/scsi/lpfc/lpfc_sli.c lpfc_sli4_eq_get(struct lpfc_queue *q) q 329 drivers/scsi/lpfc/lpfc_sli.c if (unlikely(!q)) q 331 drivers/scsi/lpfc/lpfc_sli.c eqe = lpfc_sli4_qe(q, q->host_index); q 334 drivers/scsi/lpfc/lpfc_sli.c if (bf_get_le32(lpfc_eqe_valid, eqe) != q->qe_valid) q 356 drivers/scsi/lpfc/lpfc_sli.c lpfc_sli4_eq_clr_intr(struct lpfc_queue *q) q 364 drivers/scsi/lpfc/lpfc_sli.c (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT)); q 365 drivers/scsi/lpfc/lpfc_sli.c bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id); q 366 drivers/scsi/lpfc/lpfc_sli.c writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr); q 375 drivers/scsi/lpfc/lpfc_sli.c lpfc_sli4_if6_eq_clr_intr(struct lpfc_queue *q) q 380 drivers/scsi/lpfc/lpfc_sli.c bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id); q 381 drivers/scsi/lpfc/lpfc_sli.c writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr); q 396 drivers/scsi/lpfc/lpfc_sli.c lpfc_sli4_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q, q 402 drivers/scsi/lpfc/lpfc_sli.c if (unlikely(!q || (count == 0 && !arm))) q 414 drivers/scsi/lpfc/lpfc_sli.c (q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT)); q 415 drivers/scsi/lpfc/lpfc_sli.c bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id); q 416 drivers/scsi/lpfc/lpfc_sli.c writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr); q 418 drivers/scsi/lpfc/lpfc_sli.c if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM)) q 419 drivers/scsi/lpfc/lpfc_sli.c readl(q->phba->sli4_hba.EQDBregaddr); q 434 drivers/scsi/lpfc/lpfc_sli.c lpfc_sli4_if6_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q, q 440 drivers/scsi/lpfc/lpfc_sli.c if (unlikely(!q || (count == 0 && !arm))) q 448 drivers/scsi/lpfc/lpfc_sli.c bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id); q 449 drivers/scsi/lpfc/lpfc_sli.c writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr); q 451 drivers/scsi/lpfc/lpfc_sli.c if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM)) q 452 drivers/scsi/lpfc/lpfc_sli.c readl(q->phba->sli4_hba.EQDBregaddr); q 539 drivers/scsi/lpfc/lpfc_sli.c lpfc_sli4_cq_get(struct lpfc_queue *q) q 544 drivers/scsi/lpfc/lpfc_sli.c if (unlikely(!q)) q 546 drivers/scsi/lpfc/lpfc_sli.c cqe = lpfc_sli4_qe(q, q->host_index); q 549 drivers/scsi/lpfc/lpfc_sli.c if (bf_get_le32(lpfc_cqe_valid, cqe) != q->qe_valid) q 590 drivers/scsi/lpfc/lpfc_sli.c lpfc_sli4_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q, q 596 drivers/scsi/lpfc/lpfc_sli.c if (unlikely(!q || (count == 0 && !arm))) q 606 drivers/scsi/lpfc/lpfc_sli.c (q->queue_id >> LPFC_CQID_HI_FIELD_SHIFT)); q 607 drivers/scsi/lpfc/lpfc_sli.c bf_set(lpfc_eqcq_doorbell_cqid_lo, &doorbell, 
q->queue_id); q 608 drivers/scsi/lpfc/lpfc_sli.c writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr); q 623 drivers/scsi/lpfc/lpfc_sli.c lpfc_sli4_if6_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q, q 629 drivers/scsi/lpfc/lpfc_sli.c if (unlikely(!q || (count == 0 && !arm))) q 637 drivers/scsi/lpfc/lpfc_sli.c bf_set(lpfc_if6_cq_doorbell_cqid, &doorbell, q->queue_id); q 638 drivers/scsi/lpfc/lpfc_sli.c writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr); q 798 drivers/scsi/lpfc/lpfc_sli4.h void (*sli4_eq_clr_intr)(struct lpfc_queue *q); q 1090 drivers/scsi/lpfc/lpfc_sli4.h void lpfc_sli4_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q, q 1092 drivers/scsi/lpfc/lpfc_sli4.h void lpfc_sli4_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q, q 1094 drivers/scsi/lpfc/lpfc_sli4.h void lpfc_sli4_if6_eq_clr_intr(struct lpfc_queue *q); q 1095 drivers/scsi/lpfc/lpfc_sli4.h void lpfc_sli4_if6_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q, q 1097 drivers/scsi/lpfc/lpfc_sli4.h void lpfc_sli4_if6_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q, q 1122 drivers/scsi/lpfc/lpfc_sli4.h static inline void *lpfc_sli4_qe(struct lpfc_queue *q, uint16_t idx) q 1124 drivers/scsi/lpfc/lpfc_sli4.h return q->q_pgs[idx / q->entry_cnt_per_pg] + q 1125 drivers/scsi/lpfc/lpfc_sli4.h (q->entry_size * (idx % q->entry_cnt_per_pg)); q 269 drivers/scsi/ncr53c8xx.c m_link_s *q; q 295 drivers/scsi/ncr53c8xx.c q = &h[i]; q 296 drivers/scsi/ncr53c8xx.c while (q->next && q->next != (m_link_s *) b) { q 297 drivers/scsi/ncr53c8xx.c q = q->next; q 299 drivers/scsi/ncr53c8xx.c if (!q->next) { q 304 drivers/scsi/ncr53c8xx.c q->next = q->next->next; q 1180 drivers/scsi/qedf/qedf_io.c if (!sc_cmd->request->q) { q 614 drivers/scsi/qedi/qedi_fw.c if (!sc_cmd->request->q) { q 233 drivers/scsi/qedi/qedi_iscsi.h #define QEDI_OFLD_WAIT_STATE(q) ((q)->state == EP_STATE_OFLDCONN_FAILED || \ q 234 drivers/scsi/qedi/qedi_iscsi.h (q)->state == EP_STATE_OFLDCONN_COMPL) q 547 drivers/scsi/qla2xxx/qla_dbg.c struct qla2xxx_mqueue_chain *q; q 566 drivers/scsi/qla2xxx/qla_dbg.c q = ptr; q 567 drivers/scsi/qla2xxx/qla_dbg.c *last_chain = &q->type; q 568 drivers/scsi/qla2xxx/qla_dbg.c q->type = htonl(DUMP_CHAIN_QUEUE); q 569 drivers/scsi/qla2xxx/qla_dbg.c q->chain_size = htonl( q 594 drivers/scsi/qla2xxx/qla_dbg.c struct qla2xxx_mqueue_chain *q; q 610 drivers/scsi/qla2xxx/qla_dbg.c q = ptr; q 611 drivers/scsi/qla2xxx/qla_dbg.c *last_chain = &q->type; q 612 drivers/scsi/qla2xxx/qla_dbg.c q->type = htonl(DUMP_CHAIN_QUEUE); q 613 drivers/scsi/qla2xxx/qla_dbg.c q->chain_size = htonl( q 638 drivers/scsi/qla2xxx/qla_dbg.c q = ptr; q 639 drivers/scsi/qla2xxx/qla_dbg.c *last_chain = &q->type; q 640 drivers/scsi/qla2xxx/qla_dbg.c q->type = htonl(DUMP_CHAIN_QUEUE); q 641 drivers/scsi/qla2xxx/qla_dbg.c q->chain_size = htonl( q 4883 drivers/scsi/qla2xxx/qla_os.c bool q = false; q 4889 drivers/scsi/qla2xxx/qla_os.c q = true; q 4893 drivers/scsi/qla2xxx/qla_os.c if (q) q 6655 drivers/scsi/qla2xxx/qla_os.c bool q = false; q 6659 drivers/scsi/qla2xxx/qla_os.c q = true; q 6661 drivers/scsi/qla2xxx/qla_os.c if (q) q 250 drivers/scsi/scsi_dh.c int scsi_dh_activate(struct request_queue *q, activate_complete fn, void *data) q 255 drivers/scsi/scsi_dh.c sdev = scsi_device_from_queue(q); q 297 drivers/scsi/scsi_dh.c int scsi_dh_set_params(struct request_queue *q, const char *params) q 302 drivers/scsi/scsi_dh.c sdev = scsi_device_from_queue(q); q 319 drivers/scsi/scsi_dh.c int scsi_dh_attach(struct request_queue *q, const char *name) q 
325 drivers/scsi/scsi_dh.c sdev = scsi_device_from_queue(q); q 358 drivers/scsi/scsi_dh.c const char *scsi_dh_attached_handler_name(struct request_queue *q, gfp_t gfp) q 363 drivers/scsi/scsi_dh.c sdev = scsi_device_from_queue(q); q 1994 drivers/scsi/scsi_error.c blk_execute_rq_nowait(req->q, NULL, req, 1, eh_lock_door_done); q 279 drivers/scsi/scsi_lib.c blk_execute_rq(req->q, NULL, req, 1); q 360 drivers/scsi/scsi_lib.c static void scsi_kick_queue(struct request_queue *q) q 362 drivers/scsi/scsi_lib.c blk_mq_run_hw_queues(q, false); q 514 drivers/scsi/scsi_lib.c static void scsi_run_queue(struct request_queue *q) q 516 drivers/scsi/scsi_lib.c struct scsi_device *sdev = q->queuedata; q 523 drivers/scsi/scsi_lib.c blk_mq_run_hw_queues(q, false); q 529 drivers/scsi/scsi_lib.c struct request_queue *q; q 532 drivers/scsi/scsi_lib.c q = sdev->request_queue; q 533 drivers/scsi/scsi_lib.c scsi_run_queue(q); q 577 drivers/scsi/scsi_lib.c struct request_queue *q = sdev->request_queue; q 582 drivers/scsi/scsi_lib.c if (blk_queue_add_random(q)) q 611 drivers/scsi/scsi_lib.c percpu_ref_get(&q->q_usage_counter); q 619 drivers/scsi/scsi_lib.c blk_mq_run_hw_queues(q, true); q 621 drivers/scsi/scsi_lib.c percpu_ref_put(&q->q_usage_counter); q 666 drivers/scsi/scsi_lib.c struct request_queue *q) q 675 drivers/scsi/scsi_lib.c struct request_queue *q = cmd->device->request_queue; q 817 drivers/scsi/scsi_lib.c scsi_io_completion_reprep(cmd, q); q 935 drivers/scsi/scsi_lib.c struct request_queue *q = cmd->device->request_queue; q 980 drivers/scsi/scsi_lib.c scsi_io_completion_reprep(cmd, q); q 1002 drivers/scsi/scsi_lib.c count = blk_rq_map_sg(req->q, req, sdb->table.sgl); q 1046 drivers/scsi/scsi_lib.c ivecs = blk_rq_count_integrity_sg(rq->q, rq->bio); q 1055 drivers/scsi/scsi_lib.c count = blk_rq_map_integrity_sg(rq->q, rq->bio, q 1058 drivers/scsi/scsi_lib.c BUG_ON(count > queue_max_integrity_segments(rq->q)); q 1281 drivers/scsi/scsi_lib.c static inline int scsi_dev_queue_ready(struct request_queue *q, q 1368 drivers/scsi/scsi_lib.c static inline int scsi_host_queue_ready(struct request_queue *q, q 1430 drivers/scsi/scsi_lib.c static bool scsi_mq_lld_busy(struct request_queue *q) q 1432 drivers/scsi/scsi_lib.c struct scsi_device *sdev = q->queuedata; q 1435 drivers/scsi/scsi_lib.c if (blk_queue_dying(q)) q 1582 drivers/scsi/scsi_lib.c struct scsi_device *sdev = req->q->queuedata; q 1625 drivers/scsi/scsi_lib.c struct request_queue *q = hctx->queue; q 1626 drivers/scsi/scsi_lib.c struct scsi_device *sdev = q->queuedata; q 1633 drivers/scsi/scsi_lib.c struct request_queue *q = hctx->queue; q 1634 drivers/scsi/scsi_lib.c struct scsi_device *sdev = q->queuedata; q 1636 drivers/scsi/scsi_lib.c if (scsi_dev_queue_ready(q, sdev)) q 1648 drivers/scsi/scsi_lib.c struct request_queue *q = req->q; q 1649 drivers/scsi/scsi_lib.c struct scsi_device *sdev = q->queuedata; q 1668 drivers/scsi/scsi_lib.c if (!scsi_host_queue_ready(q, shost, sdev)) q 1782 drivers/scsi/scsi_lib.c void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q) q 1789 drivers/scsi/scsi_lib.c blk_queue_max_segments(q, min_t(unsigned short, shost->sg_tablesize, q 1797 drivers/scsi/scsi_lib.c blk_queue_max_integrity_segments(q, shost->sg_prot_tablesize); q 1804 drivers/scsi/scsi_lib.c blk_queue_max_hw_sectors(q, shost->max_sectors); q 1806 drivers/scsi/scsi_lib.c blk_queue_bounce_limit(q, BLK_BOUNCE_ISA); q 1807 drivers/scsi/scsi_lib.c blk_queue_segment_boundary(q, shost->dma_boundary); q 1810 drivers/scsi/scsi_lib.c 
blk_queue_max_segment_size(q, shost->max_segment_size); q 1811 drivers/scsi/scsi_lib.c blk_queue_virt_boundary(q, shost->virt_boundary_mask); q 1812 drivers/scsi/scsi_lib.c dma_set_max_seg_size(dev, queue_max_segment_size(q)); q 1821 drivers/scsi/scsi_lib.c blk_queue_dma_alignment(q, max(4, dma_get_cache_alignment()) - 1); q 1845 drivers/scsi/scsi_lib.c struct request_queue *q = hctx->queue; q 1846 drivers/scsi/scsi_lib.c struct scsi_device *sdev = q->queuedata; q 1922 drivers/scsi/scsi_lib.c struct scsi_device *scsi_device_from_queue(struct request_queue *q) q 1926 drivers/scsi/scsi_lib.c if (q->mq_ops == &scsi_mq_ops_no_commit || q 1927 drivers/scsi/scsi_lib.c q->mq_ops == &scsi_mq_ops) q 1928 drivers/scsi/scsi_lib.c sdev = q->queuedata; q 2548 drivers/scsi/scsi_lib.c struct request_queue *q = sdev->request_queue; q 2561 drivers/scsi/scsi_lib.c blk_set_pm_only(q); q 2563 drivers/scsi/scsi_lib.c blk_mq_freeze_queue(q); q 2571 drivers/scsi/scsi_lib.c blk_mq_unfreeze_queue(q); q 2578 drivers/scsi/scsi_lib.c blk_clear_pm_only(q); q 2653 drivers/scsi/scsi_lib.c struct request_queue *q = sdev->request_queue; q 2669 drivers/scsi/scsi_lib.c blk_mq_quiesce_queue_nowait(q); q 2691 drivers/scsi/scsi_lib.c struct request_queue *q = sdev->request_queue; q 2697 drivers/scsi/scsi_lib.c blk_mq_quiesce_queue(q); q 2705 drivers/scsi/scsi_lib.c struct request_queue *q = sdev->request_queue; q 2707 drivers/scsi/scsi_lib.c blk_mq_unquiesce_queue(q); q 3672 drivers/scsi/scsi_transport_fc.c struct request_queue *q = rport->rqst_q; q 3674 drivers/scsi/scsi_transport_fc.c if (q) q 3675 drivers/scsi/scsi_transport_fc.c blk_mq_run_hw_queues(q, true); q 3784 drivers/scsi/scsi_transport_fc.c struct request_queue *q; q 3795 drivers/scsi/scsi_transport_fc.c q = bsg_setup_queue(dev, bsg_name, fc_bsg_dispatch, fc_bsg_job_timeout, q 3797 drivers/scsi/scsi_transport_fc.c if (IS_ERR(q)) { q 3801 drivers/scsi/scsi_transport_fc.c return PTR_ERR(q); q 3803 drivers/scsi/scsi_transport_fc.c __scsi_init_queue(shost, q); q 3804 drivers/scsi/scsi_transport_fc.c blk_queue_rq_timeout(q, FC_DEFAULT_BSG_TIMEOUT); q 3805 drivers/scsi/scsi_transport_fc.c fc_host->rqst_q = q; q 3819 drivers/scsi/scsi_transport_fc.c struct request_queue *q; q 3826 drivers/scsi/scsi_transport_fc.c q = bsg_setup_queue(dev, dev_name(dev), fc_bsg_dispatch_prep, q 3828 drivers/scsi/scsi_transport_fc.c if (IS_ERR(q)) { q 3830 drivers/scsi/scsi_transport_fc.c return PTR_ERR(q); q 3832 drivers/scsi/scsi_transport_fc.c __scsi_init_queue(shost, q); q 3833 drivers/scsi/scsi_transport_fc.c blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT); q 3834 drivers/scsi/scsi_transport_fc.c rport->rqst_q = q; q 3849 drivers/scsi/scsi_transport_fc.c fc_bsg_remove(struct request_queue *q) q 3851 drivers/scsi/scsi_transport_fc.c bsg_remove_queue(q); q 1545 drivers/scsi/scsi_transport_iscsi.c struct request_queue *q; q 1552 drivers/scsi/scsi_transport_iscsi.c q = bsg_setup_queue(dev, bsg_name, iscsi_bsg_host_dispatch, NULL, 0); q 1553 drivers/scsi/scsi_transport_iscsi.c if (IS_ERR(q)) { q 1556 drivers/scsi/scsi_transport_iscsi.c return PTR_ERR(q); q 1558 drivers/scsi/scsi_transport_iscsi.c __scsi_init_queue(shost, q); q 1560 drivers/scsi/scsi_transport_iscsi.c ihost->bsg_q = q; q 47 drivers/scsi/scsi_transport_sas.c struct request_queue *q; q 192 drivers/scsi/scsi_transport_sas.c struct request_queue *q; q 200 drivers/scsi/scsi_transport_sas.c q = bsg_setup_queue(&rphy->dev, dev_name(&rphy->dev), q 202 drivers/scsi/scsi_transport_sas.c if (IS_ERR(q)) q 203 
drivers/scsi/scsi_transport_sas.c return PTR_ERR(q); q 204 drivers/scsi/scsi_transport_sas.c rphy->q = q; q 209 drivers/scsi/scsi_transport_sas.c q = bsg_setup_queue(&shost->shost_gendev, name, q 211 drivers/scsi/scsi_transport_sas.c if (IS_ERR(q)) q 212 drivers/scsi/scsi_transport_sas.c return PTR_ERR(q); q 213 drivers/scsi/scsi_transport_sas.c to_sas_host_attrs(shost)->q = q; q 246 drivers/scsi/scsi_transport_sas.c struct request_queue *q = to_sas_host_attrs(shost)->q; q 248 drivers/scsi/scsi_transport_sas.c bsg_remove_queue(q); q 1631 drivers/scsi/scsi_transport_sas.c bsg_remove_queue(rphy->q); q 772 drivers/scsi/sd.c struct request_queue *q = sdkp->disk->queue; q 776 drivers/scsi/sd.c q->limits.discard_alignment = q 778 drivers/scsi/sd.c q->limits.discard_granularity = q 787 drivers/scsi/sd.c blk_queue_max_discard_sectors(q, 0); q 788 drivers/scsi/sd.c blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q); q 820 drivers/scsi/sd.c blk_queue_max_discard_sectors(q, max_blocks * (logical_block_size >> 9)); q 821 drivers/scsi/sd.c blk_queue_flag_set(QUEUE_FLAG_DISCARD, q); q 948 drivers/scsi/sd.c struct request_queue *q = sdkp->disk->queue; q 1002 drivers/scsi/sd.c blk_queue_max_write_same_sectors(q, sdkp->max_ws_blocks * q 1004 drivers/scsi/sd.c blk_queue_max_write_zeroes_sectors(q, sdkp->max_ws_blocks * q 1076 drivers/scsi/sd.c rq->timeout = rq->q->rq_timeout * SD_FLUSH_TIMEOUT_MULTIPLIER; q 2928 drivers/scsi/sd.c struct request_queue *q = sdkp->disk->queue; q 2943 drivers/scsi/sd.c blk_queue_flag_set(QUEUE_FLAG_NONROT, q); q 2944 drivers/scsi/sd.c blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q); q 2949 drivers/scsi/sd.c q->limits.zoned = BLK_ZONED_HM; q 2954 drivers/scsi/sd.c q->limits.zoned = BLK_ZONED_HA; q 2960 drivers/scsi/sd.c q->limits.zoned = BLK_ZONED_NONE; q 2962 drivers/scsi/sd.c if (blk_queue_is_zoned(q) && sdkp->first_scan) q 2964 drivers/scsi/sd.c q->limits.zoned == BLK_ZONED_HM ? 
"managed" : "aware"); q 3102 drivers/scsi/sd.c struct request_queue *q = sdkp->disk->queue; q 3139 drivers/scsi/sd.c blk_queue_flag_clear(QUEUE_FLAG_NONROT, q); q 3140 drivers/scsi/sd.c blk_queue_flag_set(QUEUE_FLAG_ADD_RANDOM, q); q 3169 drivers/scsi/sd.c q->limits.max_dev_sectors = logical_to_sectors(sdp, dev_max); q 3172 drivers/scsi/sd.c q->limits.io_opt = logical_to_bytes(sdp, sdkp->opt_xfer_blocks); q 3175 drivers/scsi/sd.c q->limits.io_opt = 0; q 3181 drivers/scsi/sd.c rw_max = min(rw_max, queue_max_hw_sectors(q)); q 3188 drivers/scsi/sd.c q->limits.max_sectors > q->limits.max_dev_sectors || q 3189 drivers/scsi/sd.c q->limits.max_sectors > q->limits.max_hw_sectors) q 3190 drivers/scsi/sd.c q->limits.max_sectors = rw_max; q 3466 drivers/scsi/sd.c struct request_queue *q = disk->queue; q 3478 drivers/scsi/sd.c blk_mq_freeze_queue(q); q 3479 drivers/scsi/sd.c blk_mq_unfreeze_queue(q); q 128 drivers/scsi/sd_zbc.c struct request_queue *q = sdkp->disk->queue; q 144 drivers/scsi/sd_zbc.c queue_max_hw_sectors(q) << SECTOR_SHIFT); q 145 drivers/scsi/sd_zbc.c bufsize = min_t(size_t, bufsize, queue_max_segments(q) << PAGE_SHIFT); q 287 drivers/scsi/sg.c struct request_queue *q; q 347 drivers/scsi/sg.c q = sdp->device->request_queue; q 348 drivers/scsi/sg.c sdp->sg_tablesize = queue_max_segments(q); q 858 drivers/scsi/sg.c static int max_sectors_bytes(struct request_queue *q) q 860 drivers/scsi/sg.c unsigned int max_sectors = queue_max_sectors(q); q 1440 drivers/scsi/sg.c struct request_queue *q = scsidp->request_queue; q 1483 drivers/scsi/sg.c sdp->sg_tablesize = queue_max_segments(q); q 1714 drivers/scsi/sg.c struct request_queue *q = sfp->parentdp->device->request_queue; q 1740 drivers/scsi/sg.c rq = blk_get_request(q, hp->dxfer_direction == SG_DXFER_TO_DEV ? 
q 1763 drivers/scsi/sg.c blk_rq_aligned(q, (unsigned long)hp->dxferp, dxfer_len)) q 1814 drivers/scsi/sg.c res = blk_rq_map_user_iov(q, rq, md, &i, GFP_ATOMIC); q 1817 drivers/scsi/sg.c res = blk_rq_map_user(q, rq, md, hp->dxferp, q 559 drivers/scsi/st.c err = blk_rq_map_user(req->q, req, mdata, NULL, bufflen, q 586 drivers/scsi/st.c blk_execute_rq_nowait(req->q, NULL, req, 1, st_scsi_execute_end); q 97 drivers/scsi/sym53c8xx_2/sym_malloc.c m_link_p q; q 126 drivers/scsi/sym53c8xx_2/sym_malloc.c q = &h[i]; q 127 drivers/scsi/sym53c8xx_2/sym_malloc.c while (q->next && q->next != (m_link_p) b) { q 128 drivers/scsi/sym53c8xx_2/sym_malloc.c q = q->next; q 130 drivers/scsi/sym53c8xx_2/sym_malloc.c if (!q->next) { q 135 drivers/scsi/sym53c8xx_2/sym_malloc.c q->next = q->next->next; q 197 drivers/scsi/ufs/ufs_bsg.c struct request_queue *q; q 211 drivers/scsi/ufs/ufs_bsg.c q = bsg_setup_queue(bsg_dev, dev_name(bsg_dev), ufs_bsg_request, NULL, 0); q 212 drivers/scsi/ufs/ufs_bsg.c if (IS_ERR(q)) { q 213 drivers/scsi/ufs/ufs_bsg.c ret = PTR_ERR(q); q 217 drivers/scsi/ufs/ufs_bsg.c hba->bsg_queue = q; q 4649 drivers/scsi/ufs/ufshcd.c struct request_queue *q = sdev->request_queue; q 4651 drivers/scsi/ufs/ufshcd.c blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1); q 82 drivers/soc/fsl/qbman/qman_priv.h static inline u64 qm_mcr_querycgr_i_get64(const struct qm_mcr_querycgr *q) q 84 drivers/soc/fsl/qbman/qman_priv.h return ((u64)q->i_bcnt_hi << 32) | be32_to_cpu(q->i_bcnt_lo); q 86 drivers/soc/fsl/qbman/qman_priv.h static inline u64 qm_mcr_querycgr_a_get64(const struct qm_mcr_querycgr *q) q 88 drivers/soc/fsl/qbman/qman_priv.h return ((u64)q->a_bcnt_hi << 32) | be32_to_cpu(q->a_bcnt_lo); q 105 drivers/soc/fsl/qbman/qman_priv.h struct __qm_mcr_querycongestion q; q 120 drivers/soc/fsl/qbman/qman_priv.h return c->q.state[CGR_WORD(cgr)] & CGR_BIT(cgr); q 133 drivers/soc/fsl/qbman/qman_priv.h u32 *_d = dest->q.state; q 134 drivers/soc/fsl/qbman/qman_priv.h const u32 *_a = a->q.state; q 135 drivers/soc/fsl/qbman/qman_priv.h const u32 *_b = b->q.state; q 145 drivers/soc/fsl/qbman/qman_priv.h u32 *_d = dest->q.state; q 146 drivers/soc/fsl/qbman/qman_priv.h const u32 *_a = a->q.state; q 147 drivers/soc/fsl/qbman/qman_priv.h const u32 *_b = b->q.state; q 26 drivers/soc/qcom/rpmh.c #define DEFINE_RPMH_MSG_ONSTACK(dev, s, q, name) \ q 35 drivers/soc/qcom/rpmh.c .completion = q, \ q 20 drivers/soc/ti/knav_qmss_acc.c #define knav_range_offset_to_inst(kdev, range, q) \ q 21 drivers/soc/ti/knav_qmss_acc.c (range->queue_base_inst + (q << kdev->inst_shift)) q 266 drivers/spi/spi-fsl-qspi.c static inline int needs_swap_endian(struct fsl_qspi *q) q 268 drivers/spi/spi-fsl-qspi.c return q->devtype_data->quirks & QUADSPI_QUIRK_SWAP_ENDIAN; q 271 drivers/spi/spi-fsl-qspi.c static inline int needs_4x_clock(struct fsl_qspi *q) q 273 drivers/spi/spi-fsl-qspi.c return q->devtype_data->quirks & QUADSPI_QUIRK_4X_INT_CLK; q 276 drivers/spi/spi-fsl-qspi.c static inline int needs_fill_txfifo(struct fsl_qspi *q) q 278 drivers/spi/spi-fsl-qspi.c return q->devtype_data->quirks & QUADSPI_QUIRK_TKT253890; q 281 drivers/spi/spi-fsl-qspi.c static inline int needs_wakeup_wait_mode(struct fsl_qspi *q) q 283 drivers/spi/spi-fsl-qspi.c return q->devtype_data->quirks & QUADSPI_QUIRK_TKT245618; q 286 drivers/spi/spi-fsl-qspi.c static inline int needs_amba_base_offset(struct fsl_qspi *q) q 288 drivers/spi/spi-fsl-qspi.c return !(q->devtype_data->quirks & QUADSPI_QUIRK_BASE_INTERNAL); q 291 drivers/spi/spi-fsl-qspi.c static inline int 
needs_tdh_setting(struct fsl_qspi *q) q 293 drivers/spi/spi-fsl-qspi.c return q->devtype_data->quirks & QUADSPI_QUIRK_USE_TDH_SETTING; q 300 drivers/spi/spi-fsl-qspi.c static inline u32 fsl_qspi_endian_xchg(struct fsl_qspi *q, u32 a) q 302 drivers/spi/spi-fsl-qspi.c return needs_swap_endian(q) ? __swab32(a) : a; q 312 drivers/spi/spi-fsl-qspi.c static void qspi_writel(struct fsl_qspi *q, u32 val, void __iomem *addr) q 314 drivers/spi/spi-fsl-qspi.c if (q->devtype_data->little_endian) q 320 drivers/spi/spi-fsl-qspi.c static u32 qspi_readl(struct fsl_qspi *q, void __iomem *addr) q 322 drivers/spi/spi-fsl-qspi.c if (q->devtype_data->little_endian) q 330 drivers/spi/spi-fsl-qspi.c struct fsl_qspi *q = dev_id; q 334 drivers/spi/spi-fsl-qspi.c reg = qspi_readl(q, q->iobase + QUADSPI_FR); q 335 drivers/spi/spi-fsl-qspi.c qspi_writel(q, reg, q->iobase + QUADSPI_FR); q 338 drivers/spi/spi-fsl-qspi.c complete(&q->c); q 340 drivers/spi/spi-fsl-qspi.c dev_dbg(q->dev, "QUADSPI_FR : 0x%.8x:0x%.8x\n", 0, reg); q 344 drivers/spi/spi-fsl-qspi.c static int fsl_qspi_check_buswidth(struct fsl_qspi *q, u8 width) q 359 drivers/spi/spi-fsl-qspi.c struct fsl_qspi *q = spi_controller_get_devdata(mem->spi->master); q 362 drivers/spi/spi-fsl-qspi.c ret = fsl_qspi_check_buswidth(q, op->cmd.buswidth); q 365 drivers/spi/spi-fsl-qspi.c ret |= fsl_qspi_check_buswidth(q, op->addr.buswidth); q 368 drivers/spi/spi-fsl-qspi.c ret |= fsl_qspi_check_buswidth(q, op->dummy.buswidth); q 371 drivers/spi/spi-fsl-qspi.c ret |= fsl_qspi_check_buswidth(q, op->data.buswidth); q 392 drivers/spi/spi-fsl-qspi.c (op->data.nbytes > q->devtype_data->ahb_buf_size || q 393 drivers/spi/spi-fsl-qspi.c (op->data.nbytes > q->devtype_data->rxfifo - 4 && q 398 drivers/spi/spi-fsl-qspi.c op->data.nbytes > q->devtype_data->txfifo) q 404 drivers/spi/spi-fsl-qspi.c static void fsl_qspi_prepare_lut(struct fsl_qspi *q, q 407 drivers/spi/spi-fsl-qspi.c void __iomem *base = q->iobase; q 448 drivers/spi/spi-fsl-qspi.c qspi_writel(q, QUADSPI_LUTKEY_VALUE, q->iobase + QUADSPI_LUTKEY); q 449 drivers/spi/spi-fsl-qspi.c qspi_writel(q, QUADSPI_LCKER_UNLOCK, q->iobase + QUADSPI_LCKCR); q 453 drivers/spi/spi-fsl-qspi.c qspi_writel(q, lutval[i], base + QUADSPI_LUT_REG(i)); q 456 drivers/spi/spi-fsl-qspi.c qspi_writel(q, QUADSPI_LUTKEY_VALUE, q->iobase + QUADSPI_LUTKEY); q 457 drivers/spi/spi-fsl-qspi.c qspi_writel(q, QUADSPI_LCKER_LOCK, q->iobase + QUADSPI_LCKCR); q 460 drivers/spi/spi-fsl-qspi.c static int fsl_qspi_clk_prep_enable(struct fsl_qspi *q) q 464 drivers/spi/spi-fsl-qspi.c ret = clk_prepare_enable(q->clk_en); q 468 drivers/spi/spi-fsl-qspi.c ret = clk_prepare_enable(q->clk); q 470 drivers/spi/spi-fsl-qspi.c clk_disable_unprepare(q->clk_en); q 474 drivers/spi/spi-fsl-qspi.c if (needs_wakeup_wait_mode(q)) q 475 drivers/spi/spi-fsl-qspi.c pm_qos_add_request(&q->pm_qos_req, PM_QOS_CPU_DMA_LATENCY, 0); q 480 drivers/spi/spi-fsl-qspi.c static void fsl_qspi_clk_disable_unprep(struct fsl_qspi *q) q 482 drivers/spi/spi-fsl-qspi.c if (needs_wakeup_wait_mode(q)) q 483 drivers/spi/spi-fsl-qspi.c pm_qos_remove_request(&q->pm_qos_req); q 485 drivers/spi/spi-fsl-qspi.c clk_disable_unprepare(q->clk); q 486 drivers/spi/spi-fsl-qspi.c clk_disable_unprepare(q->clk_en); q 496 drivers/spi/spi-fsl-qspi.c static void fsl_qspi_invalidate(struct fsl_qspi *q) q 500 drivers/spi/spi-fsl-qspi.c reg = qspi_readl(q, q->iobase + QUADSPI_MCR); q 502 drivers/spi/spi-fsl-qspi.c qspi_writel(q, reg, q->iobase + QUADSPI_MCR); q 511 drivers/spi/spi-fsl-qspi.c qspi_writel(q, reg, q->iobase + 
QUADSPI_MCR); q 514 drivers/spi/spi-fsl-qspi.c static void fsl_qspi_select_mem(struct fsl_qspi *q, struct spi_device *spi) q 519 drivers/spi/spi-fsl-qspi.c if (q->selected == spi->chip_select) q 522 drivers/spi/spi-fsl-qspi.c if (needs_4x_clock(q)) q 525 drivers/spi/spi-fsl-qspi.c fsl_qspi_clk_disable_unprep(q); q 527 drivers/spi/spi-fsl-qspi.c ret = clk_set_rate(q->clk, rate); q 531 drivers/spi/spi-fsl-qspi.c ret = fsl_qspi_clk_prep_enable(q); q 535 drivers/spi/spi-fsl-qspi.c q->selected = spi->chip_select; q 537 drivers/spi/spi-fsl-qspi.c fsl_qspi_invalidate(q); q 540 drivers/spi/spi-fsl-qspi.c static void fsl_qspi_read_ahb(struct fsl_qspi *q, const struct spi_mem_op *op) q 543 drivers/spi/spi-fsl-qspi.c q->ahb_addr + q->selected * q->devtype_data->ahb_buf_size, q 547 drivers/spi/spi-fsl-qspi.c static void fsl_qspi_fill_txfifo(struct fsl_qspi *q, q 550 drivers/spi/spi-fsl-qspi.c void __iomem *base = q->iobase; q 556 drivers/spi/spi-fsl-qspi.c val = fsl_qspi_endian_xchg(q, val); q 557 drivers/spi/spi-fsl-qspi.c qspi_writel(q, val, base + QUADSPI_TBDR); q 562 drivers/spi/spi-fsl-qspi.c val = fsl_qspi_endian_xchg(q, val); q 563 drivers/spi/spi-fsl-qspi.c qspi_writel(q, val, base + QUADSPI_TBDR); q 566 drivers/spi/spi-fsl-qspi.c if (needs_fill_txfifo(q)) { q 568 drivers/spi/spi-fsl-qspi.c qspi_writel(q, 0, base + QUADSPI_TBDR); q 572 drivers/spi/spi-fsl-qspi.c static void fsl_qspi_read_rxfifo(struct fsl_qspi *q, q 575 drivers/spi/spi-fsl-qspi.c void __iomem *base = q->iobase; q 581 drivers/spi/spi-fsl-qspi.c val = qspi_readl(q, base + QUADSPI_RBDR(i / 4)); q 582 drivers/spi/spi-fsl-qspi.c val = fsl_qspi_endian_xchg(q, val); q 587 drivers/spi/spi-fsl-qspi.c val = qspi_readl(q, base + QUADSPI_RBDR(i / 4)); q 588 drivers/spi/spi-fsl-qspi.c val = fsl_qspi_endian_xchg(q, val); q 593 drivers/spi/spi-fsl-qspi.c static int fsl_qspi_do_op(struct fsl_qspi *q, const struct spi_mem_op *op) q 595 drivers/spi/spi-fsl-qspi.c void __iomem *base = q->iobase; q 598 drivers/spi/spi-fsl-qspi.c init_completion(&q->c); q 605 drivers/spi/spi-fsl-qspi.c qspi_writel(q, op->data.nbytes | QUADSPI_IPCR_SEQID(SEQID_LUT), q 609 drivers/spi/spi-fsl-qspi.c if (!wait_for_completion_timeout(&q->c, msecs_to_jiffies(1000))) q 613 drivers/spi/spi-fsl-qspi.c fsl_qspi_read_rxfifo(q, op); q 618 drivers/spi/spi-fsl-qspi.c static int fsl_qspi_readl_poll_tout(struct fsl_qspi *q, void __iomem *base, q 623 drivers/spi/spi-fsl-qspi.c if (!q->devtype_data->little_endian) q 632 drivers/spi/spi-fsl-qspi.c struct fsl_qspi *q = spi_controller_get_devdata(mem->spi->master); q 633 drivers/spi/spi-fsl-qspi.c void __iomem *base = q->iobase; q 637 drivers/spi/spi-fsl-qspi.c mutex_lock(&q->lock); q 640 drivers/spi/spi-fsl-qspi.c fsl_qspi_readl_poll_tout(q, base + QUADSPI_SR, (QUADSPI_SR_IP_ACC_MASK | q 643 drivers/spi/spi-fsl-qspi.c fsl_qspi_select_mem(q, mem->spi); q 645 drivers/spi/spi-fsl-qspi.c if (needs_amba_base_offset(q)) q 646 drivers/spi/spi-fsl-qspi.c addr_offset = q->memmap_phy; q 648 drivers/spi/spi-fsl-qspi.c qspi_writel(q, q 649 drivers/spi/spi-fsl-qspi.c q->selected * q->devtype_data->ahb_buf_size + addr_offset, q 652 drivers/spi/spi-fsl-qspi.c qspi_writel(q, qspi_readl(q, base + QUADSPI_MCR) | q 656 drivers/spi/spi-fsl-qspi.c qspi_writel(q, QUADSPI_SPTRCLR_BFPTRC | QUADSPI_SPTRCLR_IPPTRC, q 659 drivers/spi/spi-fsl-qspi.c fsl_qspi_prepare_lut(q, op); q 666 drivers/spi/spi-fsl-qspi.c if (op->data.nbytes > (q->devtype_data->rxfifo - 4) && q 668 drivers/spi/spi-fsl-qspi.c fsl_qspi_read_ahb(q, op); q 670 drivers/spi/spi-fsl-qspi.c 
qspi_writel(q, QUADSPI_RBCT_WMRK_MASK | q 674 drivers/spi/spi-fsl-qspi.c fsl_qspi_fill_txfifo(q, op); q 676 drivers/spi/spi-fsl-qspi.c err = fsl_qspi_do_op(q, op); q 680 drivers/spi/spi-fsl-qspi.c fsl_qspi_invalidate(q); q 682 drivers/spi/spi-fsl-qspi.c mutex_unlock(&q->lock); q 689 drivers/spi/spi-fsl-qspi.c struct fsl_qspi *q = spi_controller_get_devdata(mem->spi->master); q 692 drivers/spi/spi-fsl-qspi.c if (op->data.nbytes > q->devtype_data->txfifo) q 693 drivers/spi/spi-fsl-qspi.c op->data.nbytes = q->devtype_data->txfifo; q 695 drivers/spi/spi-fsl-qspi.c if (op->data.nbytes > q->devtype_data->ahb_buf_size) q 696 drivers/spi/spi-fsl-qspi.c op->data.nbytes = q->devtype_data->ahb_buf_size; q 697 drivers/spi/spi-fsl-qspi.c else if (op->data.nbytes > (q->devtype_data->rxfifo - 4)) q 704 drivers/spi/spi-fsl-qspi.c static int fsl_qspi_default_setup(struct fsl_qspi *q) q 706 drivers/spi/spi-fsl-qspi.c void __iomem *base = q->iobase; q 711 drivers/spi/spi-fsl-qspi.c fsl_qspi_clk_disable_unprep(q); q 714 drivers/spi/spi-fsl-qspi.c ret = clk_set_rate(q->clk, 66000000); q 718 drivers/spi/spi-fsl-qspi.c ret = fsl_qspi_clk_prep_enable(q); q 723 drivers/spi/spi-fsl-qspi.c qspi_writel(q, QUADSPI_MCR_SWRSTSD_MASK | QUADSPI_MCR_SWRSTHD_MASK, q 728 drivers/spi/spi-fsl-qspi.c qspi_writel(q, QUADSPI_MCR_MDIS_MASK | QUADSPI_MCR_RESERVED_MASK, q 736 drivers/spi/spi-fsl-qspi.c if (needs_tdh_setting(q)) q 737 drivers/spi/spi-fsl-qspi.c qspi_writel(q, qspi_readl(q, base + QUADSPI_FLSHCR) & q 741 drivers/spi/spi-fsl-qspi.c reg = qspi_readl(q, base + QUADSPI_SMPR); q 742 drivers/spi/spi-fsl-qspi.c qspi_writel(q, reg & ~(QUADSPI_SMPR_FSDLY_MASK q 748 drivers/spi/spi-fsl-qspi.c qspi_writel(q, 0, base + QUADSPI_BUF0IND); q 749 drivers/spi/spi-fsl-qspi.c qspi_writel(q, 0, base + QUADSPI_BUF1IND); q 750 drivers/spi/spi-fsl-qspi.c qspi_writel(q, 0, base + QUADSPI_BUF2IND); q 752 drivers/spi/spi-fsl-qspi.c qspi_writel(q, QUADSPI_BFGENCR_SEQID(SEQID_LUT), q 753 drivers/spi/spi-fsl-qspi.c q->iobase + QUADSPI_BFGENCR); q 754 drivers/spi/spi-fsl-qspi.c qspi_writel(q, QUADSPI_RBCT_WMRK_MASK, base + QUADSPI_RBCT); q 755 drivers/spi/spi-fsl-qspi.c qspi_writel(q, QUADSPI_BUF3CR_ALLMST_MASK | q 756 drivers/spi/spi-fsl-qspi.c QUADSPI_BUF3CR_ADATSZ(q->devtype_data->ahb_buf_size / 8), q 759 drivers/spi/spi-fsl-qspi.c if (needs_amba_base_offset(q)) q 760 drivers/spi/spi-fsl-qspi.c addr_offset = q->memmap_phy; q 769 drivers/spi/spi-fsl-qspi.c qspi_writel(q, q->devtype_data->ahb_buf_size + addr_offset, q 771 drivers/spi/spi-fsl-qspi.c qspi_writel(q, q->devtype_data->ahb_buf_size * 2 + addr_offset, q 773 drivers/spi/spi-fsl-qspi.c qspi_writel(q, q->devtype_data->ahb_buf_size * 3 + addr_offset, q 775 drivers/spi/spi-fsl-qspi.c qspi_writel(q, q->devtype_data->ahb_buf_size * 4 + addr_offset, q 778 drivers/spi/spi-fsl-qspi.c q->selected = -1; q 781 drivers/spi/spi-fsl-qspi.c qspi_writel(q, QUADSPI_MCR_RESERVED_MASK | QUADSPI_MCR_END_CFG_MASK, q 785 drivers/spi/spi-fsl-qspi.c qspi_writel(q, 0xffffffff, q->iobase + QUADSPI_FR); q 788 drivers/spi/spi-fsl-qspi.c qspi_writel(q, QUADSPI_RSER_TFIE, q->iobase + QUADSPI_RSER); q 795 drivers/spi/spi-fsl-qspi.c struct fsl_qspi *q = spi_controller_get_devdata(mem->spi->master); q 804 drivers/spi/spi-fsl-qspi.c if (of_get_available_child_count(q->dev->of_node) == 1) q 805 drivers/spi/spi-fsl-qspi.c return dev_name(q->dev); q 808 drivers/spi/spi-fsl-qspi.c "%s-%d", dev_name(q->dev), q 832 drivers/spi/spi-fsl-qspi.c struct fsl_qspi *q; q 835 drivers/spi/spi-fsl-qspi.c ctlr = 
spi_alloc_master(&pdev->dev, sizeof(*q)); q 842 drivers/spi/spi-fsl-qspi.c q = spi_controller_get_devdata(ctlr); q 843 drivers/spi/spi-fsl-qspi.c q->dev = dev; q 844 drivers/spi/spi-fsl-qspi.c q->devtype_data = of_device_get_match_data(dev); q 845 drivers/spi/spi-fsl-qspi.c if (!q->devtype_data) { q 850 drivers/spi/spi-fsl-qspi.c platform_set_drvdata(pdev, q); q 854 drivers/spi/spi-fsl-qspi.c q->iobase = devm_ioremap_resource(dev, res); q 855 drivers/spi/spi-fsl-qspi.c if (IS_ERR(q->iobase)) { q 856 drivers/spi/spi-fsl-qspi.c ret = PTR_ERR(q->iobase); q 862 drivers/spi/spi-fsl-qspi.c q->ahb_addr = devm_ioremap_resource(dev, res); q 863 drivers/spi/spi-fsl-qspi.c if (IS_ERR(q->ahb_addr)) { q 864 drivers/spi/spi-fsl-qspi.c ret = PTR_ERR(q->ahb_addr); q 868 drivers/spi/spi-fsl-qspi.c q->memmap_phy = res->start; q 871 drivers/spi/spi-fsl-qspi.c q->clk_en = devm_clk_get(dev, "qspi_en"); q 872 drivers/spi/spi-fsl-qspi.c if (IS_ERR(q->clk_en)) { q 873 drivers/spi/spi-fsl-qspi.c ret = PTR_ERR(q->clk_en); q 877 drivers/spi/spi-fsl-qspi.c q->clk = devm_clk_get(dev, "qspi"); q 878 drivers/spi/spi-fsl-qspi.c if (IS_ERR(q->clk)) { q 879 drivers/spi/spi-fsl-qspi.c ret = PTR_ERR(q->clk); q 883 drivers/spi/spi-fsl-qspi.c ret = fsl_qspi_clk_prep_enable(q); q 895 drivers/spi/spi-fsl-qspi.c fsl_qspi_irq_handler, 0, pdev->name, q); q 901 drivers/spi/spi-fsl-qspi.c mutex_init(&q->lock); q 907 drivers/spi/spi-fsl-qspi.c fsl_qspi_default_setup(q); q 918 drivers/spi/spi-fsl-qspi.c mutex_destroy(&q->lock); q 921 drivers/spi/spi-fsl-qspi.c fsl_qspi_clk_disable_unprep(q); q 932 drivers/spi/spi-fsl-qspi.c struct fsl_qspi *q = platform_get_drvdata(pdev); q 935 drivers/spi/spi-fsl-qspi.c qspi_writel(q, QUADSPI_MCR_MDIS_MASK, q->iobase + QUADSPI_MCR); q 936 drivers/spi/spi-fsl-qspi.c qspi_writel(q, 0x0, q->iobase + QUADSPI_RSER); q 938 drivers/spi/spi-fsl-qspi.c fsl_qspi_clk_disable_unprep(q); q 940 drivers/spi/spi-fsl-qspi.c mutex_destroy(&q->lock); q 952 drivers/spi/spi-fsl-qspi.c struct fsl_qspi *q = dev_get_drvdata(dev); q 954 drivers/spi/spi-fsl-qspi.c fsl_qspi_default_setup(q); q 814 drivers/spi/spi-pxa2xx.c unsigned long q, q1, q2; q 860 drivers/spi/spi-pxa2xx.c q = q1; q 864 drivers/spi/spi-pxa2xx.c q = q2; q 885 drivers/spi/spi-pxa2xx.c q = 1; q 891 drivers/spi/spi-pxa2xx.c return q - 1; q 324 drivers/staging/fieldbus/anybuss/host.c ab_task_dequeue_finish_put(struct kfifo *q, struct anybuss_host *cd) q 329 drivers/staging/fieldbus/anybuss/host.c ret = kfifo_out(q, &t, sizeof(t)); q 336 drivers/staging/fieldbus/anybuss/host.c ab_task_enqueue(struct ab_task *t, struct kfifo *q, spinlock_t *slock, q 343 drivers/staging/fieldbus/anybuss/host.c ret = kfifo_in_spinlocked(q, &t, sizeof(t), slock); q 353 drivers/staging/fieldbus/anybuss/host.c ab_task_enqueue_wait(struct ab_task *t, struct kfifo *q, spinlock_t *slock, q 358 drivers/staging/fieldbus/anybuss/host.c ret = ab_task_enqueue(t, q, slock, wq); q 862 drivers/staging/fieldbus/anybuss/host.c static void process_q(struct anybuss_host *cd, struct kfifo *q) q 867 drivers/staging/fieldbus/anybuss/host.c ret = kfifo_out_peek(q, &t, sizeof(t)); q 872 drivers/staging/fieldbus/anybuss/host.c ab_task_dequeue_finish_put(q, cd); q 1226 drivers/staging/fieldbus/anybuss/host.c static int taskq_alloc(struct device *dev, struct kfifo *q) q 1234 drivers/staging/fieldbus/anybuss/host.c return kfifo_init(q, buf, size); q 2145 drivers/staging/media/allegro-dvt/allegro-core.c static int allegro_start_streaming(struct vb2_queue *q, unsigned int count) q 2147 
drivers/staging/media/allegro-dvt/allegro-core.c struct allegro_channel *channel = vb2_get_drv_priv(q); q 2152 drivers/staging/media/allegro-dvt/allegro-core.c V4L2_TYPE_IS_OUTPUT(q->type) ? "output" : "capture"); q 2154 drivers/staging/media/allegro-dvt/allegro-core.c if (V4L2_TYPE_IS_OUTPUT(q->type)) { q 2157 drivers/staging/media/allegro-dvt/allegro-core.c } else if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) { q 2164 drivers/staging/media/allegro-dvt/allegro-core.c static void allegro_stop_streaming(struct vb2_queue *q) q 2166 drivers/staging/media/allegro-dvt/allegro-core.c struct allegro_channel *channel = vb2_get_drv_priv(q); q 2172 drivers/staging/media/allegro-dvt/allegro-core.c V4L2_TYPE_IS_OUTPUT(q->type) ? "output" : "capture"); q 2174 drivers/staging/media/allegro-dvt/allegro-core.c if (V4L2_TYPE_IS_OUTPUT(q->type)) { q 2178 drivers/staging/media/allegro-dvt/allegro-core.c } else if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) { q 382 drivers/staging/media/hantro/hantro.h dma_addr_t hantro_get_ref(struct vb2_queue *q, u64 ts); q 46 drivers/staging/media/hantro/hantro_drv.c dma_addr_t hantro_get_ref(struct vb2_queue *q, u64 ts) q 51 drivers/staging/media/hantro/hantro_drv.c index = vb2_find_timestamp(q, ts, 0); q 54 drivers/staging/media/hantro/hantro_drv.c buf = vb2_get_buffer(q, index); q 180 drivers/staging/media/hantro/hantro_g1_vp8_dec.c const struct v4l2_vp8_quantization_header *q = &hdr->quant_header; q 186 drivers/staging/media/hantro/hantro_g1_vp8_dec.c hantro_reg_write(vpu, &vp8_dec_quant[0], q->y_ac_qi); q 189 drivers/staging/media/hantro/hantro_g1_vp8_dec.c u32 quant = clamp(q->y_ac_qi + seg->quant_update[i], q 200 drivers/staging/media/hantro/hantro_g1_vp8_dec.c hantro_reg_write(vpu, &vp8_dec_quant_delta[0], q->y_dc_delta); q 201 drivers/staging/media/hantro/hantro_g1_vp8_dec.c hantro_reg_write(vpu, &vp8_dec_quant_delta[1], q->y2_dc_delta); q 202 drivers/staging/media/hantro/hantro_g1_vp8_dec.c hantro_reg_write(vpu, &vp8_dec_quant_delta[2], q->y2_ac_delta); q 203 drivers/staging/media/hantro/hantro_g1_vp8_dec.c hantro_reg_write(vpu, &vp8_dec_quant_delta[3], q->uv_dc_delta); q 204 drivers/staging/media/hantro/hantro_g1_vp8_dec.c hantro_reg_write(vpu, &vp8_dec_quant_delta[4], q->uv_ac_delta); q 373 drivers/staging/media/hantro/hantro_g1_vp8_dec.c struct vb2_queue *cap_q = &ctx->fh.m2m_ctx->cap_q_ctx.q; q 276 drivers/staging/media/hantro/hantro_h264.c struct vb2_queue *cap_q = &ctx->fh.m2m_ctx->cap_q_ctx.q; q 536 drivers/staging/media/hantro/hantro_h264.c struct vb2_queue *cap_q = &ctx->fh.m2m_ctx->cap_q_ctx.q; q 353 drivers/staging/media/hantro/hantro_v4l2.c ctx->fh.m2m_ctx->out_q_ctx.q.requires_requests = false; q 358 drivers/staging/media/hantro/hantro_v4l2.c ctx->fh.m2m_ctx->out_q_ctx.q.requires_requests = true; q 610 drivers/staging/media/hantro/hantro_v4l2.c static bool hantro_vq_is_coded(struct vb2_queue *q) q 612 drivers/staging/media/hantro/hantro_v4l2.c struct hantro_ctx *ctx = vb2_get_drv_priv(q); q 614 drivers/staging/media/hantro/hantro_v4l2.c return hantro_is_encoder_ctx(ctx) != V4L2_TYPE_IS_OUTPUT(q->type); q 617 drivers/staging/media/hantro/hantro_v4l2.c static int hantro_start_streaming(struct vb2_queue *q, unsigned int count) q 619 drivers/staging/media/hantro/hantro_v4l2.c struct hantro_ctx *ctx = vb2_get_drv_priv(q); q 622 drivers/staging/media/hantro/hantro_v4l2.c if (V4L2_TYPE_IS_OUTPUT(q->type)) q 627 drivers/staging/media/hantro/hantro_v4l2.c if (hantro_vq_is_coded(q)) { q 630 drivers/staging/media/hantro/hantro_v4l2.c if 
(V4L2_TYPE_IS_OUTPUT(q->type)) q 645 drivers/staging/media/hantro/hantro_v4l2.c hantro_return_bufs(struct vb2_queue *q, q 648 drivers/staging/media/hantro/hantro_v4l2.c struct hantro_ctx *ctx = vb2_get_drv_priv(q); q 662 drivers/staging/media/hantro/hantro_v4l2.c static void hantro_stop_streaming(struct vb2_queue *q) q 664 drivers/staging/media/hantro/hantro_v4l2.c struct hantro_ctx *ctx = vb2_get_drv_priv(q); q 666 drivers/staging/media/hantro/hantro_v4l2.c if (hantro_vq_is_coded(q)) { q 676 drivers/staging/media/hantro/hantro_v4l2.c if (V4L2_TYPE_IS_OUTPUT(q->type)) q 677 drivers/staging/media/hantro/hantro_v4l2.c hantro_return_bufs(q, v4l2_m2m_src_buf_remove); q 679 drivers/staging/media/hantro/hantro_v4l2.c hantro_return_bufs(q, v4l2_m2m_dst_buf_remove); q 318 drivers/staging/media/hantro/rk3399_vpu_hw_vp8_dec.c const struct v4l2_vp8_quantization_header *q = &hdr->quant_header; q 324 drivers/staging/media/hantro/rk3399_vpu_hw_vp8_dec.c hantro_reg_write(vpu, &vp8_dec_quant[0], q->y_ac_qi); q 327 drivers/staging/media/hantro/rk3399_vpu_hw_vp8_dec.c u32 quant = clamp(q->y_ac_qi + seg->quant_update[i], q 338 drivers/staging/media/hantro/rk3399_vpu_hw_vp8_dec.c hantro_reg_write(vpu, &vp8_dec_quant_delta[0], q->y_dc_delta); q 339 drivers/staging/media/hantro/rk3399_vpu_hw_vp8_dec.c hantro_reg_write(vpu, &vp8_dec_quant_delta[1], q->y2_dc_delta); q 340 drivers/staging/media/hantro/rk3399_vpu_hw_vp8_dec.c hantro_reg_write(vpu, &vp8_dec_quant_delta[2], q->y2_ac_delta); q 341 drivers/staging/media/hantro/rk3399_vpu_hw_vp8_dec.c hantro_reg_write(vpu, &vp8_dec_quant_delta[3], q->uv_dc_delta); q 342 drivers/staging/media/hantro/rk3399_vpu_hw_vp8_dec.c hantro_reg_write(vpu, &vp8_dec_quant_delta[4], q->uv_ac_delta); q 45 drivers/staging/media/imx/imx-media-capture.c struct vb2_queue q; q 286 drivers/staging/media/imx/imx-media-capture.c if (vb2_is_busy(&priv->q)) { q 325 drivers/staging/media/imx/imx-media-capture.c if (vb2_is_busy(&priv->q)) q 658 drivers/staging/media/imx/imx-media-capture.c struct vb2_queue *vq = &priv->q; q 718 drivers/staging/media/imx/imx-media-capture.c struct vb2_queue *vq = &priv->q; q 736 drivers/staging/media/imx/imx-media-capture.c struct vb2_queue *vq = &priv->q; q 860 drivers/staging/media/imx/imx-media-capture.c vfd->queue = &priv->q; q 498 drivers/staging/media/imx/imx-media-csc-scaler.c static int ipu_csc_scaler_start_streaming(struct vb2_queue *q, q 502 drivers/staging/media/imx/imx-media-csc-scaler.c struct ipu_csc_scaler_ctx *ctx = vb2_get_drv_priv(q); q 510 drivers/staging/media/imx/imx-media-csc-scaler.c (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) ? q 547 drivers/staging/media/imx/imx-media-csc-scaler.c static void ipu_csc_scaler_stop_streaming(struct vb2_queue *q) q 549 drivers/staging/media/imx/imx-media-csc-scaler.c struct ipu_csc_scaler_ctx *ctx = vb2_get_drv_priv(q); q 559 drivers/staging/media/imx/imx-media-csc-scaler.c if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) { q 40 drivers/staging/media/ipu3/ipu3-css.c #define IPU3_CSS_QUEUE_TO_FLAGS(q) (1 << (q)) q 181 drivers/staging/media/ipu3/ipu3-css.c static bool imgu_css_queue_enabled(struct imgu_css_queue *q) q 183 drivers/staging/media/ipu3/ipu3-css.c return q->css_fmt; q 1107 drivers/staging/media/ipu3/ipu3-css.c struct imgu_abi_queues __iomem *q = base + IMGU_REG_SP_DMEM_BASE(sp) + q 1110 drivers/staging/media/ipu3/ipu3-css.c return queue >= 0 ? 
readb(&q->host2sp_bufq_info[thread][queue].end) : q 1111 drivers/staging/media/ipu3/ipu3-css.c readb(&q->host2sp_evtq_info.end); q 1121 drivers/staging/media/ipu3/ipu3-css.c struct imgu_abi_queues __iomem *q = base + IMGU_REG_SP_DMEM_BASE(sp) + q 1126 drivers/staging/media/ipu3/ipu3-css.c size = readb(&q->host2sp_bufq_info[thread][queue].size); q 1127 drivers/staging/media/ipu3/ipu3-css.c start = readb(&q->host2sp_bufq_info[thread][queue].start); q 1128 drivers/staging/media/ipu3/ipu3-css.c end = readb(&q->host2sp_bufq_info[thread][queue].end); q 1130 drivers/staging/media/ipu3/ipu3-css.c size = readb(&q->host2sp_evtq_info.size); q 1131 drivers/staging/media/ipu3/ipu3-css.c start = readb(&q->host2sp_evtq_info.start); q 1132 drivers/staging/media/ipu3/ipu3-css.c end = readb(&q->host2sp_evtq_info.end); q 1143 drivers/staging/media/ipu3/ipu3-css.c writel(data, &q->host2sp_bufq[thread][queue][end]); q 1144 drivers/staging/media/ipu3/ipu3-css.c writeb(end2, &q->host2sp_bufq_info[thread][queue].end); q 1146 drivers/staging/media/ipu3/ipu3-css.c writel(data, &q->host2sp_evtq[end]); q 1147 drivers/staging/media/ipu3/ipu3-css.c writeb(end2, &q->host2sp_evtq_info.end); q 1159 drivers/staging/media/ipu3/ipu3-css.c struct imgu_abi_queues __iomem *q = base + IMGU_REG_SP_DMEM_BASE(sp) + q 1164 drivers/staging/media/ipu3/ipu3-css.c size = readb(&q->sp2host_bufq_info[queue].size); q 1165 drivers/staging/media/ipu3/ipu3-css.c start = readb(&q->sp2host_bufq_info[queue].start); q 1166 drivers/staging/media/ipu3/ipu3-css.c end = readb(&q->sp2host_bufq_info[queue].end); q 1168 drivers/staging/media/ipu3/ipu3-css.c size = readb(&q->sp2host_evtq_info.size); q 1169 drivers/staging/media/ipu3/ipu3-css.c start = readb(&q->sp2host_evtq_info.start); q 1170 drivers/staging/media/ipu3/ipu3-css.c end = readb(&q->sp2host_evtq_info.end); q 1182 drivers/staging/media/ipu3/ipu3-css.c *data = readl(&q->sp2host_bufq[queue][start]); q 1183 drivers/staging/media/ipu3/ipu3-css.c writeb(start2, &q->sp2host_bufq_info[queue].start); q 1187 drivers/staging/media/ipu3/ipu3-css.c *data = readl(&q->sp2host_evtq[start]); q 1188 drivers/staging/media/ipu3/ipu3-css.c writeb(start2, &q->sp2host_evtq_info.start); q 1402 drivers/staging/media/ipu3/ipu3-css.c int q, r, pipe; q 1424 drivers/staging/media/ipu3/ipu3-css.c for (q = 0; q < IPU3_CSS_QUEUES; q++) q 1426 drivers/staging/media/ipu3/ipu3-css.c &css_pipe->queue[q].bufs, q 1439 drivers/staging/media/ipu3/ipu3-css.c int q; q 1443 drivers/staging/media/ipu3/ipu3-css.c for (q = 0; q < IPU3_CSS_QUEUES; q++) q 1444 drivers/staging/media/ipu3/ipu3-css.c if (!list_empty(&css_pipe->queue[q].bufs)) q 1447 drivers/staging/media/ipu3/ipu3-css.c return (q == IPU3_CSS_QUEUES); q 1470 drivers/staging/media/ipu3/ipu3-css.c unsigned int p, q, i; q 1492 drivers/staging/media/ipu3/ipu3-css.c for (q = 0; q < IPU3_CSS_QUEUES; q++) { q 1493 drivers/staging/media/ipu3/ipu3-css.c unsigned int abi_buf_num = ARRAY_SIZE(css_pipe->abi_buffers[q]); q 1497 drivers/staging/media/ipu3/ipu3-css.c &css_pipe->abi_buffers[q][i], q 1514 drivers/staging/media/ipu3/ipu3-css.c unsigned int p, q, i, abi_buf_num; q 1518 drivers/staging/media/ipu3/ipu3-css.c for (q = 0; q < IPU3_CSS_QUEUES; q++) { q 1519 drivers/staging/media/ipu3/ipu3-css.c abi_buf_num = ARRAY_SIZE(css_pipe->abi_buffers[q]); q 1521 drivers/staging/media/ipu3/ipu3-css.c imgu_dmamap_free(imgu, &css_pipe->abi_buffers[q][i]); q 1551 drivers/staging/media/ipu3/ipu3-css.c int r, q, pipe; q 1565 drivers/staging/media/ipu3/ipu3-css.c for (q = 0; q < IPU3_CSS_QUEUES; 
q 1566 drivers/staging/media/ipu3/ipu3-css.c r = imgu_css_queue_init(&css_pipe->queue[q], NULL, 0);
q 1747 drivers/staging/media/ipu3/ipu3-css.c struct imgu_css_queue *q;
q 1751 drivers/staging/media/ipu3/ipu3-css.c q = kcalloc(IPU3_CSS_QUEUES, sizeof(struct imgu_css_queue), GFP_KERNEL);
q 1752 drivers/staging/media/ipu3/ipu3-css.c if (!q)
q 1755 drivers/staging/media/ipu3/ipu3-css.c in = &q[IPU3_CSS_QUEUE_IN].fmt.mpix;
q 1756 drivers/staging/media/ipu3/ipu3-css.c out = &q[IPU3_CSS_QUEUE_OUT].fmt.mpix;
q 1757 drivers/staging/media/ipu3/ipu3-css.c vf = &q[IPU3_CSS_QUEUE_VF].fmt.mpix;
q 1768 drivers/staging/media/ipu3/ipu3-css.c if (imgu_css_queue_init(&q[i], fmts[i],
q 1792 drivers/staging/media/ipu3/ipu3-css.c if (!imgu_css_queue_enabled(&q[IPU3_CSS_QUEUE_IN]) ||
q 1793 drivers/staging/media/ipu3/ipu3-css.c !imgu_css_queue_enabled(&q[IPU3_CSS_QUEUE_OUT])) {
q 1799 drivers/staging/media/ipu3/ipu3-css.c if (!imgu_css_queue_enabled(&q[IPU3_CSS_QUEUE_OUT])) {
q 1834 drivers/staging/media/ipu3/ipu3-css.c ret = imgu_css_find_binary(css, pipe, q, r);
q 1848 drivers/staging/media/ipu3/ipu3-css.c if (imgu_css_queue_init(&q[i], &q[i].fmt.mpix,
q 1855 drivers/staging/media/ipu3/ipu3-css.c *fmts[i] = q[i].fmt.mpix;
q 1871 drivers/staging/media/ipu3/ipu3-css.c kfree(q);
q 164 drivers/staging/media/meson/vdec/vdec.c static void process_num_buffers(struct vb2_queue *q,
q 170 drivers/staging/media/meson/vdec/vdec.c unsigned int buffers_total = q->num_buffers + *num_buffers;
q 173 drivers/staging/media/meson/vdec/vdec.c *num_buffers = fmt_out->min_buffers - q->num_buffers;
q 175 drivers/staging/media/meson/vdec/vdec.c *num_buffers = fmt_out->max_buffers - q->num_buffers;
q 182 drivers/staging/media/meson/vdec/vdec.c sess->num_dst_bufs = q->num_buffers + *num_buffers;
q 183 drivers/staging/media/meson/vdec/vdec.c q->min_buffers_needed = max(fmt_out->min_buffers, sess->num_dst_bufs);
q 186 drivers/staging/media/meson/vdec/vdec.c static int vdec_queue_setup(struct vb2_queue *q, unsigned int *num_buffers,
q 190 drivers/staging/media/meson/vdec/vdec.c struct amvdec_session *sess = vb2_get_drv_priv(q);
q 194 drivers/staging/media/meson/vdec/vdec.c switch (q->type) {
q 218 drivers/staging/media/meson/vdec/vdec.c process_num_buffers(q, sess, num_buffers, false);
q 225 drivers/staging/media/meson/vdec/vdec.c switch (q->type) {
q 247 drivers/staging/media/meson/vdec/vdec.c process_num_buffers(q, sess, num_buffers, true);
q 274 drivers/staging/media/meson/vdec/vdec.c static int vdec_start_streaming(struct vb2_queue *q, unsigned int count)
q 276 drivers/staging/media/meson/vdec/vdec.c struct amvdec_session *sess = vb2_get_drv_priv(q);
q 287 drivers/staging/media/meson/vdec/vdec.c if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
q 296 drivers/staging/media/meson/vdec/vdec.c q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
q 344 drivers/staging/media/meson/vdec/vdec.c if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
q 382 drivers/staging/media/meson/vdec/vdec.c static void vdec_stop_streaming(struct vb2_queue *q)
q 384 drivers/staging/media/meson/vdec/vdec.c struct amvdec_session *sess = vb2_get_drv_priv(q);
q 406 drivers/staging/media/meson/vdec/vdec.c if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
q 446 drivers/staging/media/meson/vdec/vdec_helpers.c vb2_queue_error(&sess->m2m_ctx->cap_q_ctx.q);
q 447 drivers/staging/media/meson/vdec/vdec_helpers.c vb2_queue_error(&sess->m2m_ctx->out_q_ctx.q);
q 1098 drivers/staging/media/omap4iss/iss_video.c struct vb2_queue *q;
q 1120 drivers/staging/media/omap4iss/iss_video.c q = &handle->queue;
q 1122 drivers/staging/media/omap4iss/iss_video.c q->type = video->type;
q 1123 drivers/staging/media/omap4iss/iss_video.c q->io_modes = VB2_MMAP | VB2_DMABUF;
q 1124 drivers/staging/media/omap4iss/iss_video.c q->drv_priv = handle;
q 1125 drivers/staging/media/omap4iss/iss_video.c q->ops = &iss_video_vb2ops;
q 1126 drivers/staging/media/omap4iss/iss_video.c q->mem_ops = &vb2_dma_contig_memops;
q 1127 drivers/staging/media/omap4iss/iss_video.c q->buf_struct_size = sizeof(struct iss_buffer);
q 1128 drivers/staging/media/omap4iss/iss_video.c q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
q 1129 drivers/staging/media/omap4iss/iss_video.c q->dev = video->iss->dev;
q 1131 drivers/staging/media/omap4iss/iss_video.c ret = vb2_queue_init(q);
q 182 drivers/staging/media/omap4iss/iss_video.h #define iss_video_queue_to_iss_video_fh(q) \
q 183 drivers/staging/media/omap4iss/iss_video.h container_of(q, struct iss_video_fh, queue)
q 1399 drivers/staging/qlge/qlge.h struct tx_ring_desc *q; /* descriptor list for the queue */
q 1671 drivers/staging/qlge/qlge_dbg.c pr_err("tx_ring->q = %p\n", tx_ring->q);
q 2112 drivers/staging/qlge/qlge_main.c tx_ring_desc = &tx_ring->q[mac_rsp->tid];
q 2656 drivers/staging/qlge/qlge_main.c tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
q 2771 drivers/staging/qlge/qlge_main.c tx_ring_desc = tx_ring->q;
q 2790 drivers/staging/qlge/qlge_main.c kfree(tx_ring->q);
q 2791 drivers/staging/qlge/qlge_main.c tx_ring->q = NULL;
q 2805 drivers/staging/qlge/qlge_main.c tx_ring->q =
q 2808 drivers/staging/qlge/qlge_main.c if (tx_ring->q == NULL)
q 3066 drivers/staging/qlge/qlge_main.c tx_ring_desc = &tx_ring->q[i];
q 1841 drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c struct vb2_queue *q;
q 1916 drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c q = &dev->capture.vb_vidq;
q 1917 drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c memset(q, 0, sizeof(*q));
q 1918 drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
q 1919 drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_READ;
q 1920 drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c q->drv_priv = dev;
q 1921 drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c q->buf_struct_size = sizeof(struct mmal_buffer);
q 1922 drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c q->ops = &bm2835_mmal_video_qops;
q 1923 drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c q->mem_ops = &vb2_vmalloc_memops;
q 1924 drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
q 1925 drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c q->lock = &dev->mutex;
q 1926 drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c ret = vb2_queue_init(q);
q 1289 drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c struct list_head *q, *buf_head;
q 1310 drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c list_for_each_safe(buf_head, q, &port->buffers) {
q 1336 drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c struct list_head *q, *buf_head;
q 1352 drivers/staging/vc04_services/bcm2835-camera/mmal-vchiq.c list_for_each_safe(buf_head, q, &port->buffers) {
q 75 drivers/staging/vt6655/device.h #define AVAIL_TD(p, q) ((p)->opts.tx_descs[(q)] - ((p)->iTDUsed[(q)]))
q 35 drivers/staging/vt6655/tmacro.h #define LODWORD(q) ((q).u.dwLowDword)
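Both vb2_queue setup sequences above (omap4iss/iss_video.c and bcm2835-camera) follow the same videobuf2 recipe: fill in the queue type, I/O modes, driver private pointer, ops, memory ops and per-buffer struct size, then let vb2_queue_init() validate the result. A minimal sketch of the pattern, assuming a capture queue with vmalloc buffers; the vb2_queue fields and vb2_queue_init() are the real videobuf2 API, while my_queue_setup() and its parameters are hypothetical.

#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-vmalloc.h>

static int my_queue_setup(struct vb2_queue *q, void *drv_priv,
			  const struct vb2_ops *ops, struct mutex *lock)
{
	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	q->io_modes = VB2_MMAP | VB2_DMABUF;
	q->drv_priv = drv_priv;		/* handed back by vb2_get_drv_priv() */
	q->ops = ops;			/* queue_setup/start_streaming/... */
	q->mem_ops = &vb2_vmalloc_memops;
	q->buf_struct_size = sizeof(struct vb2_v4l2_buffer);
	q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
	q->lock = lock;			/* serializes the queue ioctls */
	return vb2_queue_init(q);	/* rejects inconsistent setups */
}

Drivers usually embed struct vb2_v4l2_buffer at the start of their own buffer struct and pass that struct's size instead, as the iss_buffer and mmal_buffer entries above do.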
q 36 drivers/staging/vt6655/tmacro.h #define HIDWORD(q) ((q).u.dwHighDword)
q 816 drivers/target/target_core_device.c struct request_queue *q)
q 818 drivers/target/target_core_device.c int block_size = queue_logical_block_size(q);
q 820 drivers/target/target_core_device.c if (!blk_queue_discard(q))
q 824 drivers/target/target_core_device.c q->limits.max_discard_sectors >> (ilog2(block_size) - 9);
q 829 drivers/target/target_core_device.c attrib->unmap_granularity = q->limits.discard_granularity / block_size;
q 830 drivers/target/target_core_device.c attrib->unmap_granularity_alignment = q->limits.discard_alignment /
q 832 drivers/target/target_core_device.c attrib->unmap_zeroes_data = (q->limits.max_write_zeroes_sectors);
q 136 drivers/target/target_core_file.c struct request_queue *q = bdev_get_queue(inode->i_bdev);
q 152 drivers/target/target_core_file.c if (target_configure_unmap_from_queue(&dev->dev_attrib, q))
q 161 drivers/target/target_core_file.c if (blk_queue_nonrot(q))
q 72 drivers/target/target_core_iblock.c struct request_queue *q;
q 106 drivers/target/target_core_iblock.c q = bdev_get_queue(bd);
q 109 drivers/target/target_core_iblock.c dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q);
q 110 drivers/target/target_core_iblock.c dev->dev_attrib.hw_queue_depth = q->nr_requests;
q 112 drivers/target/target_core_iblock.c if (target_configure_unmap_from_queue(&dev->dev_attrib, q))
q 126 drivers/target/target_core_iblock.c if (blk_queue_nonrot(q))
q 194 drivers/target/target_core_iblock.c struct request_queue *q)
q 697 drivers/target/target_core_iblock.c struct request_queue *q = bdev_get_queue(ib_dev->ibd_bd);
q 703 drivers/target/target_core_iblock.c if (test_bit(QUEUE_FLAG_FUA, &q->queue_flags)) {
q 706 drivers/target/target_core_iblock.c else if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags))
q 796 drivers/target/target_core_iblock.c struct request_queue *q = bdev_get_queue(bd);
q 798 drivers/target/target_core_iblock.c return iblock_emulate_read_cap_with_block_size(dev, bd, q);
q 857 drivers/target/target_core_iblock.c struct request_queue *q = bdev_get_queue(bd);
q 859 drivers/target/target_core_iblock.c return test_bit(QUEUE_FLAG_WC, &q->queue_flags);
q 291 drivers/target/target_core_pscsi.c struct request_queue *q = sd->request_queue;
q 306 drivers/target/target_core_pscsi.c min_not_zero(sd->host->max_sectors, queue_max_hw_sectors(q));
q 20 drivers/tty/hvc/hvsi_lib.c struct hvsi_query q;
q 29 drivers/tty/hvc/hvsi_lib.c q.hdr.type = VS_QUERY_PACKET_HEADER;
q 30 drivers/tty/hvc/hvsi_lib.c q.hdr.len = sizeof(struct hvsi_query);
q 31 drivers/tty/hvc/hvsi_lib.c q.verb = cpu_to_be16(VSV_SEND_VERSION_NUMBER);
q 32 drivers/tty/hvc/hvsi_lib.c hvsi_send_packet(pv, &q.hdr);
q 259 drivers/tty/hvc/hvsi_lib.c struct hvsi_query q;
q 266 drivers/tty/hvc/hvsi_lib.c q.hdr.type = VS_QUERY_PACKET_HEADER;
q 267 drivers/tty/hvc/hvsi_lib.c q.hdr.len = sizeof(struct hvsi_query);
q 268 drivers/tty/hvc/hvsi_lib.c q.verb = cpu_to_be16(VSV_SEND_MODEM_CTL_STATUS);
q 269 drivers/tty/hvc/hvsi_lib.c rc = hvsi_send_packet(pv, &q.hdr);
q 185 drivers/tty/synclink_gt.c wait_queue_head_t q;
q 2319 drivers/tty/synclink_gt.c wake_up_interruptible(&w->q);
q 2992 drivers/tty/synclink_gt.c init_waitqueue_head(&w->q);
q 3000 drivers/tty/synclink_gt.c add_wait_queue(&w->q, &w->wait);
q 3008 drivers/tty/synclink_gt.c remove_wait_queue(&cw->q, &cw->wait);
q 3024 drivers/tty/synclink_gt.c wake_up_interruptible(&(*head)->q);
q 203 drivers/tty/vt/consolemap.c unsigned char *q;
q 206 drivers/tty/vt/consolemap.c q = p->inverse_translations[i];
q 208 drivers/tty/vt/consolemap.c if (!q) {
q 209 drivers/tty/vt/consolemap.c q = p->inverse_translations[i] = kmalloc(MAX_GLYPH, GFP_KERNEL);
q 210 drivers/tty/vt/consolemap.c if (!q) return;
q 212 drivers/tty/vt/consolemap.c memset(q, 0, MAX_GLYPH);
q 216 drivers/tty/vt/consolemap.c if (glyph >= 0 && glyph < MAX_GLYPH && q[glyph] < 32) {
q 218 drivers/tty/vt/consolemap.c q[glyph] = j;
q 228 drivers/tty/vt/consolemap.c u16 *q;
q 231 drivers/tty/vt/consolemap.c q = p->inverse_trans_unicode;
q 232 drivers/tty/vt/consolemap.c if (!q) {
q 233 drivers/tty/vt/consolemap.c q = p->inverse_trans_unicode =
q 235 drivers/tty/vt/consolemap.c if (!q)
q 238 drivers/tty/vt/consolemap.c memset(q, 0, MAX_GLYPH * sizeof(u16));
q 251 drivers/tty/vt/consolemap.c && q[glyph] < 32)
q 252 drivers/tty/vt/consolemap.c q[glyph] = (i << 11) + (j << 6) + k;
q 300 drivers/tty/vt/consolemap.c struct uni_pagedir *p, *q = NULL;
q 306 drivers/tty/vt/consolemap.c if (p && p != q) {
q 309 drivers/tty/vt/consolemap.c q = p;
q 437 drivers/tty/vt/consolemap.c struct uni_pagedir *q;
q 442 drivers/tty/vt/consolemap.c q = *vc_cons[i].d->vc_uni_pagedir_loc;
q 443 drivers/tty/vt/consolemap.c if (!q || q == p || q->sum != p->sum)
q 447 drivers/tty/vt/consolemap.c p1 = p->uni_pgdir[j]; q1 = q->uni_pgdir[j];
q 464 drivers/tty/vt/consolemap.c q->refcount++;
q 465 drivers/tty/vt/consolemap.c *conp->vc_uni_pagedir_loc = q;
q 506 drivers/tty/vt/consolemap.c struct uni_pagedir *p, *q;
q 510 drivers/tty/vt/consolemap.c q = kzalloc(sizeof(*p), GFP_KERNEL);
q 511 drivers/tty/vt/consolemap.c if (!q) {
q 516 drivers/tty/vt/consolemap.c q->refcount=1;
q 517 drivers/tty/vt/consolemap.c *vc->vc_uni_pagedir_loc = q;
q 539 drivers/tty/vt/consolemap.c struct uni_pagedir *p, *q;
q 574 drivers/tty/vt/consolemap.c q = *vc->vc_uni_pagedir_loc;
q 595 drivers/tty/vt/consolemap.c err1 = con_insert_unipair(q, l, p2[k]);
q 599 drivers/tty/vt/consolemap.c con_release_unimap(q);
q 600 drivers/tty/vt/consolemap.c kfree(q);
q 618 drivers/tty/vt/consolemap.c p = q;
q 662 drivers/tty/vt/consolemap.c u16 *q;
q 686 drivers/tty/vt/consolemap.c q = dfont_unitable;
q 690 drivers/tty/vt/consolemap.c err1 = con_insert_unipair(p, *(q++), i);
q 717 drivers/tty/vt/consolemap.c struct uni_pagedir *q;
q 724 drivers/tty/vt/consolemap.c q = *src_vc->vc_uni_pagedir_loc;
q 725 drivers/tty/vt/consolemap.c q->refcount++;
q 726 drivers/tty/vt/consolemap.c *dst_vc->vc_uni_pagedir_loc = q;
q 1998 drivers/tty/vt/keyboard.c u_char *q;
q 2053 drivers/tty/vt/keyboard.c q = func_table[i];
q 2064 drivers/tty/vt/keyboard.c delta = (q ? -strlen(q) : 1) + strlen(kbs->kb_string);
q 2074 drivers/tty/vt/keyboard.c if (!q)
q 2093 drivers/tty/vt/keyboard.c if (!q)
q 669 drivers/tty/vt/vt.c u16 *q = p;
q 672 drivers/tty/vt/vt.c if (p > q)
q 673 drivers/tty/vt/vt.c vc->vc_sw->con_putcs(vc, q, p-q, yy, startx);
q 675 drivers/tty/vt/vt.c q = p;
q 682 drivers/tty/vt/vt.c if (p > q)
q 683 drivers/tty/vt/vt.c vc->vc_sw->con_putcs(vc, q, p-q, yy, startx);
q 771 drivers/tty/vt/vt.c u16 *q = p;
q 777 drivers/tty/vt/vt.c a = scr_readw(q);
q 779 drivers/tty/vt/vt.c scr_writew(a, q);
q 780 drivers/tty/vt/vt.c q++;
q 784 drivers/tty/vt/vt.c a = scr_readw(q);
q 786 drivers/tty/vt/vt.c scr_writew(a, q);
q 787 drivers/tty/vt/vt.c q++;
q 791 drivers/tty/vt/vt.c a = scr_readw(q);
q 793 drivers/tty/vt/vt.c scr_writew(a, q);
q 794 drivers/tty/vt/vt.c q++;
q 665 drivers/usb/core/devio.c struct list_head *p, *q, hitlist;
q 670 drivers/usb/core/devio.c list_for_each_safe(p, q, &ps->async_pending)
q 2368 drivers/usb/host/ehci-sched.c union ehci_shadow q, *q_p;
q 2392 drivers/usb/host/ehci-sched.c q.ptr = q_p->ptr;
q 2396 drivers/usb/host/ehci-sched.c while (q.ptr != NULL) {
q 2408 drivers/usb/host/ehci-sched.c if (q.itd->hw_transaction[uf] &
q 2413 drivers/usb/host/ehci-sched.c q_p = &q.itd->itd_next;
q 2414 drivers/usb/host/ehci-sched.c hw_p = &q.itd->hw_next;
q 2416 drivers/usb/host/ehci-sched.c q.itd->hw_next);
q 2417 drivers/usb/host/ehci-sched.c q = *q_p;
q 2428 drivers/usb/host/ehci-sched.c *q_p = q.itd->itd_next;
q 2430 drivers/usb/host/ehci-sched.c q.itd->hw_next != EHCI_LIST_END(ehci))
q 2431 drivers/usb/host/ehci-sched.c *hw_p = q.itd->hw_next;
q 2434 drivers/usb/host/ehci-sched.c type = Q_NEXT_TYPE(ehci, q.itd->hw_next);
q 2436 drivers/usb/host/ehci-sched.c modified = itd_complete(ehci, q.itd);
q 2437 drivers/usb/host/ehci-sched.c q = *q_p;
q 2449 drivers/usb/host/ehci-sched.c && (q.sitd->hw_results & SITD_ACTIVE(ehci))) {
q 2451 drivers/usb/host/ehci-sched.c q_p = &q.sitd->sitd_next;
q 2452 drivers/usb/host/ehci-sched.c hw_p = &q.sitd->hw_next;
q 2453 drivers/usb/host/ehci-sched.c type = Q_NEXT_TYPE(ehci, q.sitd->hw_next);
q 2454 drivers/usb/host/ehci-sched.c q = *q_p;
q 2463 drivers/usb/host/ehci-sched.c *q_p = q.sitd->sitd_next;
q 2465 drivers/usb/host/ehci-sched.c q.sitd->hw_next != EHCI_LIST_END(ehci))
q 2466 drivers/usb/host/ehci-sched.c *hw_p = q.sitd->hw_next;
q 2469 drivers/usb/host/ehci-sched.c type = Q_NEXT_TYPE(ehci, q.sitd->hw_next);
q 2471 drivers/usb/host/ehci-sched.c modified = sitd_complete(ehci, q.sitd);
q 2472 drivers/usb/host/ehci-sched.c q = *q_p;
q 2476 drivers/usb/host/ehci-sched.c type, frame, q.ptr);
q 2482 drivers/usb/host/ehci-sched.c q.ptr = NULL;
q 3342 drivers/usb/host/fotg210-hcd.c union fotg210_shadow *q = &fotg210->pshadow[frame];
q 3346 drivers/usb/host/fotg210-hcd.c while (q->ptr) {
q 3349 drivers/usb/host/fotg210-hcd.c hw = q->qh->hw;
q 3352 drivers/usb/host/fotg210-hcd.c usecs += q->qh->usecs;
q 3356 drivers/usb/host/fotg210-hcd.c usecs += q->qh->c_usecs;
q 3358 drivers/usb/host/fotg210-hcd.c q = &q->qh->qh_next;
q 3365 drivers/usb/host/fotg210-hcd.c if (q->fstn->hw_prev != FOTG210_LIST_END(fotg210))
q 3368 drivers/usb/host/fotg210-hcd.c hw_p = &q->fstn->hw_next;
q 3369 drivers/usb/host/fotg210-hcd.c q = &q->fstn->fstn_next;
q 3372 drivers/usb/host/fotg210-hcd.c if (q->itd->hw_transaction[uframe])
q 3373 drivers/usb/host/fotg210-hcd.c usecs += q->itd->stream->usecs;
q 3374 drivers/usb/host/fotg210-hcd.c hw_p = &q->itd->hw_next;
q 3375 drivers/usb/host/fotg210-hcd.c q = &q->itd->itd_next;
q 4587 drivers/usb/host/fotg210-hcd.c union fotg210_shadow q, *q_p;
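The usb/core/devio.c entry above, like the earlier mmal-vchiq and later gntdev-dmabuf ones, hands list_for_each_safe() a spare cursor (q) so the node under the primary cursor can be unlinked while the walk continues. A small self-contained sketch of that idiom; my_item and my_drain() are hypothetical, while list_for_each_safe(), list_entry() and list_del() are the real list API.

#include <linux/list.h>
#include <linux/slab.h>

struct my_item {
	struct list_head node;
};

static void my_drain(struct list_head *head)
{
	struct list_head *p, *q;	/* q caches p->next across the delete */

	list_for_each_safe(p, q, head) {
		struct my_item *it = list_entry(p, struct my_item, node);

		list_del(p);	/* safe: the iterator already advanced via q */
		kfree(it);
	}
}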
q 4593 drivers/usb/host/fotg210-hcd.c q.ptr = q_p->ptr;
q 4597 drivers/usb/host/fotg210-hcd.c while (q.ptr) {
q 4608 drivers/usb/host/fotg210-hcd.c if (q.itd->hw_transaction[uf] &
q 4613 drivers/usb/host/fotg210-hcd.c q_p = &q.itd->itd_next;
q 4614 drivers/usb/host/fotg210-hcd.c hw_p = &q.itd->hw_next;
q 4616 drivers/usb/host/fotg210-hcd.c q.itd->hw_next);
q 4617 drivers/usb/host/fotg210-hcd.c q = *q_p;
q 4627 drivers/usb/host/fotg210-hcd.c *q_p = q.itd->itd_next;
q 4628 drivers/usb/host/fotg210-hcd.c *hw_p = q.itd->hw_next;
q 4629 drivers/usb/host/fotg210-hcd.c type = Q_NEXT_TYPE(fotg210, q.itd->hw_next);
q 4631 drivers/usb/host/fotg210-hcd.c modified = itd_complete(fotg210, q.itd);
q 4632 drivers/usb/host/fotg210-hcd.c q = *q_p;
q 4636 drivers/usb/host/fotg210-hcd.c type, frame, q.ptr);
q 4641 drivers/usb/host/fotg210-hcd.c q.ptr = NULL;
q 2271 drivers/usb/host/oxu210hp-hcd.c union ehci_shadow *q = &oxu->pshadow[frame];
q 2274 drivers/usb/host/oxu210hp-hcd.c while (q->ptr) {
q 2279 drivers/usb/host/oxu210hp-hcd.c if (q->qh->hw_info2 & cpu_to_le32(1 << uframe))
q 2280 drivers/usb/host/oxu210hp-hcd.c usecs += q->qh->usecs;
q 2282 drivers/usb/host/oxu210hp-hcd.c if (q->qh->hw_info2 & cpu_to_le32(1 << (8 + uframe)))
q 2283 drivers/usb/host/oxu210hp-hcd.c usecs += q->qh->c_usecs;
q 2284 drivers/usb/host/oxu210hp-hcd.c hw_p = &q->qh->hw_next;
q 2285 drivers/usb/host/oxu210hp-hcd.c q = &q->qh->qh_next;
q 2693 drivers/usb/host/oxu210hp-hcd.c union ehci_shadow q, *q_p;
q 2707 drivers/usb/host/oxu210hp-hcd.c q.ptr = q_p->ptr;
q 2711 drivers/usb/host/oxu210hp-hcd.c while (q.ptr != NULL) {
q 2717 drivers/usb/host/oxu210hp-hcd.c temp.qh = qh_get(q.qh);
q 2718 drivers/usb/host/oxu210hp-hcd.c type = Q_NEXT_TYPE(q.qh->hw_next);
q 2719 drivers/usb/host/oxu210hp-hcd.c q = q.qh->qh_next;
q 2727 drivers/usb/host/oxu210hp-hcd.c type, frame, q.ptr);
q 2728 drivers/usb/host/oxu210hp-hcd.c q.ptr = NULL;
q 46 drivers/usb/musb/musb_host.h static inline struct musb_qh *first_qh(struct list_head *q)
q 48 drivers/usb/musb/musb_host.h if (list_empty(q))
q 50 drivers/usb/musb/musb_host.h return list_entry(q->next, struct musb_qh, ring);
q 345 drivers/usb/serial/digi_acceleport.c wait_queue_head_t *q, long timeout,
q 351 drivers/usb/serial/digi_acceleport.c prepare_to_wait(q, &wait, TASK_INTERRUPTIBLE);
q 354 drivers/usb/serial/digi_acceleport.c finish_wait(q, &wait);
q 551 drivers/vhost/scsi.c struct vhost_scsi_virtqueue *q;
q 553 drivers/vhost/scsi.c q = container_of(cmd->tvc_vq, struct vhost_scsi_virtqueue, vq);
q 554 drivers/vhost/scsi.c vq = q - vs->vqs;
q 204 drivers/video/console/mdacon.c u16 *q, q_save;
q 209 drivers/video/console/mdacon.c q = mda_vram_base + 0x01000 / 2;
q 212 drivers/video/console/mdacon.c q_save = scr_readw(q);
q 230 drivers/video/console/mdacon.c scr_writew(0xA55A, q);
q 232 drivers/video/console/mdacon.c if (scr_readw(q) == 0xA55A)
q 235 drivers/video/console/mdacon.c scr_writew(0x5AA5, q);
q 237 drivers/video/console/mdacon.c if (scr_readw(q) == 0x5AA5)
q 241 drivers/video/console/mdacon.c scr_writew(q_save, q);
q 650 drivers/video/console/sticore.c unsigned char *n, *p, *q;
q 657 drivers/video/console/sticore.c q = (unsigned char *)f->raw;
q 659 drivers/video/console/sticore.c *p = *q++;
q 211 drivers/video/fbdev/aty/mach64_ct.c u32 q;
q 216 drivers/video/fbdev/aty/mach64_ct.c q = par->ref_clk_per * pll->pll_ref_div * 4 / vclk_per;
q 217 drivers/video/fbdev/aty/mach64_ct.c if (q < 16*8 || q > 255*8) {
q 221 drivers/video/fbdev/aty/mach64_ct.c pll->vclk_post_div = (q < 128*8);
q 222 drivers/video/fbdev/aty/mach64_ct.c pll->vclk_post_div += (q < 64*8);
q 223 drivers/video/fbdev/aty/mach64_ct.c pll->vclk_post_div += (q < 32*8);
q 227 drivers/video/fbdev/aty/mach64_ct.c pll->vclk_fb_div = q * pll->vclk_post_div_real / 8;
q 404 drivers/video/fbdev/aty/mach64_ct.c u32 q, memcntl, trp;
q 527 drivers/video/fbdev/aty/mach64_ct.c q = par->ref_clk_per * pll->ct.pll_ref_div * 8 /
q 530 drivers/video/fbdev/aty/mach64_ct.c if (q < 16*8 || q > 255*8) {
q 534 drivers/video/fbdev/aty/mach64_ct.c xpost_div = (q < 128*8);
q 535 drivers/video/fbdev/aty/mach64_ct.c xpost_div += (q < 64*8);
q 536 drivers/video/fbdev/aty/mach64_ct.c xpost_div += (q < 32*8);
q 539 drivers/video/fbdev/aty/mach64_ct.c pll->ct.mclk_fb_div = q * pll->ct.xclk_post_div_real / 8;
q 578 drivers/video/fbdev/aty/mach64_ct.c q = par->ref_clk_per * pll->ct.pll_ref_div * 4 / par->mclk_per;
q 579 drivers/video/fbdev/aty/mach64_ct.c if (q < 16*8 || q > 255*8) {
q 583 drivers/video/fbdev/aty/mach64_ct.c mpost_div = (q < 128*8);
q 584 drivers/video/fbdev/aty/mach64_ct.c mpost_div += (q < 64*8);
q 585 drivers/video/fbdev/aty/mach64_ct.c mpost_div += (q < 32*8);
q 588 drivers/video/fbdev/aty/mach64_ct.c pll->ct.sclk_fb_div = q * sclk_post_div_real / 8;
q 608 drivers/video/fbdev/core/fbcon.c unsigned short *save = NULL, *r, *q;
q 624 drivers/video/fbdev/core/fbcon.c q = (unsigned short *) (vc->vc_origin +
q 627 drivers/video/fbdev/core/fbcon.c for (r = q - logo_lines * cols; r < q; r++)
q 630 drivers/video/fbdev/core/fbcon.c if (r != q && new_rows >= rows + logo_lines) {
q 636 drivers/video/fbdev/core/fbcon.c r = q - step;
q 639 drivers/video/fbdev/core/fbcon.c r = q;
q 642 drivers/video/fbdev/core/fbcon.c if (r == q) {
q 644 drivers/video/fbdev/core/fbcon.c r = q - step - cols;
q 669 drivers/video/fbdev/core/fbcon.c q = (unsigned short *) (vc->vc_origin +
q 672 drivers/video/fbdev/core/fbcon.c scr_memcpyw(q, save, logo_lines * new_cols * 2);
q 2858 drivers/video/fbdev/core/fbcon.c unsigned long p, q;
q 2862 drivers/video/fbdev/core/fbcon.c q = vc->vc_origin +
q 2870 drivers/video/fbdev/core/fbcon.c q -= vc->vc_size_row;
q 2871 drivers/video/fbdev/core/fbcon.c scr_memcpyw((u16 *) q, (u16 *) p,
q 282 drivers/video/fbdev/hgafb.c void __iomem *p, *q;
q 299 drivers/video/fbdev/hgafb.c q = hga_vram + 0x01000;
q 301 drivers/video/fbdev/hgafb.c p_save = readw(p); q_save = readw(q);
q 847 drivers/video/fbdev/matrox/matroxfb_DAC1064.c u_int32_t q;
q 416 drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.c unsigned itc, ec, q, sc;
q 438 drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.c q = (ptr[2] >> 2) & 0x3;
q 454 drivers/video/fbdev/omap2/omapfb/dss/hdmi5_core.c (itc << 7) | (ec << 4) | (q << 2) | (sc << 0));
q 473 drivers/virt/fsl_hypervisor.c uint32_t q[QSIZE];
q 500 drivers/virt/fsl_hypervisor.c dbq->q[dbq->tail] = doorbell;
q 638 drivers/virt/fsl_hypervisor.c dbell = dbq->q[dbq->head];
q 148 drivers/visorbus/visorchannel.c static int sig_queue_offset(struct channel_header *chan_hdr, int q)
q 151 drivers/visorbus/visorchannel.c ((q) * sizeof(struct signal_queue_header)));
q 158 drivers/visorbus/visorchannel.c static int sig_data_offset(struct channel_header *chan_hdr, int q,
q 161 drivers/visorbus/visorchannel.c return (sig_queue_offset(chan_hdr, q) + sig_hdr->sig_base_offset +
q 105 drivers/xen/events/events_fifo.c struct evtchn_fifo_queue *q = &per_cpu(cpu_queue, cpu);
q 112 drivers/xen/events/events_fifo.c q->head[i] = 0;
q 287 drivers/xen/events/events_fifo.c struct evtchn_fifo_queue *q = &per_cpu(cpu_queue, cpu);
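The mach64_ct entries above size a PLL divider pair from a fixed-point ratio q measured in 1/8 steps: q must land in [16*8, 255*8], and each boolean test (q < N*8) that holds bumps the post-divider exponent by one. A standalone sketch of that arithmetic under the same assumptions; mach64_pick_post_div() is a hypothetical name.

static int mach64_pick_post_div(unsigned int q)	/* q in 1/8 units */
{
	int post_div;

	if (q < 16 * 8 || q > 255 * 8)
		return -1;		/* outside the feedback divider range */

	post_div  = (q < 128 * 8);	/* each true test doubles the divider */
	post_div += (q < 64 * 8);
	post_div += (q < 32 * 8);
	return post_div;		/* 0..3, i.e. divide by 1, 2, 4 or 8 */
}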
q 292 drivers/xen/events/events_fifo.c head = q->head[priority];
q 324 drivers/xen/events/events_fifo.c q->head[priority] = head;
q 331 drivers/xen/events/events_fifo.c unsigned q;
q 338 drivers/xen/events/events_fifo.c q = find_first_bit(&ready, EVTCHN_FIFO_MAX_QUEUES);
q 339 drivers/xen/events/events_fifo.c consume_one_event(cpu, control_block, q, &ready, drop);
q 705 drivers/xen/gntdev-dmabuf.c struct gntdev_dmabuf *q, *gntdev_dmabuf, *ret = ERR_PTR(-ENOENT);
q 708 drivers/xen/gntdev-dmabuf.c list_for_each_entry_safe(gntdev_dmabuf, q, &priv->imp_list, next) {
q 750 drivers/xen/gntdev-dmabuf.c struct gntdev_dmabuf *q, *gntdev_dmabuf;
q 752 drivers/xen/gntdev-dmabuf.c list_for_each_entry_safe(gntdev_dmabuf, q, &priv->imp_list, next)
q 136 fs/afs/addr_list.c const char *q, *stop;
q 148 fs/afs/addr_list.c q = memchr(p, ']', end - p);
q 150 fs/afs/addr_list.c for (q = p; q < end; q++)
q 151 fs/afs/addr_list.c if (*q == '+' || *q == delim)
q 155 fs/afs/addr_list.c if (in4_pton(p, q - p, (u8 *)&x[0], -1, &stop)) {
q 157 fs/afs/addr_list.c } else if (in6_pton(p, q - p, (u8 *)x, -1, &stop)) {
q 164 fs/afs/addr_list.c p = q;
q 170 fs/afs/addr_list.c if (q < end && *q == ']')
q 101 fs/autofs/expire.c struct dentry *q;
q 105 fs/autofs/expire.c q = positive_after(root, prev);
q 109 fs/autofs/expire.c return q;
q 294 fs/block_dev.c struct request_queue *q = bdev_get_queue(bdev);
q 296 fs/block_dev.c return blk_poll(q, READ_ONCE(kiocb->ki_cookie), wait);
q 3621 fs/btrfs/disk-io.c struct request_queue *q = bdev_get_queue(device->bdev);
q 3624 fs/btrfs/disk-io.c if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags))
q 487 fs/btrfs/ioctl.c struct request_queue *q;
q 511 fs/btrfs/ioctl.c q = bdev_get_queue(device->bdev);
q 512 fs/btrfs/ioctl.c if (blk_queue_discard(q)) {
q 514 fs/btrfs/ioctl.c minlen = min_t(u64, q->limits.discard_granularity,
q 785 fs/btrfs/volumes.c struct request_queue *q;
q 829 fs/btrfs/volumes.c q = bdev_get_queue(bdev);
q 830 fs/btrfs/volumes.c if (!blk_queue_nonrot(q))
q 2595 fs/btrfs/volumes.c struct request_queue *q;
q 2655 fs/btrfs/volumes.c q = bdev_get_queue(bdev);
q 2696 fs/btrfs/volumes.c if (!blk_queue_nonrot(q))
q 901 fs/ceph/caps.c struct rb_node *q;
q 905 fs/ceph/caps.c for (q = rb_first(&ci->i_caps); q != p;
q 906 fs/ceph/caps.c q = rb_next(q)) {
q 907 fs/ceph/caps.c cap = rb_entry(q, struct ceph_cap,
q 807 fs/cifs/dir.c static int cifs_ci_hash(const struct dentry *dentry, struct qstr *q)
q 815 fs/cifs/dir.c for (i = 0; i < q->len; i += charlen) {
q 816 fs/cifs/dir.c charlen = codepage->char2uni(&q->name[i], q->len - i, &c);
q 822 fs/cifs/dir.c q->hash = end_name_hash(hash);
q 1079 fs/compat_ioctl.c static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
q 1083 fs/compat_ioctl.c b = *(unsigned int *)q;
q 1629 fs/configfs/dir.c struct list_head *p, *q = &cursor->s_sibling;
q 1636 fs/configfs/dir.c list_move(q, &parent_sd->s_children);
q 1637 fs/configfs/dir.c for (p = q->next; p != &parent_sd->s_children; p = p->next) {
q 1676 fs/configfs/dir.c list_move(q, p);
q 1677 fs/configfs/dir.c p = q;
q 1825 fs/dcache.c struct qstr q;
q 1827 fs/dcache.c q.name = name;
q 1828 fs/dcache.c q.hash_len = hashlen_string(parent, name);
q 1829 fs/dcache.c return d_alloc(parent, &q);
q 89 fs/efivarfs/super.c struct qstr q;
q 92 fs/efivarfs/super.c q.name = name;
q 93 fs/efivarfs/super.c q.len = strlen(name);
q 95 fs/efivarfs/super.c err = efivarfs_d_hash(parent, &q);
q 99 fs/efivarfs/super.c d = d_alloc(parent, &q);
q 1151 fs/erofs/zdata.c struct z_erofs_unzip_io *q[],
q 1159 fs/erofs/zdata.c q[JQ_BYPASS] = jobqueue_init(sb, fgq + JQ_BYPASS, true);
q 1160 fs/erofs/zdata.c qtail[JQ_BYPASS] = &q[JQ_BYPASS]->head;
q 1162 fs/erofs/zdata.c q[JQ_SUBMIT] = jobqueue_init(sb, fgq + JQ_SUBMIT, forcefg);
q 1163 fs/erofs/zdata.c qtail[JQ_SUBMIT] = &q[JQ_SUBMIT]->head;
q 1165 fs/erofs/zdata.c return tagptr_cast_ptr(tagptr_fold(tagptr1_t, q[JQ_SUBMIT], !forcefg));
q 1187 fs/erofs/zdata.c static bool postsubmit_is_all_bypassed(struct z_erofs_unzip_io *q[],
q 1198 fs/erofs/zdata.c kvfree(container_of(q[JQ_SUBMIT], struct z_erofs_unzip_io_sb, io));
q 1210 fs/erofs/zdata.c struct z_erofs_unzip_io *q[NR_JOBQUEUES];
q 1224 fs/erofs/zdata.c bi_private = jobqueueset_init(sb, qtail, q, fgq, force_fg);
q 1227 fs/erofs/zdata.c q[JQ_SUBMIT]->head = owned_head;
q 1300 fs/erofs/zdata.c if (postsubmit_is_all_bypassed(q, nr_bios, force_fg))
q 1006 fs/ext2/inode.c static inline int all_zeroes(__le32 *p, __le32 *q)
q 1008 fs/ext2/inode.c while (p < q)
q 1107 fs/ext2/inode.c static inline void ext2_free_data(struct inode *inode, __le32 *p, __le32 *q)
q 1112 fs/ext2/inode.c for ( ; p < q ; p++) {
q 1147 fs/ext2/inode.c static void ext2_free_branches(struct inode *inode, __le32 *p, __le32 *q, int depth)
q 1154 fs/ext2/inode.c for ( ; p < q ; p++) {
q 1179 fs/ext2/inode.c ext2_free_data(inode, p, q);
q 720 fs/ext4/indirect.c static inline int all_zeroes(__le32 *p, __le32 *q)
q 722 fs/ext4/indirect.c while (p < q)
q 1102 fs/ext4/ioctl.c struct request_queue *q = bdev_get_queue(sb->s_bdev);
q 1109 fs/ext4/ioctl.c if (!blk_queue_discard(q))
q 1124 fs/ext4/ioctl.c q->limits.discard_granularity);
q 754 fs/ext4/namei.c struct dx_entry *at, *entries, *p, *q, *m;
q 824 fs/ext4/namei.c q = entries + count - 1;
q 825 fs/ext4/namei.c while (p <= q) {
q 826 fs/ext4/namei.c m = p + (q - p) / 2;
q 829 fs/ext4/namei.c q = m - 1;
q 1235 fs/ext4/namei.c struct dx_map_entry *p, *q, *top = map + count - 1;
q 1242 fs/ext4/namei.c for (p = top, q = p - count; q >= map; p--, q--)
q 1243 fs/ext4/namei.c if (p->hash < q->hash)
q 1244 fs/ext4/namei.c swap(*p, *q);
q 1249 fs/ext4/namei.c q = top;
q 1250 fs/ext4/namei.c while (q-- > map) {
q 1251 fs/ext4/namei.c if (q[1].hash >= q[0].hash)
q 1253 fs/ext4/namei.c swap(*(q+1), *q);
q 4640 fs/ext4/super.c struct request_queue *q = bdev_get_queue(sb->s_bdev);
q 4641 fs/ext4/super.c if (!blk_queue_discard(q))
q 2125 fs/f2fs/file.c struct request_queue *q = bdev_get_queue(sb->s_bdev);
q 2144 fs/f2fs/file.c q->limits.discard_granularity);
q 600 fs/f2fs/segment.c wait_queue_head_t *q = &fcc->flush_wait_queue;
q 629 fs/f2fs/segment.c wait_event_interruptible(*q,
q 1119 fs/f2fs/segment.c struct request_queue *q = bdev_get_queue(bdev);
q 1121 fs/f2fs/segment.c SECTOR_TO_BLOCK(q->limits.max_discard_sectors);
q 1301 fs/f2fs/segment.c struct request_queue *q = bdev_get_queue(bdev);
q 1303 fs/f2fs/segment.c SECTOR_TO_BLOCK(q->limits.max_discard_sectors);
q 1695 fs/f2fs/segment.c wait_queue_head_t *q = &dcc->discard_wait_queue;
q 1706 fs/f2fs/segment.c wait_event_interruptible_timeout(*q,
q 130 fs/fat/file.c struct request_queue *q = bdev_get_queue(sb->s_bdev);
q 136 fs/fat/file.c if (!blk_queue_discard(q))
q 144 fs/fat/file.c q->limits.discard_granularity);
q 1878 fs/fat/inode.c struct request_queue *q = bdev_get_queue(sb->s_bdev);
q 1879 fs/fat/inode.c if (!blk_queue_discard(q))
q 394 fs/fs_context.c char *q;
q 407 fs/fs_context.c q = kvasprintf(GFP_KERNEL, fmt, va);
q 409 fs/fs_context.c if (!q)
q 420 fs/fs_context.c q = kstrdup(p, GFP_KERNEL);
q 426 fs/fs_context.c q = (char *)p;
q 432 fs/fs_context.c printk(KERN_WARNING "%s\n", q + 2);
q 435 fs/fs_context.c printk(KERN_ERR "%s\n", q + 2);
q 438 fs/fs_context.c printk(KERN_NOTICE "%s\n", q + 2);
q 442 fs/fs_context.c kfree(q);
q 457 fs/fs_context.c log->buffer[index] = q;
q 88 fs/fs_pin.c struct hlist_node *q;
q 90 fs/fs_pin.c q = READ_ONCE(p->first);
q 91 fs/fs_pin.c if (!q) {
q 95 fs/fs_pin.c pin_kill(hlist_entry(q, struct fs_pin, s_list));
q 816 fs/gfs2/quota.c struct gfs2_quota q;
q 826 fs/gfs2/quota.c memset(&q, 0, sizeof(struct gfs2_quota));
q 827 fs/gfs2/quota.c err = gfs2_internal_read(ip, (char *)&q, &loc, sizeof(q));
q 831 fs/gfs2/quota.c loc -= sizeof(q); /* gfs2_internal_read would've advanced the loc ptr */
q 833 fs/gfs2/quota.c be64_add_cpu(&q.qu_value, change);
q 834 fs/gfs2/quota.c if (((s64)be64_to_cpu(q.qu_value)) < 0)
q 835 fs/gfs2/quota.c q.qu_value = 0; /* Never go negative on quota usage */
q 836 fs/gfs2/quota.c qd->qd_qb.qb_value = q.qu_value;
q 839 fs/gfs2/quota.c q.qu_warn = cpu_to_be64(fdq->d_spc_softlimit >> sdp->sd_sb.sb_bsize_shift);
q 840 fs/gfs2/quota.c qd->qd_qb.qb_warn = q.qu_warn;
q 843 fs/gfs2/quota.c q.qu_limit = cpu_to_be64(fdq->d_spc_hardlimit >> sdp->sd_sb.sb_bsize_shift);
q 844 fs/gfs2/quota.c qd->qd_qb.qb_limit = q.qu_limit;
q 847 fs/gfs2/quota.c q.qu_value = cpu_to_be64(fdq->d_space >> sdp->sd_sb.sb_bsize_shift);
q 848 fs/gfs2/quota.c qd->qd_qb.qb_value = q.qu_value;
q 852 fs/gfs2/quota.c err = gfs2_write_disk_quota(ip, &q, loc);
q 966 fs/gfs2/quota.c struct gfs2_quota q;
q 971 fs/gfs2/quota.c memset(&q, 0, sizeof(struct gfs2_quota));
q 973 fs/gfs2/quota.c error = gfs2_internal_read(ip, (char *)&q, &pos, sizeof(q));
q 980 fs/gfs2/quota.c qlvb->qb_limit = q.qu_limit;
q 981 fs/gfs2/quota.c qlvb->qb_warn = q.qu_warn;
q 982 fs/gfs2/quota.c qlvb->qb_value = q.qu_value;
q 1397 fs/gfs2/rgrp.c struct request_queue *q = bdev_get_queue(sdp->sd_vfs->s_bdev);
q 1413 fs/gfs2/rgrp.c if (!blk_queue_discard(q))
q 1426 fs/gfs2/rgrp.c q->limits.discard_granularity) >> bs_shift;
q 122 fs/hpfs/alloc.c unsigned i, q;
q 138 fs/hpfs/alloc.c q = nr + n; b = 0;
q 139 fs/hpfs/alloc.c while ((a = tstbits(bmp, q, n + forward)) != 0) {
q 140 fs/hpfs/alloc.c q += a;
q 141 fs/hpfs/alloc.c if (n != 1) q = ((q-1)&~(n-1))+n;
q 143 fs/hpfs/alloc.c if (q>>5 != nr>>5) {
q 145 fs/hpfs/alloc.c q = nr & 0x1f;
q 147 fs/hpfs/alloc.c } else if (q > nr) break;
q 150 fs/hpfs/alloc.c ret = bs + q;
q 159 fs/hpfs/alloc.c q = i<<5;
q 163 fs/hpfs/alloc.c q--; k <<= 1;
q 166 fs/hpfs/alloc.c if (n != 1) q = ((q-1)&~(n-1))+n;
q 167 fs/hpfs/alloc.c while ((a = tstbits(bmp, q, n + forward)) != 0) {
q 168 fs/hpfs/alloc.c q += a;
q 169 fs/hpfs/alloc.c if (n != 1) q = ((q-1)&~(n-1))+n;
q 170 fs/hpfs/alloc.c if (q>>5 > i) break;
q 173 fs/hpfs/alloc.c ret = bs + q;
q 289 fs/hpfs/ea.c secno q = hpfs_alloc_sector(s, fno, 1, 0);
q 290 fs/hpfs/ea.c if (!q) goto bail;
q 291 fs/hpfs/ea.c fnode->ea_secno = cpu_to_le32(q);
q 2099 fs/inode.c DEFINE_WAIT_BIT(q, &inode->i_state, __I_DIO_WAKEUP);
q 2102 fs/inode.c prepare_to_wait(wq, &q.wq_entry, TASK_UNINTERRUPTIBLE);
q 2106 fs/inode.c finish_wait(wq, &q.wq_entry);
q 53 fs/iomap/direct-io.c struct request_queue *q = READ_ONCE(kiocb->private);
q 55 fs/iomap/direct-io.c if (!q)
q 57 fs/iomap/direct-io.c return blk_poll(q, READ_ONCE(kiocb->ki_cookie), spin);
q 39 fs/jffs2/compr_rubin.c unsigned long q;
q 92 fs/jffs2/compr_rubin.c rs->q = 0;
q 108 fs/jffs2/compr_rubin.c while ((rs->q >= UPPER_BIT_RUBIN) ||
q 109 fs/jffs2/compr_rubin.c ((rs->p + rs->q) <= UPPER_BIT_RUBIN)) {
q 112 fs/jffs2/compr_rubin.c ret = pushbit(&rs->pp, (rs->q & UPPER_BIT_RUBIN) ? 1 : 0, 0);
q 115 fs/jffs2/compr_rubin.c rs->q &= LOWER_BITS_RUBIN;
q 116 fs/jffs2/compr_rubin.c rs->q <<= 1;
q 132 fs/jffs2/compr_rubin.c rs->q += i0;
q 144 fs/jffs2/compr_rubin.c pushbit(&rs->pp, (UPPER_BIT_RUBIN & rs->q) ? 1 : 0, 1);
q 145 fs/jffs2/compr_rubin.c rs->q &= LOWER_BITS_RUBIN;
q 146 fs/jffs2/compr_rubin.c rs->q <<= 1;
q 164 fs/jffs2/compr_rubin.c unsigned long q)
q 177 fs/jffs2/compr_rubin.c q &= lower_bits_rubin;
q 178 fs/jffs2/compr_rubin.c q <<= 1;
q 180 fs/jffs2/compr_rubin.c } while ((q >= UPPER_BIT_RUBIN) || ((p + q) <= UPPER_BIT_RUBIN));
q 183 fs/jffs2/compr_rubin.c rs->q = q;
q 202 fs/jffs2/compr_rubin.c unsigned long p = rs->p, q = rs->q;
q 206 fs/jffs2/compr_rubin.c if (q >= UPPER_BIT_RUBIN || ((p + q) <= UPPER_BIT_RUBIN))
q 207 fs/jffs2/compr_rubin.c __do_decode(rs, p, q);
q 216 fs/jffs2/compr_rubin.c threshold = rs->q + i0;
q 219 fs/jffs2/compr_rubin.c rs->q += i0;
q 125 fs/jfs/ioctl.c struct request_queue *q = bdev_get_queue(sb->s_bdev);
q 132 fs/jfs/ioctl.c if (!blk_queue_discard(q)) {
q 142 fs/jfs/ioctl.c q->limits.discard_granularity);
q 377 fs/jfs/super.c struct request_queue *q = bdev_get_queue(sb->s_bdev);
q 383 fs/jfs/super.c if (blk_queue_discard(q))
q 396 fs/jfs/super.c struct request_queue *q = bdev_get_queue(sb->s_bdev);
q 399 fs/jfs/super.c if (blk_queue_discard(q)) {
q 209 fs/minix/itree_common.c static inline int all_zeroes(block_t *p, block_t *q)
q 211 fs/minix/itree_common.c while (p < q)
q 257 fs/minix/itree_common.c static inline void free_data(struct inode *inode, block_t *p, block_t *q)
q 261 fs/minix/itree_common.c for ( ; p < q ; p++) {
q 270 fs/minix/itree_common.c static void free_branches(struct inode *inode, block_t *p, block_t *q, int depth)
q 276 fs/minix/itree_common.c for ( ; p < q ; p++) {
q 291 fs/minix/itree_common.c free_data(inode, p, q);
q 1752 fs/namespace.c struct mount *res, *p, *q, *r, *parent;
q 1760 fs/namespace.c res = q = clone_mnt(mnt, dentry, flag);
q 1761 fs/namespace.c if (IS_ERR(q))
q 1762 fs/namespace.c return q;
q 1764 fs/namespace.c q->mnt_mountpoint = mnt->mnt_mountpoint;
q 1777 fs/namespace.c q = ERR_PTR(-EPERM);
q 1791 fs/namespace.c q = q->mnt_parent;
q 1794 fs/namespace.c parent = q;
q 1795 fs/namespace.c q = clone_mnt(p, p->mnt.mnt_root, flag);
q 1796 fs/namespace.c if (IS_ERR(q))
q 1799 fs/namespace.c list_add_tail(&q->mnt_list, &res->mnt_list);
q 1800 fs/namespace.c attach_mnt(q, parent, p->mnt_mp);
q 1811 fs/namespace.c return q;
q 2094 fs/namespace.c struct mount *q;
q 2096 fs/namespace.c q = __lookup_mnt(&child->mnt_parent->mnt,
q 2098 fs/namespace.c if (q)
q 2099 fs/namespace.c mnt_change_mountpoint(child, smp, q);
q 3217 fs/namespace.c struct mount *p, *q;
q 3260 fs/namespace.c q = new;
q 3262 fs/namespace.c q->mnt_ns = new_ns;
q 3266 fs/namespace.c new_fs->root.mnt = mntget(&q->mnt);
q 3270 fs/namespace.c new_fs->pwd.mnt = mntget(&q->mnt);
q 3275 fs/namespace.c q = next_mnt(q, new);
q 3276 fs/namespace.c if (!q)
q 3278 fs/namespace.c while (p->mnt.mnt_root != q->mnt.mnt_root)
q 7117 fs/nfs/nfs4proc.c wait_queue_head_t *q = &clp->cl_lock_waitq;
q 7135 fs/nfs/nfs4proc.c add_wait_queue(q, &wait);
q 7138 fs/nfs/nfs4proc.c finish_wait(q, &wait);
q 7146 fs/nfs/nfs4proc.c finish_wait(q, &wait);
q 217 fs/nfsd/blocklayout.c struct request_queue *q = bdev->bd_disk->queue;
q 231 fs/nfsd/blocklayout.c if (WARN_ON_ONCE(!blk_queue_scsi_passthrough(q)))
q 239 fs/nfsd/blocklayout.c rq = blk_get_request(q, REQ_OP_SCSI_IN, 0);
q 246 fs/nfsd/blocklayout.c error = blk_rq_map_kern(q, rq, buf, bufflen, GFP_KERNEL);
q 257 fs/nfsd/blocklayout.c blk_execute_rq(rq->q, NULL, rq, 1);
q 1071 fs/nilfs2/ioctl.c struct request_queue *q = bdev_get_queue(nilfs->ns_bdev);
q 1078 fs/nilfs2/ioctl.c if (!blk_queue_discard(q))
q 1084 fs/nilfs2/ioctl.c range.minlen = max_t(u64, range.minlen, q->limits.discard_granularity);
q 921 fs/ocfs2/ioctl.c struct request_queue *q = bdev_get_queue(sb->s_bdev);
q 928 fs/ocfs2/ioctl.c if (!blk_queue_discard(q))
q 934 fs/ocfs2/ioctl.c range.minlen = max_t(u64, q->limits.discard_granularity,
q 13 fs/orangefs/orangefs-bufmap.c wait_queue_head_t q;
q 20 fs/orangefs/orangefs-bufmap.c .q = __WAIT_QUEUE_HEAD_INITIALIZER(rw_map.q)
q 24 fs/orangefs/orangefs-bufmap.c .q = __WAIT_QUEUE_HEAD_INITIALIZER(readdir_map.q)
q 30 fs/orangefs/orangefs-bufmap.c spin_lock(&m->q.lock);
q 33 fs/orangefs/orangefs-bufmap.c wake_up_all_locked(&m->q);
q 34 fs/orangefs/orangefs-bufmap.c spin_unlock(&m->q.lock);
q 39 fs/orangefs/orangefs-bufmap.c spin_lock(&m->q.lock);
q 41 fs/orangefs/orangefs-bufmap.c spin_unlock(&m->q.lock);
q 47 fs/orangefs/orangefs-bufmap.c spin_lock(&m->q.lock);
q 51 fs/orangefs/orangefs-bufmap.c __add_wait_queue_entry_tail(&m->q, &wait);
q 57 fs/orangefs/orangefs-bufmap.c spin_unlock(&m->q.lock);
q 59 fs/orangefs/orangefs-bufmap.c spin_lock(&m->q.lock);
q 61 fs/orangefs/orangefs-bufmap.c __remove_wait_queue(&m->q, &wait);
q 65 fs/orangefs/orangefs-bufmap.c spin_unlock(&m->q.lock);
q 71 fs/orangefs/orangefs-bufmap.c spin_lock(&m->q.lock);
q 75 fs/orangefs/orangefs-bufmap.c wake_up_locked(&m->q);
q 77 fs/orangefs/orangefs-bufmap.c wake_up_all_locked(&m->q);
q 78 fs/orangefs/orangefs-bufmap.c spin_unlock(&m->q.lock);
q 89 fs/orangefs/orangefs-bufmap.c __add_wait_queue_entry_tail_exclusive(&m->q, &wait);
q 101 fs/orangefs/orangefs-bufmap.c spin_unlock(&m->q.lock);
q 103 fs/orangefs/orangefs-bufmap.c spin_lock(&m->q.lock);
q 114 fs/orangefs/orangefs-bufmap.c else if (left <= 0 && waitqueue_active(&m->q))
q 115 fs/orangefs/orangefs-bufmap.c __wake_up_locked_key(&m->q, TASK_INTERRUPTIBLE, NULL);
q 127 fs/orangefs/orangefs-bufmap.c spin_lock(&m->q.lock);
q 135 fs/orangefs/orangefs-bufmap.c spin_unlock(&m->q.lock);
q 501 fs/proc/base.c int q;
q 504 fs/proc/base.c for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
q 505 fs/proc/base.c unsigned long bt = lr->backtrace[q];
q 269 fs/sysv/itree.c static inline int all_zeroes(sysv_zone_t *p, sysv_zone_t *q)
q 271 fs/sysv/itree.c while (p < q)
q 326 fs/sysv/itree.c static inline void free_data(struct inode *inode, sysv_zone_t *p, sysv_zone_t *q)
q 328 fs/sysv/itree.c for ( ; p < q ; p++) {
q 338 fs/sysv/itree.c static void free_branches(struct inode *inode, sysv_zone_t *p, sysv_zone_t *q, int depth)
q 344 fs/sysv/itree.c for ( ; p < q ; p++) {
q 361 fs/sysv/itree.c free_data(inode, p, q);
q 131 fs/ufs/inode.c Indirect chain[4], *q = chain;
q 149 fs/ufs/inode.c if (!grow_chain32(ufsi, NULL, &ufsi->i_u1.i_data[*p++], chain, q))
q 151 fs/ufs/inode.c if (!q->key32)
q 159 fs/ufs/inode.c fs32_to_cpu(sb, q->key32) + (n>>shift));
q 163 fs/ufs/inode.c if (!grow_chain32(ufsi, bh, ptr, chain, ++q))
q 165 fs/ufs/inode.c if (!q->key32)
q 168 fs/ufs/inode.c res = fs32_to_cpu(sb, q->key32);
q 172 fs/ufs/inode.c if (!grow_chain64(ufsi, NULL, &ufsi->i_u1.u2_i_data[*p++], chain, q))
q 174 fs/ufs/inode.c if (!q->key64)
q 183 fs/ufs/inode.c fs64_to_cpu(sb, q->key64) + (n>>shift));
q 187 fs/ufs/inode.c if (!grow_chain64(ufsi, bh, ptr, chain, ++q))
q 189 fs/ufs/inode.c if (!q->key64)
q 192 fs/ufs/inode.c res = fs64_to_cpu(sb, q->key64);
q 196 fs/ufs/inode.c while (q > chain) {
q 197 fs/ufs/inode.c brelse(q->bh);
q 198 fs/ufs/inode.c q--;
q 203 fs/ufs/inode.c while (q > chain) {
q 204 fs/ufs/inode.c brelse(q->bh);
q 205 fs/ufs/inode.c q--;
q 150 fs/xfs/xfs_discard.c struct request_queue *q = bdev_get_queue(mp->m_ddev_targp->bt_bdev);
q 151 fs/xfs/xfs_discard.c unsigned int granularity = q->limits.discard_granularity;
q 160 fs/xfs/xfs_discard.c if (!blk_queue_discard(q))
q 72 fs/xfs/xfs_dquot.c struct xfs_quotainfo *q = mp->m_quotainfo;
q 78 fs/xfs/xfs_dquot.c defq = xfs_get_defquota(dq, q);
q 214 fs/xfs/xfs_dquot.c struct xfs_quotainfo *q = mp->m_quotainfo;
q 227 fs/xfs/xfs_dquot.c curid = id - (id % q->qi_dqperchunk);
q 228 fs/xfs/xfs_dquot.c memset(d, 0, BBTOB(q->qi_dqchunklen));
q 229 fs/xfs/xfs_dquot.c for (i = 0; i < q->qi_dqperchunk; i++, d++, curid++) {
q 245 fs/xfs/xfs_dquot.c xfs_trans_log_buf(tp, bp, 0, BBTOB(q->qi_dqchunklen) - 1);
q 39 fs/xfs/xfs_qm_syscalls.c struct xfs_quotainfo *q = mp->m_quotainfo;
q 62 fs/xfs/xfs_qm_syscalls.c ASSERT(q);
q 63 fs/xfs/xfs_qm_syscalls.c mutex_lock(&q->qi_quotaofflock);
q 74 fs/xfs/xfs_qm_syscalls.c mutex_unlock(&q->qi_quotaofflock);
q 179 fs/xfs/xfs_qm_syscalls.c mutex_unlock(&q->qi_quotaofflock);
q 187 fs/xfs/xfs_qm_syscalls.c if ((dqtype & XFS_QMOPT_UQUOTA) && q->qi_uquotaip) {
q 188 fs/xfs/xfs_qm_syscalls.c xfs_irele(q->qi_uquotaip);
q 189 fs/xfs/xfs_qm_syscalls.c q->qi_uquotaip = NULL;
q 191 fs/xfs/xfs_qm_syscalls.c if ((dqtype & XFS_QMOPT_GQUOTA) && q->qi_gquotaip) {
q 192 fs/xfs/xfs_qm_syscalls.c xfs_irele(q->qi_gquotaip);
q 193 fs/xfs/xfs_qm_syscalls.c q->qi_gquotaip = NULL;
q 195 fs/xfs/xfs_qm_syscalls.c if ((dqtype & XFS_QMOPT_PQUOTA) && q->qi_pquotaip) {
q 196 fs/xfs/xfs_qm_syscalls.c xfs_irele(q->qi_pquotaip);
q 197 fs/xfs/xfs_qm_syscalls.c q->qi_pquotaip = NULL;
q 201 fs/xfs/xfs_qm_syscalls.c mutex_unlock(&q->qi_quotaofflock);
q 385 fs/xfs/xfs_qm_syscalls.c struct xfs_quotainfo *q = mp->m_quotainfo;
q 403 fs/xfs/xfs_qm_syscalls.c mutex_lock(&q->qi_quotaofflock);
q 418 fs/xfs/xfs_qm_syscalls.c defq = xfs_get_defquota(dqp, q);
q 502 fs/xfs/xfs_qm_syscalls.c q->qi_btimelimit = newlim->d_spc_timer;
q 506 fs/xfs/xfs_qm_syscalls.c q->qi_itimelimit = newlim->d_ino_timer;
q 510 fs/xfs/xfs_qm_syscalls.c q->qi_rtbtimelimit = newlim->d_rt_spc_timer;
q 514 fs/xfs/xfs_qm_syscalls.c q->qi_bwarnlimit = newlim->d_spc_warns;
q 516 fs/xfs/xfs_qm_syscalls.c q->qi_iwarnlimit = newlim->d_ino_warns;
q 518 fs/xfs/xfs_qm_syscalls.c q->qi_rtbwarnlimit = newlim->d_rt_spc_warns;
q 537 fs/xfs/xfs_qm_syscalls.c mutex_unlock(&q->qi_quotaofflock);
q 26 fs/xfs/xfs_quotaops.c struct xfs_quotainfo *q = mp->m_quotainfo;
q 40 fs/xfs/xfs_quotaops.c tstate->spc_timelimit = q->qi_btimelimit;
q 41 fs/xfs/xfs_quotaops.c tstate->ino_timelimit = q->qi_itimelimit;
q 42 fs/xfs/xfs_quotaops.c tstate->rt_spc_timelimit = q->qi_rtbtimelimit;
q 43 fs/xfs/xfs_quotaops.c tstate->spc_warnlimit = q->qi_bwarnlimit;
q 44 fs/xfs/xfs_quotaops.c tstate->ino_warnlimit = q->qi_iwarnlimit;
q 45 fs/xfs/xfs_quotaops.c tstate->rt_spc_warnlimit = q->qi_rtbwarnlimit;
q 60 fs/xfs/xfs_quotaops.c struct xfs_quotainfo *q = mp->m_quotainfo;
q 65 fs/xfs/xfs_quotaops.c state->s_incoredqs = q->qi_dquots;
q 79 fs/xfs/xfs_quotaops.c xfs_qm_fill_state(&state->s_state[USRQUOTA], mp, q->qi_uquotaip,
q 81 fs/xfs/xfs_quotaops.c xfs_qm_fill_state(&state->s_state[GRPQUOTA], mp, q->qi_gquotaip,
q 83 fs/xfs/xfs_quotaops.c xfs_qm_fill_state(&state->s_state[PRJQUOTA], mp, q->qi_pquotaip,
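The FITRIM handlers scattered above (fs/ext4/ioctl.c, fs/f2fs/file.c, fs/fat/file.c, fs/jfs/ioctl.c, fs/nilfs2/ioctl.c, fs/ocfs2/ioctl.c, fs/xfs/xfs_discard.c) all open with the same two steps: bail out if the queue cannot discard, then raise the caller's minimum extent length to the device's discard granularity. A minimal sketch of just that prologue; my_trim_fs() is hypothetical, while bdev_get_queue(), blk_queue_discard() and q->limits.discard_granularity are the real block-layer API.

#include <linux/blkdev.h>
#include <linux/fs.h>

static int my_trim_fs(struct super_block *sb, struct fstrim_range *range)
{
	struct request_queue *q = bdev_get_queue(sb->s_bdev);

	if (!blk_queue_discard(q))
		return -EOPNOTSUPP;	/* device has no discard support */

	/* Never trim extents smaller than the device can usefully discard. */
	range->minlen = max_t(u64, range->minlen,
			      q->limits.discard_granularity);
	return 0;	/* a real handler would now walk the free extents */
}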
q 1702 fs/xfs/xfs_super.c struct request_queue *q = bdev_get_queue(sb->s_bdev);
q 1704 fs/xfs/xfs_super.c if (!blk_queue_discard(q)) {
q 282 fs/xfs/xfs_trans_dquot.c struct xfs_dqtrx *q)
q 284 fs/xfs/xfs_trans_dquot.c ASSERT(q[0].qt_dquot != NULL);
q 285 fs/xfs/xfs_trans_dquot.c if (q[1].qt_dquot == NULL) {
q 286 fs/xfs/xfs_trans_dquot.c xfs_dqlock(q[0].qt_dquot);
q 287 fs/xfs/xfs_trans_dquot.c xfs_trans_dqjoin(tp, q[0].qt_dquot);
q 290 fs/xfs/xfs_trans_dquot.c xfs_dqlock2(q[0].qt_dquot, q[1].qt_dquot);
q 291 fs/xfs/xfs_trans_dquot.c xfs_trans_dqjoin(tp, q[0].qt_dquot);
q 292 fs/xfs/xfs_trans_dquot.c xfs_trans_dqjoin(tp, q[1].qt_dquot);
q 588 fs/xfs/xfs_trans_dquot.c xfs_quotainfo_t *q = mp->m_quotainfo;
q 594 fs/xfs/xfs_trans_dquot.c defq = xfs_get_defquota(dqp, q);
q 833 fs/xfs/xfs_trans_dquot.c xfs_qoff_logitem_t *q;
q 837 fs/xfs/xfs_trans_dquot.c q = xfs_qm_qoff_logitem_init(tp->t_mountp, startqoff, flags);
q 838 fs/xfs/xfs_trans_dquot.c ASSERT(q != NULL);
q 843 fs/xfs/xfs_trans_dquot.c xfs_trans_add_item(tp, &q->qql_item);
q 844 fs/xfs/xfs_trans_dquot.c return q;
q 64 include/crypto/b128ops.h static inline void u128_xor(u128 *r, const u128 *p, const u128 *q)
q 66 include/crypto/b128ops.h r->a = p->a ^ q->a;
q 67 include/crypto/b128ops.h r->b = p->b ^ q->b;
q 70 include/crypto/b128ops.h static inline void be128_xor(be128 *r, const be128 *p, const be128 *q)
q 72 include/crypto/b128ops.h u128_xor((u128 *)r, (u128 *)p, (u128 *)q);
q 75 include/crypto/b128ops.h static inline void le128_xor(le128 *r, const le128 *p, const le128 *q)
q 77 include/crypto/b128ops.h u128_xor((u128 *)r, (u128 *)p, (u128 *)q);
q 37 include/crypto/dh.h void *q;
q 36 include/crypto/internal/rsa.h const u8 *q;
q 455 include/linux/bio.h void generic_start_io_acct(struct request_queue *q, int op,
q 457 include/linux/bio.h void generic_end_io_acct(struct request_queue *q, int op,
q 110 include/linux/blk-cgroup.h struct request_queue *q;
q 188 include/linux/blk-cgroup.h struct request_queue *q, bool update_hint);
q 190 include/linux/blk-cgroup.h struct request_queue *q);
q 192 include/linux/blk-cgroup.h struct request_queue *q);
q 193 include/linux/blk-cgroup.h int blkcg_init_queue(struct request_queue *q);
q 194 include/linux/blk-cgroup.h void blkcg_drain_queue(struct request_queue *q);
q 195 include/linux/blk-cgroup.h void blkcg_exit_queue(struct request_queue *q);
q 200 include/linux/blk-cgroup.h int blkcg_activate_policy(struct request_queue *q,
q 202 include/linux/blk-cgroup.h void blkcg_deactivate_policy(struct request_queue *q,
q 359 include/linux/blk-cgroup.h struct request_queue *q,
q 365 include/linux/blk-cgroup.h return q->root_blkg;
q 368 include/linux/blk-cgroup.h if (blkg && blkg->q == q)
q 371 include/linux/blk-cgroup.h return blkg_lookup_slowpath(blkcg, q, update_hint);
q 383 include/linux/blk-cgroup.h struct request_queue *q)
q 386 include/linux/blk-cgroup.h return __blkg_lookup(blkcg, q, false);
q 395 include/linux/blk-cgroup.h static inline struct blkcg_gq *blk_queue_root_blkg(struct request_queue *q)
q 397 include/linux/blk-cgroup.h return q->root_blkg;
q 564 include/linux/blk-cgroup.h (p_blkg)->q, false)))
q 579 include/linux/blk-cgroup.h (p_blkg)->q, false)))
q 704 include/linux/blk-cgroup.h extern bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
q 707 include/linux/blk-cgroup.h static inline bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
q 726 include/linux/blk-cgroup.h static inline bool blkcg_bio_issue_check(struct request_queue *q,
q 745 include/linux/blk-cgroup.h throtl = blk_throtl_bio(q, blkg, bio);
q 816 include/linux/blk-cgroup.h void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay);
q 842 include/linux/blk-cgroup.h static inline void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay) { }
q 845 include/linux/blk-cgroup.h static inline struct blkcg_gq *blk_queue_root_blkg(struct request_queue *q)
q 847 include/linux/blk-cgroup.h static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
q 848 include/linux/blk-cgroup.h static inline void blkcg_drain_queue(struct request_queue *q) { }
q 849 include/linux/blk-cgroup.h static inline void blkcg_exit_queue(struct request_queue *q) { }
q 852 include/linux/blk-cgroup.h static inline int blkcg_activate_policy(struct request_queue *q,
q 854 include/linux/blk-cgroup.h static inline void blkcg_deactivate_policy(struct request_queue *q,
q 869 include/linux/blk-cgroup.h static inline bool blkcg_bio_issue_check(struct request_queue *q,
q 872 include/linux/blk-cgroup.h #define blk_queue_for_each_rl(rl, q) \
q 873 include/linux/blk-cgroup.h for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)
q 360 include/linux/blk-mq.h for ((i) = 0; (i) < (q)->nr_hw_queues && \
q 361 include/linux/blk-mq.h ({ hctx = (q)->queue_hw_ctx[i]; 1; }); (i)++)
q 379 include/linux/blk-mq.h if (rq->q->mq_ops->cleanup_rq)
q 380 include/linux/blk-mq.h rq->q->mq_ops->cleanup_rq(rq);
q 13 include/linux/blk-pm.h extern void blk_pm_runtime_init(struct request_queue *q, struct device *dev);
q 14 include/linux/blk-pm.h extern int blk_pre_runtime_suspend(struct request_queue *q);
q 15 include/linux/blk-pm.h extern void blk_post_runtime_suspend(struct request_queue *q, int err);
q 16 include/linux/blk-pm.h extern void blk_pre_runtime_resume(struct request_queue *q);
q 17 include/linux/blk-pm.h extern void blk_post_runtime_resume(struct request_queue *q, int err);
q 18 include/linux/blk-pm.h extern void blk_set_runtime_active(struct request_queue *q);
q 20 include/linux/blk-pm.h static inline void blk_pm_runtime_init(struct request_queue *q,
q 133 include/linux/blkdev.h struct request_queue *q;
q 290 include/linux/blkdev.h typedef blk_qc_t (make_request_fn) (struct request_queue *q, struct bio *bio);
q 626 include/linux/blkdev.h void blk_queue_flag_set(unsigned int flag, struct request_queue *q);
q 627 include/linux/blkdev.h void blk_queue_flag_clear(unsigned int flag, struct request_queue *q);
q 628 include/linux/blkdev.h bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q);
q 630 include/linux/blkdev.h #define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
q 631 include/linux/blkdev.h #define blk_queue_dying(q) test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
q 632 include/linux/blkdev.h #define blk_queue_dead(q) test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)
q 633 include/linux/blkdev.h #define blk_queue_init_done(q) test_bit(QUEUE_FLAG_INIT_DONE, &(q)->queue_flags)
q 634 include/linux/blkdev.h #define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
q 635 include/linux/blkdev.h #define blk_queue_noxmerges(q) \
q 636 include/linux/blkdev.h test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
q 637 include/linux/blkdev.h #define blk_queue_nonrot(q) test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
q 638 include/linux/blkdev.h #define blk_queue_io_stat(q) test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
q 639 include/linux/blkdev.h #define blk_queue_add_random(q) test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags)
q 640 include/linux/blkdev.h #define blk_queue_discard(q) test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
q 641 include/linux/blkdev.h #define blk_queue_zone_resetall(q) \
q 642 include/linux/blkdev.h test_bit(QUEUE_FLAG_ZONE_RESETALL, &(q)->queue_flags)
q 643 include/linux/blkdev.h #define blk_queue_secure_erase(q) \
q 644 include/linux/blkdev.h (test_bit(QUEUE_FLAG_SECERASE, &(q)->queue_flags))
q 645 include/linux/blkdev.h #define blk_queue_dax(q) test_bit(QUEUE_FLAG_DAX, &(q)->queue_flags)
q 646 include/linux/blkdev.h #define blk_queue_scsi_passthrough(q) \
q 647 include/linux/blkdev.h test_bit(QUEUE_FLAG_SCSI_PASSTHROUGH, &(q)->queue_flags)
q 648 include/linux/blkdev.h #define blk_queue_pci_p2pdma(q) \
q 649 include/linux/blkdev.h test_bit(QUEUE_FLAG_PCI_P2PDMA, &(q)->queue_flags)
q 651 include/linux/blkdev.h #define blk_queue_rq_alloc_time(q) \
q 652 include/linux/blkdev.h test_bit(QUEUE_FLAG_RQ_ALLOC_TIME, &(q)->queue_flags)
q 654 include/linux/blkdev.h #define blk_queue_rq_alloc_time(q) false
q 660 include/linux/blkdev.h #define blk_queue_quiesced(q) test_bit(QUEUE_FLAG_QUIESCED, &(q)->queue_flags)
q 661 include/linux/blkdev.h #define blk_queue_pm_only(q) atomic_read(&(q)->pm_only)
q 662 include/linux/blkdev.h #define blk_queue_fua(q) test_bit(QUEUE_FLAG_FUA, &(q)->queue_flags)
q 663 include/linux/blkdev.h #define blk_queue_registered(q) test_bit(QUEUE_FLAG_REGISTERED, &(q)->queue_flags)
q 665 include/linux/blkdev.h extern void blk_set_pm_only(struct request_queue *q);
q 666 include/linux/blkdev.h extern void blk_clear_pm_only(struct request_queue *q);
q 684 include/linux/blkdev.h static inline bool queue_is_mq(struct request_queue *q)
q 686 include/linux/blkdev.h return q->mq_ops;
q 690 include/linux/blkdev.h blk_queue_zoned_model(struct request_queue *q)
q 692 include/linux/blkdev.h return q->limits.zoned;
q 695 include/linux/blkdev.h static inline bool blk_queue_is_zoned(struct request_queue *q)
q 697 include/linux/blkdev.h switch (blk_queue_zoned_model(q)) {
q 706 include/linux/blkdev.h static inline sector_t blk_queue_zone_sectors(struct request_queue *q)
q 708 include/linux/blkdev.h return blk_queue_is_zoned(q) ? q->limits.chunk_sectors : 0;
q 712 include/linux/blkdev.h static inline unsigned int blk_queue_nr_zones(struct request_queue *q)
q 714 include/linux/blkdev.h return blk_queue_is_zoned(q) ? q->nr_zones : 0;
q 717 include/linux/blkdev.h static inline unsigned int blk_queue_zone_no(struct request_queue *q,
q 720 include/linux/blkdev.h if (!blk_queue_is_zoned(q))
q 722 include/linux/blkdev.h return sector >> ilog2(q->limits.chunk_sectors);
q 725 include/linux/blkdev.h static inline bool blk_queue_zone_is_seq(struct request_queue *q,
q 728 include/linux/blkdev.h if (!blk_queue_is_zoned(q) || !q->seq_zones_bitmap)
q 730 include/linux/blkdev.h return test_bit(blk_queue_zone_no(q, sector), q->seq_zones_bitmap);
q 733 include/linux/blkdev.h static inline unsigned int blk_queue_nr_zones(struct request_queue *q)
q 772 include/linux/blkdev.h static inline unsigned int blk_queue_depth(struct request_queue *q)
q 774 include/linux/blkdev.h if (q->queue_depth)
q 775 include/linux/blkdev.h return q->queue_depth;
q 777 include/linux/blkdev.h return q->nr_requests;
q 852 include/linux/blkdev.h extern void blk_rq_init(struct request_queue *q, struct request *rq);
q 856 include/linux/blkdev.h extern int blk_lld_busy(struct request_queue *q);
q 862 include/linux/blkdev.h extern blk_status_t blk_insert_cloned_request(struct request_queue *q,
q 874 include/linux/blkdev.h extern int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags);
q 875 include/linux/blkdev.h extern void blk_queue_exit(struct request_queue *q);
q 876 include/linux/blkdev.h extern void blk_sync_queue(struct request_queue *q);
q 896 include/linux/blkdev.h int blk_poll(struct request_queue *q, blk_qc_t cookie, bool spin);
q 960 include/linux/blkdev.h return blk_queue_zone_no(rq->q, blk_rq_pos(rq));
q 965 include/linux/blkdev.h return blk_queue_zone_is_seq(rq->q, blk_rq_pos(rq));
q 993 include/linux/blkdev.h static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
q 997 include/linux/blkdev.h return min(q->limits.max_discard_sectors,
q 1001 include/linux/blkdev.h return q->limits.max_write_same_sectors;
q 1004 include/linux/blkdev.h return q->limits.max_write_zeroes_sectors;
q 1006 include/linux/blkdev.h return q->limits.max_sectors;
q 1013 include/linux/blkdev.h static inline unsigned int blk_max_size_offset(struct request_queue *q,
q 1016 include/linux/blkdev.h if (!q->limits.chunk_sectors)
q 1017 include/linux/blkdev.h return q->limits.max_sectors;
q 1019 include/linux/blkdev.h return min(q->limits.max_sectors, (unsigned int)(q->limits.chunk_sectors -
q 1020 include/linux/blkdev.h (offset & (q->limits.chunk_sectors - 1))));
q 1026 include/linux/blkdev.h struct request_queue *q = rq->q;
q 1029 include/linux/blkdev.h return q->limits.max_hw_sectors;
q 1031 include/linux/blkdev.h if (!q->limits.chunk_sectors ||
q 1034 include/linux/blkdev.h return blk_queue_get_max_sectors(q, req_op(rq));
q 1036 include/linux/blkdev.h return min(blk_max_size_offset(q, offset),
q 1037 include/linux/blkdev.h blk_queue_get_max_sectors(q, req_op(rq)));
q 1077 include/linux/blkdev.h extern void blk_queue_max_discard_sectors(struct request_queue *q,
q 1079 include/linux/blkdev.h extern void blk_queue_max_write_same_sectors(struct request_queue *q,
q 1081 include/linux/blkdev.h extern void blk_queue_max_write_zeroes_sectors(struct request_queue *q,
q 1085 include/linux/blkdev.h extern void blk_queue_alignment_offset(struct request_queue *q,
q 1088 include/linux/blkdev.h extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
q 1090 include/linux/blkdev.h extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
q 1091 include/linux/blkdev.h extern void blk_set_queue_depth(struct request_queue *q, unsigned int depth);
request_queue *q, unsigned int depth); q 1102 include/linux/blkdev.h extern int blk_queue_dma_drain(struct request_queue *q, q 1110 include/linux/blkdev.h extern void blk_queue_write_cache(struct request_queue *q, bool enabled, bool fua); q 1111 include/linux/blkdev.h extern void blk_queue_required_elevator_features(struct request_queue *q, q 1113 include/linux/blkdev.h extern bool blk_queue_can_use_dma_map_merging(struct request_queue *q, q 1262 include/linux/blkdev.h static inline unsigned long queue_segment_boundary(const struct request_queue *q) q 1264 include/linux/blkdev.h return q->limits.seg_boundary_mask; q 1267 include/linux/blkdev.h static inline unsigned long queue_virt_boundary(const struct request_queue *q) q 1269 include/linux/blkdev.h return q->limits.virt_boundary_mask; q 1272 include/linux/blkdev.h static inline unsigned int queue_max_sectors(const struct request_queue *q) q 1274 include/linux/blkdev.h return q->limits.max_sectors; q 1277 include/linux/blkdev.h static inline unsigned int queue_max_hw_sectors(const struct request_queue *q) q 1279 include/linux/blkdev.h return q->limits.max_hw_sectors; q 1282 include/linux/blkdev.h static inline unsigned short queue_max_segments(const struct request_queue *q) q 1284 include/linux/blkdev.h return q->limits.max_segments; q 1287 include/linux/blkdev.h static inline unsigned short queue_max_discard_segments(const struct request_queue *q) q 1289 include/linux/blkdev.h return q->limits.max_discard_segments; q 1292 include/linux/blkdev.h static inline unsigned int queue_max_segment_size(const struct request_queue *q) q 1294 include/linux/blkdev.h return q->limits.max_segment_size; q 1297 include/linux/blkdev.h static inline unsigned queue_logical_block_size(const struct request_queue *q) q 1301 include/linux/blkdev.h if (q && q->limits.logical_block_size) q 1302 include/linux/blkdev.h retval = q->limits.logical_block_size; q 1312 include/linux/blkdev.h static inline unsigned int queue_physical_block_size(const struct request_queue *q) q 1314 include/linux/blkdev.h return q->limits.physical_block_size; q 1322 include/linux/blkdev.h static inline unsigned int queue_io_min(const struct request_queue *q) q 1324 include/linux/blkdev.h return q->limits.io_min; q 1332 include/linux/blkdev.h static inline unsigned int queue_io_opt(const struct request_queue *q) q 1334 include/linux/blkdev.h return q->limits.io_opt; q 1342 include/linux/blkdev.h static inline int queue_alignment_offset(const struct request_queue *q) q 1344 include/linux/blkdev.h if (q->limits.misaligned) q 1347 include/linux/blkdev.h return q->limits.alignment_offset; q 1361 include/linux/blkdev.h struct request_queue *q = bdev_get_queue(bdev); q 1363 include/linux/blkdev.h if (q->limits.misaligned) q 1369 include/linux/blkdev.h return q->limits.alignment_offset; q 1372 include/linux/blkdev.h static inline int queue_discard_alignment(const struct request_queue *q) q 1374 include/linux/blkdev.h if (q->limits.discard_misaligned) q 1377 include/linux/blkdev.h return q->limits.discard_alignment; q 1405 include/linux/blkdev.h struct request_queue *q = bdev_get_queue(bdev); q 1410 include/linux/blkdev.h return q->limits.discard_alignment; q 1415 include/linux/blkdev.h struct request_queue *q = bdev_get_queue(bdev); q 1417 include/linux/blkdev.h if (q) q 1418 include/linux/blkdev.h return q->limits.max_write_same_sectors; q 1425 include/linux/blkdev.h struct request_queue *q = bdev_get_queue(bdev); q 1427 include/linux/blkdev.h if (q) q 1428 include/linux/blkdev.h return 
q->limits.max_write_zeroes_sectors; q 1435 include/linux/blkdev.h struct request_queue *q = bdev_get_queue(bdev); q 1437 include/linux/blkdev.h if (q) q 1438 include/linux/blkdev.h return blk_queue_zoned_model(q); q 1445 include/linux/blkdev.h struct request_queue *q = bdev_get_queue(bdev); q 1447 include/linux/blkdev.h if (q) q 1448 include/linux/blkdev.h return blk_queue_is_zoned(q); q 1455 include/linux/blkdev.h struct request_queue *q = bdev_get_queue(bdev); q 1457 include/linux/blkdev.h if (q) q 1458 include/linux/blkdev.h return blk_queue_zone_sectors(q); q 1462 include/linux/blkdev.h static inline int queue_dma_alignment(const struct request_queue *q) q 1464 include/linux/blkdev.h return q ? q->dma_alignment : 511; q 1467 include/linux/blkdev.h static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr, q 1470 include/linux/blkdev.h unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask; q 1570 include/linux/blkdev.h static inline void blk_queue_max_integrity_segments(struct request_queue *q, q 1573 include/linux/blkdev.h q->limits.max_integrity_segments = segs; q 1577 include/linux/blkdev.h queue_max_integrity_segments(const struct request_queue *q) q 1579 include/linux/blkdev.h return q->limits.max_integrity_segments; q 1610 include/linux/blkdev.h if (WARN_ON_ONCE(queue_max_integrity_segments(rq->q) > 1)) q 1626 include/linux/blkdev.h static inline int blk_rq_count_integrity_sg(struct request_queue *q, q 1631 include/linux/blkdev.h static inline int blk_rq_map_integrity_sg(struct request_queue *q, q 1656 include/linux/blkdev.h static inline void blk_queue_max_integrity_segments(struct request_queue *q, q 1660 include/linux/blkdev.h static inline unsigned short queue_max_integrity_segments(const struct request_queue *q) q 1742 include/linux/blkdev.h return rq->q->seq_zones_wlock && q 1743 include/linux/blkdev.h test_bit(blk_rq_zone_no(rq), rq->q->seq_zones_wlock); q 52 include/linux/blktrace_api.h #define blk_add_cgroup_trace_msg(q, cg, fmt, ...) \ q 57 include/linux/blktrace_api.h bt = rcu_dereference((q)->blk_trace); \ q 62 include/linux/blktrace_api.h #define blk_add_trace_msg(q, fmt, ...) \ q 63 include/linux/blktrace_api.h blk_add_cgroup_trace_msg(q, NULL, fmt, ##__VA_ARGS__) q 66 include/linux/blktrace_api.h static inline bool blk_trace_note_message_enabled(struct request_queue *q) q 72 include/linux/blktrace_api.h bt = rcu_dereference(q->blk_trace); q 78 include/linux/blktrace_api.h extern void blk_add_driver_data(struct request_queue *q, struct request *rq, q 80 include/linux/blktrace_api.h extern int blk_trace_setup(struct request_queue *q, char *name, dev_t dev, q 83 include/linux/blktrace_api.h extern int blk_trace_startstop(struct request_queue *q, int start); q 84 include/linux/blktrace_api.h extern int blk_trace_remove(struct request_queue *q); q 92 include/linux/blktrace_api.h # define blk_trace_shutdown(q) do { } while (0) q 93 include/linux/blktrace_api.h # define blk_add_driver_data(q, rq, data, len) do {} while (0) q 94 include/linux/blktrace_api.h # define blk_trace_setup(q, name, dev, bdev, arg) (-ENOTTY) q 95 include/linux/blktrace_api.h # define blk_trace_startstop(q, start) (-ENOTTY) q 96 include/linux/blktrace_api.h # define blk_trace_remove(q) (-ENOTTY) q 97 include/linux/blktrace_api.h # define blk_add_trace_msg(q, fmt, ...) do { } while (0) q 98 include/linux/blktrace_api.h # define blk_add_cgroup_trace_msg(q, cg, fmt, ...) 
do { } while (0) q 100 include/linux/blktrace_api.h # define blk_trace_note_message_enabled(q) (false) q 25 include/linux/bsg.h int bsg_register_queue(struct request_queue *q, struct device *parent, q 27 include/linux/bsg.h int bsg_scsi_register_queue(struct request_queue *q, struct device *parent); q 28 include/linux/bsg.h void bsg_unregister_queue(struct request_queue *q); q 30 include/linux/bsg.h static inline int bsg_scsi_register_queue(struct request_queue *q, q 35 include/linux/bsg.h static inline void bsg_unregister_queue(struct request_queue *q) q 173 include/linux/clk.h bool clk_is_match(const struct clk *p, const struct clk *q); q 216 include/linux/clk.h static inline bool clk_is_match(const struct clk *p, const struct clk *q) q 218 include/linux/clk.h return p == q; q 38 include/linux/cordic.h s32 q; q 38 include/linux/elevator.h int (*request_merge)(struct request_queue *q, struct request **, struct bio *); q 93 include/linux/elevator.h void elv_rqhash_del(struct request_queue *q, struct request *rq); q 94 include/linux/elevator.h void elv_rqhash_add(struct request_queue *q, struct request *rq); q 95 include/linux/elevator.h void elv_rqhash_reposition(struct request_queue *q, struct request *rq); q 96 include/linux/elevator.h struct request *elv_rqhash_find(struct request_queue *q, sector_t offset); q 401 include/linux/genhd.h unsigned int part_in_flight(struct request_queue *q, struct hd_struct *part); q 402 include/linux/genhd.h void part_in_flight_rw(struct request_queue *q, struct hd_struct *part, q 404 include/linux/genhd.h void part_dec_in_flight(struct request_queue *q, struct hd_struct *part, q 406 include/linux/genhd.h void part_inc_in_flight(struct request_queue *q, struct hd_struct *part, q 74 include/linux/iocontext.h struct request_queue *q; q 407 include/linux/lightnvm.h struct request_queue *q; q 425 include/linux/lightnvm.h struct request_queue *q; q 496 include/linux/mlx4/qp.h static inline u16 folded_qp(u32 q) q 500 include/linux/mlx4/qp.h res = ((q & 0xff) ^ ((q & 0xff0000) >> 16)) | (q & 0xff00); q 643 include/linux/netdevice.h static inline int netdev_queue_numa_node_read(const struct netdev_queue *q) q 646 include/linux/netdevice.h return q->numa_node; q 652 include/linux/netdevice.h static inline void netdev_queue_numa_node_write(struct netdev_queue *q, int node) q 655 include/linux/netdevice.h q->numa_node = node; q 3063 include/linux/netdevice.h void __netif_schedule(struct Qdisc *q); q 3311 include/linux/netdevice.h static inline void netdev_tx_reset_queue(struct netdev_queue *q) q 3314 include/linux/netdevice.h clear_bit(__QUEUE_STATE_STACK_XOFF, &q->state); q 3315 include/linux/netdevice.h dql_reset(&q->dql); q 272 include/linux/string.h __FORTIFY_INLINE char *strncpy(char *p, const char *q, __kernel_size_t size) q 279 include/linux/string.h return __builtin_strncpy(p, q, size); q 282 include/linux/string.h __FORTIFY_INLINE char *strcat(char *p, const char *q) q 286 include/linux/string.h return __builtin_strcat(p, q); q 287 include/linux/string.h if (strlcat(p, q, p_size) >= p_size) q 319 include/linux/string.h __FORTIFY_INLINE size_t strlcpy(char *p, const char *q, size_t size) q 323 include/linux/string.h size_t q_size = __builtin_object_size(q, 0); q 325 include/linux/string.h return __real_strlcpy(p, q, size); q 326 include/linux/string.h ret = strlen(q); q 333 include/linux/string.h __builtin_memcpy(p, q, len); q 340 include/linux/string.h __FORTIFY_INLINE char *strncat(char *p, const char *q, __kernel_size_t count) q 344 include/linux/string.h 
size_t q_size = __builtin_object_size(q, 0); q 346 include/linux/string.h return __builtin_strncat(p, q, count); q 348 include/linux/string.h copy_len = strnlen(q, count); q 351 include/linux/string.h __builtin_memcpy(p + p_len, q, copy_len); q 366 include/linux/string.h __FORTIFY_INLINE void *memcpy(void *p, const void *q, __kernel_size_t size) q 369 include/linux/string.h size_t q_size = __builtin_object_size(q, 0); q 378 include/linux/string.h return __builtin_memcpy(p, q, size); q 381 include/linux/string.h __FORTIFY_INLINE void *memmove(void *p, const void *q, __kernel_size_t size) q 384 include/linux/string.h size_t q_size = __builtin_object_size(q, 0); q 393 include/linux/string.h return __builtin_memmove(p, q, size); q 407 include/linux/string.h __FORTIFY_INLINE int memcmp(const void *p, const void *q, __kernel_size_t size) q 410 include/linux/string.h size_t q_size = __builtin_object_size(q, 0); q 419 include/linux/string.h return __builtin_memcmp(p, q, size); q 455 include/linux/string.h __FORTIFY_INLINE char *strcpy(char *p, const char *q) q 458 include/linux/string.h size_t q_size = __builtin_object_size(q, 0); q 460 include/linux/string.h return __builtin_strcpy(p, q); q 461 include/linux/string.h memcpy(p, q, strlen(q) + 1); q 212 include/linux/sunrpc/sched.h #define RPC_IS_PRIORITY(q) ((q)->maxpriority > 0) q 282 include/linux/sunrpc/sched.h static inline const char * rpc_qname(const struct rpc_wait_queue *q) q 284 include/linux/sunrpc/sched.h return ((q && q->name) ? q->name : "unknown"); q 287 include/linux/sunrpc/sched.h static inline void rpc_assign_waitqueue_name(struct rpc_wait_queue *q, q 290 include/linux/sunrpc/sched.h q->name = name; q 293 include/linux/sunrpc/sched.h static inline void rpc_assign_waitqueue_name(struct rpc_wait_queue *q, q 82 include/linux/swait.h extern void __init_swait_queue_head(struct swait_queue_head *q, const char *name, q 85 include/linux/swait.h #define init_swait_queue_head(q) \ q 88 include/linux/swait.h __init_swait_queue_head((q), #q, &__key); \ q 160 include/linux/swait.h extern void swake_up_one(struct swait_queue_head *q); q 161 include/linux/swait.h extern void swake_up_all(struct swait_queue_head *q); q 162 include/linux/swait.h extern void swake_up_locked(struct swait_queue_head *q); q 164 include/linux/swait.h extern void prepare_to_swait_exclusive(struct swait_queue_head *q, struct swait_queue *wait, int state); q 165 include/linux/swait.h extern long prepare_to_swait_event(struct swait_queue_head *q, struct swait_queue *wait, int state); q 167 include/linux/swait.h extern void __finish_swait(struct swait_queue_head *q, struct swait_queue *wait); q 168 include/linux/swait.h extern void finish_swait(struct swait_queue_head *q, struct swait_queue *wait); q 42 include/linux/t10-pi.h unsigned int shift = ilog2(queue_logical_block_size(rq->q)); q 45 include/linux/t10-pi.h if (rq->q->integrity.interval_exp) q 46 include/linux/t10-pi.h shift = rq->q->integrity.interval_exp; q 253 include/math-emu/op-1.h #define _FP_SQRT_MEAT_1(R, S, T, X, q) \ q 255 include/math-emu/op-1.h while (q != _FP_WORK_ROUND) \ q 257 include/math-emu/op-1.h T##_f = S##_f + q; \ q 260 include/math-emu/op-1.h S##_f = T##_f + q; \ q 262 include/math-emu/op-1.h R##_f += q; \ q 265 include/math-emu/op-1.h q >>= 1; \ q 524 include/math-emu/op-2.h #define _FP_SQRT_MEAT_2(R, S, T, X, q) \ q 526 include/math-emu/op-2.h while (q) \ q 528 include/math-emu/op-2.h T##_f1 = S##_f1 + q; \ q 531 include/math-emu/op-2.h S##_f1 = T##_f1 + q; \ q 533 include/math-emu/op-2.h 
R##_f1 += q; \ q 536 include/math-emu/op-2.h q >>= 1; \ q 538 include/math-emu/op-2.h q = (_FP_W_TYPE)1 << (_FP_W_TYPE_SIZE - 1); \ q 539 include/math-emu/op-2.h while (q != _FP_WORK_ROUND) \ q 541 include/math-emu/op-2.h T##_f0 = S##_f0 + q; \ q 546 include/math-emu/op-2.h S##_f0 = T##_f0 + q; \ q 549 include/math-emu/op-2.h R##_f0 += q; \ q 552 include/math-emu/op-2.h q >>= 1; \ q 429 include/math-emu/op-4.h #define _FP_SQRT_MEAT_4(R, S, T, X, q) \ q 431 include/math-emu/op-4.h while (q) \ q 433 include/math-emu/op-4.h T##_f[3] = S##_f[3] + q; \ q 436 include/math-emu/op-4.h S##_f[3] = T##_f[3] + q; \ q 438 include/math-emu/op-4.h R##_f[3] += q; \ q 441 include/math-emu/op-4.h q >>= 1; \ q 443 include/math-emu/op-4.h q = (_FP_W_TYPE)1 << (_FP_W_TYPE_SIZE - 1); \ q 444 include/math-emu/op-4.h while (q) \ q 446 include/math-emu/op-4.h T##_f[2] = S##_f[2] + q; \ q 451 include/math-emu/op-4.h S##_f[2] = T##_f[2] + q; \ q 455 include/math-emu/op-4.h R##_f[2] += q; \ q 458 include/math-emu/op-4.h q >>= 1; \ q 460 include/math-emu/op-4.h q = (_FP_W_TYPE)1 << (_FP_W_TYPE_SIZE - 1); \ q 461 include/math-emu/op-4.h while (q) \ q 463 include/math-emu/op-4.h T##_f[1] = S##_f[1] + q; \ q 470 include/math-emu/op-4.h S##_f[1] = T##_f[1] + q; \ q 475 include/math-emu/op-4.h R##_f[1] += q; \ q 478 include/math-emu/op-4.h q >>= 1; \ q 480 include/math-emu/op-4.h q = (_FP_W_TYPE)1 << (_FP_W_TYPE_SIZE - 1); \ q 481 include/math-emu/op-4.h while (q != _FP_WORK_ROUND) \ q 483 include/math-emu/op-4.h T##_f[0] = S##_f[0] + q; \ q 489 include/math-emu/op-4.h S##_f[0] = T##_f[0] + q; \ q 494 include/math-emu/op-4.h R##_f[0] += q; \ q 497 include/math-emu/op-4.h q >>= 1; \ q 594 include/math-emu/op-common.h _FP_W_TYPE q; \ q 636 include/math-emu/op-common.h q = _FP_OVERFLOW_##fs >> 1; \ q 637 include/math-emu/op-common.h _FP_SQRT_MEAT_##wc(R, S, T, X, q); \ q 877 include/math-emu/op-common.h #define _FP_DIV_HELP_imm(q, r, n, d) \ q 879 include/math-emu/op-common.h q = n / d, r = n % d; \ q 184 include/media/drv-intf/saa7146_vv.h void saa7146_buffer_finish(struct saa7146_dev *dev, struct saa7146_dmaqueue *q, int state); q 185 include/media/drv-intf/saa7146_vv.h void saa7146_buffer_next(struct saa7146_dev *dev, struct saa7146_dmaqueue *q,int vbi); q 186 include/media/drv-intf/saa7146_vv.h int saa7146_buffer_queue(struct saa7146_dev *dev, struct saa7146_dmaqueue *q, struct saa7146_buf *buf); q 188 include/media/drv-intf/saa7146_vv.h void saa7146_dma_free(struct saa7146_dev* dev,struct videobuf_queue *q, q 85 include/media/v4l2-mc.h int v4l_vb2q_enable_media_source(struct vb2_queue *q); q 142 include/media/v4l2-mc.h static inline int v4l_vb2q_enable_media_source(struct vb2_queue *q) q 64 include/media/v4l2-mem2mem.h struct vb2_queue q; q 530 include/media/v4l2-mem2mem.h return &m2m_ctx->out_q_ctx.q; q 541 include/media/v4l2-mem2mem.h return &m2m_ctx->cap_q_ctx.q; q 50 include/media/videobuf-core.h struct videobuf_queue *q; q 103 include/media/videobuf-core.h int (*buf_setup)(struct videobuf_queue *q, q 105 include/media/videobuf-core.h int (*buf_prepare)(struct videobuf_queue *q, q 108 include/media/videobuf-core.h void (*buf_queue)(struct videobuf_queue *q, q 110 include/media/videobuf-core.h void (*buf_release)(struct videobuf_queue *q, q 122 include/media/videobuf-core.h int (*iolock) (struct videobuf_queue *q, q 125 include/media/videobuf-core.h int (*sync) (struct videobuf_queue *q, q 127 include/media/videobuf-core.h int (*mmap_mapper) (struct videobuf_queue *q, q 162 include/media/videobuf-core.h static inline 
void videobuf_queue_lock(struct videobuf_queue *q) q 164 include/media/videobuf-core.h if (!q->ext_lock) q 165 include/media/videobuf-core.h mutex_lock(&q->vb_lock); q 168 include/media/videobuf-core.h static inline void videobuf_queue_unlock(struct videobuf_queue *q) q 170 include/media/videobuf-core.h if (!q->ext_lock) q 171 include/media/videobuf-core.h mutex_unlock(&q->vb_lock); q 174 include/media/videobuf-core.h int videobuf_waiton(struct videobuf_queue *q, struct videobuf_buffer *vb, q 176 include/media/videobuf-core.h int videobuf_iolock(struct videobuf_queue *q, struct videobuf_buffer *vb, q 179 include/media/videobuf-core.h struct videobuf_buffer *videobuf_alloc_vb(struct videobuf_queue *q); q 182 include/media/videobuf-core.h void *videobuf_queue_to_vaddr(struct videobuf_queue *q, q 185 include/media/videobuf-core.h void videobuf_queue_core_init(struct videobuf_queue *q, q 195 include/media/videobuf-core.h int videobuf_queue_is_busy(struct videobuf_queue *q); q 196 include/media/videobuf-core.h void videobuf_queue_cancel(struct videobuf_queue *q); q 198 include/media/videobuf-core.h enum v4l2_field videobuf_next_field(struct videobuf_queue *q); q 199 include/media/videobuf-core.h int videobuf_reqbufs(struct videobuf_queue *q, q 201 include/media/videobuf-core.h int videobuf_querybuf(struct videobuf_queue *q, struct v4l2_buffer *b); q 202 include/media/videobuf-core.h int videobuf_qbuf(struct videobuf_queue *q, q 204 include/media/videobuf-core.h int videobuf_dqbuf(struct videobuf_queue *q, q 206 include/media/videobuf-core.h int videobuf_streamon(struct videobuf_queue *q); q 207 include/media/videobuf-core.h int videobuf_streamoff(struct videobuf_queue *q); q 209 include/media/videobuf-core.h void videobuf_stop(struct videobuf_queue *q); q 211 include/media/videobuf-core.h int videobuf_read_start(struct videobuf_queue *q); q 212 include/media/videobuf-core.h void videobuf_read_stop(struct videobuf_queue *q); q 213 include/media/videobuf-core.h ssize_t videobuf_read_stream(struct videobuf_queue *q, q 216 include/media/videobuf-core.h ssize_t videobuf_read_one(struct videobuf_queue *q, q 220 include/media/videobuf-core.h struct videobuf_queue *q, q 223 include/media/videobuf-core.h int videobuf_mmap_setup(struct videobuf_queue *q, q 226 include/media/videobuf-core.h int __videobuf_mmap_setup(struct videobuf_queue *q, q 229 include/media/videobuf-core.h int videobuf_mmap_free(struct videobuf_queue *q); q 230 include/media/videobuf-core.h int videobuf_mmap_mapper(struct videobuf_queue *q, q 16 include/media/videobuf-dma-contig.h void videobuf_queue_dma_contig_init(struct videobuf_queue *q, q 27 include/media/videobuf-dma-contig.h void videobuf_dma_contig_free(struct videobuf_queue *q, q 91 include/media/videobuf-dma-sg.h void videobuf_queue_sg_init(struct videobuf_queue *q, q 29 include/media/videobuf-vmalloc.h void videobuf_queue_vmalloc_init(struct videobuf_queue *q, q 412 include/media/videobuf2-core.h int (*queue_setup)(struct vb2_queue *q, q 416 include/media/videobuf2-core.h void (*wait_prepare)(struct vb2_queue *q); q 417 include/media/videobuf2-core.h void (*wait_finish)(struct vb2_queue *q); q 425 include/media/videobuf2-core.h int (*start_streaming)(struct vb2_queue *q, unsigned int count); q 426 include/media/videobuf2-core.h void (*stop_streaming)(struct vb2_queue *q); q 681 include/media/videobuf2-core.h void vb2_discard_done(struct vb2_queue *q); q 693 include/media/videobuf2-core.h int vb2_wait_for_all_buffers(struct vb2_queue *q); q 710 include/media/videobuf2-core.h 
void vb2_core_querybuf(struct vb2_queue *q, unsigned int index, void *pb); q 739 include/media/videobuf2-core.h int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory, q 762 include/media/videobuf2-core.h int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory, q 786 include/media/videobuf2-core.h int vb2_core_prepare_buf(struct vb2_queue *q, unsigned int index, void *pb); q 814 include/media/videobuf2-core.h int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb, q 840 include/media/videobuf2-core.h int vb2_core_dqbuf(struct vb2_queue *q, unsigned int *pindex, void *pb, q 855 include/media/videobuf2-core.h int vb2_core_streamon(struct vb2_queue *q, unsigned int type); q 870 include/media/videobuf2-core.h int vb2_core_streamoff(struct vb2_queue *q, unsigned int type); q 891 include/media/videobuf2-core.h int vb2_core_expbuf(struct vb2_queue *q, int *fd, unsigned int type, q 908 include/media/videobuf2-core.h int vb2_core_queue_init(struct vb2_queue *q); q 918 include/media/videobuf2-core.h void vb2_core_queue_release(struct vb2_queue *q); q 933 include/media/videobuf2-core.h void vb2_queue_error(struct vb2_queue *q); q 957 include/media/videobuf2-core.h int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma); q 975 include/media/videobuf2-core.h unsigned long vb2_get_unmapped_area(struct vb2_queue *q, q 1000 include/media/videobuf2-core.h __poll_t vb2_core_poll(struct vb2_queue *q, struct file *file, q 1011 include/media/videobuf2-core.h size_t vb2_read(struct vb2_queue *q, char __user *data, size_t count, q 1021 include/media/videobuf2-core.h size_t vb2_write(struct vb2_queue *q, const char __user *data, size_t count, q 1050 include/media/videobuf2-core.h int vb2_thread_start(struct vb2_queue *q, vb2_thread_fnc fnc, void *priv, q 1057 include/media/videobuf2-core.h int vb2_thread_stop(struct vb2_queue *q); q 1063 include/media/videobuf2-core.h static inline bool vb2_is_streaming(struct vb2_queue *q) q 1065 include/media/videobuf2-core.h return q->streaming; q 1081 include/media/videobuf2-core.h static inline bool vb2_fileio_is_active(struct vb2_queue *q) q 1083 include/media/videobuf2-core.h return q->fileio; q 1092 include/media/videobuf2-core.h static inline bool vb2_is_busy(struct vb2_queue *q) q 1094 include/media/videobuf2-core.h return (q->num_buffers > 0); q 1101 include/media/videobuf2-core.h static inline void *vb2_get_drv_priv(struct vb2_queue *q) q 1103 include/media/videobuf2-core.h return q->drv_priv; q 1152 include/media/videobuf2-core.h static inline bool vb2_start_streaming_called(struct vb2_queue *q) q 1154 include/media/videobuf2-core.h return q->start_streaming_called; q 1161 include/media/videobuf2-core.h static inline void vb2_clear_last_buffer_dequeued(struct vb2_queue *q) q 1163 include/media/videobuf2-core.h q->last_buffer_dequeued = false; q 1176 include/media/videobuf2-core.h static inline struct vb2_buffer *vb2_get_buffer(struct vb2_queue *q, q 1179 include/media/videobuf2-core.h if (index < q->num_buffers) q 1180 include/media/videobuf2-core.h return q->bufs[index]; q 1196 include/media/videobuf2-core.h bool vb2_buffer_in_use(struct vb2_queue *q, struct vb2_buffer *vb); q 1208 include/media/videobuf2-core.h int vb2_verify_memory_type(struct vb2_queue *q, q 71 include/media/videobuf2-v4l2.h int vb2_find_timestamp(const struct vb2_queue *q, u64 timestamp, q 74 include/media/videobuf2-v4l2.h int vb2_querybuf(struct vb2_queue *q, struct v4l2_buffer *b); q 84 include/media/videobuf2-v4l2.h int vb2_reqbufs(struct vb2_queue 
*q, struct v4l2_requestbuffers *req); q 94 include/media/videobuf2-v4l2.h int vb2_create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create); q 118 include/media/videobuf2-v4l2.h int vb2_prepare_buf(struct vb2_queue *q, struct media_device *mdev, q 144 include/media/videobuf2-v4l2.h int vb2_qbuf(struct vb2_queue *q, struct media_device *mdev, q 156 include/media/videobuf2-v4l2.h int vb2_expbuf(struct vb2_queue *q, struct v4l2_exportbuffer *eb); q 182 include/media/videobuf2-v4l2.h int vb2_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking); q 200 include/media/videobuf2-v4l2.h int vb2_streamon(struct vb2_queue *q, enum v4l2_buf_type type); q 219 include/media/videobuf2-v4l2.h int vb2_streamoff(struct vb2_queue *q, enum v4l2_buf_type type); q 232 include/media/videobuf2-v4l2.h int __must_check vb2_queue_init(struct vb2_queue *q); q 242 include/media/videobuf2-v4l2.h void vb2_queue_release(struct vb2_queue *q); q 263 include/media/videobuf2-v4l2.h __poll_t vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait); q 23 include/net/dn_nsp.h struct sk_buff_head *q, unsigned short acknum); q 636 include/net/dsa.h #define BRCM_TAG_SET_PORT_QUEUE(p, q) ((p) << 8 | q) q 55 include/net/gen_stats.h struct gnet_stats_queue *q, __u32 qlen); q 58 include/net/gen_stats.h const struct gnet_stats_queue *q, __u32 qlen); q 101 include/net/inet_frag.h void (*constructor)(struct inet_frag_queue *q, q 124 include/net/inet_frag.h void inet_frag_kill(struct inet_frag_queue *q); q 125 include/net/inet_frag.h void inet_frag_destroy(struct inet_frag_queue *q); q 131 include/net/inet_frag.h static inline void inet_frag_put(struct inet_frag_queue *q) q 133 include/net/inet_frag.h if (refcount_dec_and_test(&q->refcnt)) q 134 include/net/inet_frag.h inet_frag_destroy(q); q 169 include/net/inet_frag.h int inet_frag_queue_insert(struct inet_frag_queue *q, struct sk_buff *skb, q 171 include/net/inet_frag.h void *inet_frag_reasm_prepare(struct inet_frag_queue *q, struct sk_buff *skb, q 173 include/net/inet_frag.h void inet_frag_reasm_finish(struct inet_frag_queue *q, struct sk_buff *head, q 175 include/net/inet_frag.h struct sk_buff *inet_frag_pull_head(struct inet_frag_queue *q); q 23 include/net/ipv6_frag.h struct inet_frag_queue q; q 31 include/net/ipv6_frag.h static inline void ip6frag_init(struct inet_frag_queue *q, const void *a) q 33 include/net/ipv6_frag.h struct frag_queue *fq = container_of(q, struct frag_queue, q); q 36 include/net/ipv6_frag.h q->key.v6 = *key; q 70 include/net/ipv6_frag.h if (fq->q.fqdir->dead) q 72 include/net/ipv6_frag.h spin_lock(&fq->q.lock); q 74 include/net/ipv6_frag.h if (fq->q.flags & INET_FRAG_COMPLETE) q 77 include/net/ipv6_frag.h inet_frag_kill(&fq->q); q 87 include/net/ipv6_frag.h if (!(fq->q.flags & INET_FRAG_FIRST_IN)) q 94 include/net/ipv6_frag.h head = inet_frag_pull_head(&fq->q); q 99 include/net/ipv6_frag.h spin_unlock(&fq->q.lock); q 106 include/net/ipv6_frag.h spin_unlock(&fq->q.lock); q 109 include/net/ipv6_frag.h inet_frag_put(&fq->q); q 48 include/net/pkt_cls.h struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q, q 50 include/net/pkt_cls.h int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q, q 54 include/net/pkt_cls.h void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q, q 70 include/net/pkt_cls.h return block->q; q 89 include/net/pkt_cls.h struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q, q 96 include/net/pkt_cls.h int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q, q 108 
include/net/pkt_cls.h void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q, q 145 include/net/pkt_cls.h __tcf_bind_filter(struct Qdisc *q, struct tcf_result *r, unsigned long base) q 149 include/net/pkt_cls.h cl = q->ops->cl_ops->bind_tcf(q, base, r->classid); q 152 include/net/pkt_cls.h q->ops->cl_ops->unbind_tcf(q, cl); q 158 include/net/pkt_cls.h struct Qdisc *q = tp->chain->block->q; q 163 include/net/pkt_cls.h if (!q) q 165 include/net/pkt_cls.h sch_tree_lock(q); q 166 include/net/pkt_cls.h __tcf_bind_filter(q, r, base); q 167 include/net/pkt_cls.h sch_tree_unlock(q); q 171 include/net/pkt_cls.h __tcf_unbind_filter(struct Qdisc *q, struct tcf_result *r) q 176 include/net/pkt_cls.h q->ops->cl_ops->unbind_tcf(q, cl); q 182 include/net/pkt_cls.h struct Qdisc *q = tp->chain->block->q; q 184 include/net/pkt_cls.h if (!q) q 186 include/net/pkt_cls.h __tcf_unbind_filter(q, r); q 25 include/net/pkt_sched.h static inline void *qdisc_priv(struct Qdisc *q) q 27 include/net/pkt_sched.h return (char *) q + QDISC_ALIGN(sizeof(struct Qdisc)); q 92 include/net/pkt_sched.h int fifo_set_limit(struct Qdisc *q, unsigned int limit); q 102 include/net/pkt_sched.h void qdisc_hash_add(struct Qdisc *q, bool invisible); q 103 include/net/pkt_sched.h void qdisc_hash_del(struct Qdisc *q); q 112 include/net/pkt_sched.h bool sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q, q 116 include/net/pkt_sched.h void __qdisc_run(struct Qdisc *q); q 118 include/net/pkt_sched.h static inline void qdisc_run(struct Qdisc *q) q 120 include/net/pkt_sched.h if (qdisc_run_begin(q)) { q 124 include/net/pkt_sched.h if (!(q->flags & TCQ_F_NOLOCK) || q 125 include/net/pkt_sched.h likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) q 126 include/net/pkt_sched.h __qdisc_run(q); q 127 include/net/pkt_sched.h qdisc_run_end(q); q 150 include/net/pkt_sched.h static inline struct net *qdisc_net(struct Qdisc *q) q 152 include/net/pkt_sched.h return dev_net(q->dev_queue->dev); q 101 include/net/sch_generic.h struct qdisc_skb_head q; q 144 include/net/sch_generic.h static inline bool qdisc_is_percpu_stats(const struct Qdisc *q) q 146 include/net/sch_generic.h return q->flags & TCQ_F_CPUSTATS; q 153 include/net/sch_generic.h return !READ_ONCE(qdisc->q.qlen); q 413 include/net/sch_generic.h struct Qdisc *q; q 466 include/net/sch_generic.h static inline int qdisc_qlen_cpu(const struct Qdisc *q) q 468 include/net/sch_generic.h return this_cpu_ptr(q->cpu_qstats)->qlen; q 471 include/net/sch_generic.h static inline int qdisc_qlen(const struct Qdisc *q) q 473 include/net/sch_generic.h return q->q.qlen; q 476 include/net/sch_generic.h static inline int qdisc_qlen_sum(const struct Qdisc *q) q 478 include/net/sch_generic.h __u32 qlen = q->qstats.qlen; q 481 include/net/sch_generic.h if (qdisc_is_percpu_stats(q)) { q 483 include/net/sch_generic.h qlen += per_cpu_ptr(q->cpu_qstats, i)->qlen; q 485 include/net/sch_generic.h qlen += q->q.qlen; q 498 include/net/sch_generic.h return &qdisc->q.lock; q 503 include/net/sch_generic.h struct Qdisc *q = rcu_dereference_rtnl(qdisc->dev_queue->qdisc); q 505 include/net/sch_generic.h return q; q 558 include/net/sch_generic.h static inline void sch_tree_lock(const struct Qdisc *q) q 560 include/net/sch_generic.h spin_lock_bh(qdisc_root_sleeping_lock(q)); q 563 include/net/sch_generic.h static inline void sch_tree_unlock(const struct Qdisc *q) q 565 include/net/sch_generic.h spin_unlock_bh(qdisc_root_sleeping_lock(q)); q 645 include/net/sch_generic.h int qdisc_offload_dump_helper(struct Qdisc *q, 
enum tc_setup_type type, q 653 include/net/sch_generic.h qdisc_offload_dump_helper(struct Qdisc *q, enum tc_setup_type type, q 656 include/net/sch_generic.h q->flags &= ~TCQ_F_OFFLOADED; q 727 include/net/sch_generic.h const struct Qdisc *q = rcu_dereference(txq->qdisc); q 729 include/net/sch_generic.h if (!qdisc_is_empty(q)) { q 971 include/net/sch_generic.h __qdisc_enqueue_tail(skb, &sch->q); q 1004 include/net/sch_generic.h struct sk_buff *skb = __qdisc_dequeue_head(&sch->q); q 1053 include/net/sch_generic.h return __qdisc_queue_drop_head(sch, &sch->q, to_free); q 1058 include/net/sch_generic.h const struct qdisc_skb_head *qh = &sch->q; q 1076 include/net/sch_generic.h sch->q.qlen++; q 1093 include/net/sch_generic.h sch->q.qlen--; q 1105 include/net/sch_generic.h sch->q.qlen++; q 1121 include/net/sch_generic.h sch->q.qlen--; q 1148 include/net/sch_generic.h __qdisc_reset_queue(&sch->q); q 37 include/net/sctp/stream_interleave.h void (*generate_ftsn)(struct sctp_outq *q, __u32 ctsn); q 33 include/net/sctp/stream_sched.h void (*enqueue)(struct sctp_outq *q, struct sctp_datamsg *msg); q 35 include/net/sctp/stream_sched.h struct sctp_chunk *(*dequeue)(struct sctp_outq *q); q 37 include/net/sctp/stream_sched.h void (*dequeue_done)(struct sctp_outq *q, struct sctp_chunk *chunk); q 51 include/net/sctp/stream_sched.h void sctp_sched_dequeue_done(struct sctp_outq *q, struct sctp_chunk *ch); q 53 include/net/sctp/stream_sched.h void sctp_sched_dequeue_common(struct sctp_outq *q, struct sctp_chunk *ch); q 1106 include/net/sctp/structs.h void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport, q 1112 include/net/sctp/structs.h void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 sack_ctsn); q 1114 include/net/sctp/structs.h static inline void sctp_outq_cork(struct sctp_outq *q) q 1116 include/net/sctp/structs.h q->cork = 1; q 338 include/scsi/scsi_device.h extern struct scsi_device *scsi_device_from_queue(struct request_queue *q); q 80 include/scsi/scsi_dh.h static inline const char *scsi_dh_attached_handler_name(struct request_queue *q, q 86 include/scsi/scsi_transport.h void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q); q 99 include/scsi/scsi_transport_sas.h struct request_queue *q; q 69 include/sound/asequencer.h #define snd_seq_queue_sync_port(q) ((q) + 16) q 410 include/sound/core.h #define snd_pci_quirk_name(q) ((q)->name) q 418 include/sound/core.h #define snd_pci_quirk_name(q) "" q 112 include/target/target_core_backend.h struct request_queue *q); q 76 include/trace/events/block.h TP_PROTO(struct request_queue *q, struct request *rq), q 78 include/trace/events/block.h TP_ARGS(q, rq), q 150 include/trace/events/block.h TP_PROTO(struct request_queue *q, struct request *rq), q 152 include/trace/events/block.h TP_ARGS(q, rq), q 194 include/trace/events/block.h TP_PROTO(struct request_queue *q, struct request *rq), q 196 include/trace/events/block.h TP_ARGS(q, rq) q 209 include/trace/events/block.h TP_PROTO(struct request_queue *q, struct request *rq), q 211 include/trace/events/block.h TP_ARGS(q, rq) q 227 include/trace/events/block.h TP_PROTO(struct request_queue *q, struct bio *bio), q 229 include/trace/events/block.h TP_ARGS(q, bio), q 264 include/trace/events/block.h TP_PROTO(struct request_queue *q, struct bio *bio, int error), q 266 include/trace/events/block.h TP_ARGS(q, bio, error), q 292 include/trace/events/block.h TP_PROTO(struct request_queue *q, struct request *rq, struct bio *bio), q 294 include/trace/events/block.h TP_ARGS(q, rq, bio), q 
329 include/trace/events/block.h TP_PROTO(struct request_queue *q, struct request *rq, struct bio *bio), q 331 include/trace/events/block.h TP_ARGS(q, rq, bio) q 345 include/trace/events/block.h TP_PROTO(struct request_queue *q, struct request *rq, struct bio *bio), q 347 include/trace/events/block.h TP_ARGS(q, rq, bio) q 359 include/trace/events/block.h TP_PROTO(struct request_queue *q, struct bio *bio), q 361 include/trace/events/block.h TP_ARGS(q, bio), q 387 include/trace/events/block.h TP_PROTO(struct request_queue *q, struct bio *bio, int rw), q 389 include/trace/events/block.h TP_ARGS(q, bio, rw), q 425 include/trace/events/block.h TP_PROTO(struct request_queue *q, struct bio *bio, int rw), q 427 include/trace/events/block.h TP_ARGS(q, bio, rw) q 443 include/trace/events/block.h TP_PROTO(struct request_queue *q, struct bio *bio, int rw), q 445 include/trace/events/block.h TP_ARGS(q, bio, rw) q 458 include/trace/events/block.h TP_PROTO(struct request_queue *q), q 460 include/trace/events/block.h TP_ARGS(q), q 475 include/trace/events/block.h TP_PROTO(struct request_queue *q, unsigned int depth, bool explicit), q 477 include/trace/events/block.h TP_ARGS(q, depth, explicit), q 503 include/trace/events/block.h TP_PROTO(struct request_queue *q, unsigned int depth, bool explicit), q 505 include/trace/events/block.h TP_ARGS(q, depth, explicit) q 521 include/trace/events/block.h TP_PROTO(struct request_queue *q, struct bio *bio, q 524 include/trace/events/block.h TP_ARGS(q, bio, new_sector), q 561 include/trace/events/block.h TP_PROTO(struct request_queue *q, struct bio *bio, dev_t dev, q 564 include/trace/events/block.h TP_ARGS(q, bio, dev, from), q 605 include/trace/events/block.h TP_PROTO(struct request_queue *q, struct request *rq, dev_t dev, q 608 include/trace/events/block.h TP_ARGS(q, rq, dev, from), q 16 include/trace/events/kyber.h TP_PROTO(struct request_queue *q, const char *domain, const char *type, q 20 include/trace/events/kyber.h TP_ARGS(q, domain, type, percentile, numerator, denominator, samples), q 33 include/trace/events/kyber.h __entry->dev = disk_devt(dev_to_disk(kobj_to_dev(q->kobj.parent))); q 50 include/trace/events/kyber.h TP_PROTO(struct request_queue *q, const char *domain, q 53 include/trace/events/kyber.h TP_ARGS(q, domain, depth), q 62 include/trace/events/kyber.h __entry->dev = disk_devt(dev_to_disk(kobj_to_dev(q->kobj.parent))); q 74 include/trace/events/kyber.h TP_PROTO(struct request_queue *q, const char *domain), q 76 include/trace/events/kyber.h TP_ARGS(q, domain), q 84 include/trace/events/kyber.h __entry->dev = disk_devt(dev_to_disk(kobj_to_dev(q->kobj.parent))); q 171 include/trace/events/sunrpc.h TP_PROTO(const struct rpc_task *task, const struct rpc_wait_queue *q), q 173 include/trace/events/sunrpc.h TP_ARGS(task, q), q 182 include/trace/events/sunrpc.h __string(q_name, rpc_qname(q)) q 193 include/trace/events/sunrpc.h __assign_str(q_name, rpc_qname(q)); q 209 include/trace/events/sunrpc.h const struct rpc_wait_queue *q \ q 211 include/trace/events/sunrpc.h TP_ARGS(task, q)) q 182 include/trace/events/v4l2.h TP_PROTO(struct vb2_queue *q, struct vb2_buffer *vb), q 183 include/trace/events/v4l2.h TP_ARGS(q, vb), q 205 include/trace/events/v4l2.h struct v4l2_fh *owner = q->owner; q 246 include/trace/events/v4l2.h TP_PROTO(struct vb2_queue *q, struct vb2_buffer *vb), q 247 include/trace/events/v4l2.h TP_ARGS(q, vb) q 251 include/trace/events/v4l2.h TP_PROTO(struct vb2_queue *q, struct vb2_buffer *vb), q 252 include/trace/events/v4l2.h TP_ARGS(q, vb) q 
256 include/trace/events/v4l2.h TP_PROTO(struct vb2_queue *q, struct vb2_buffer *vb), q 257 include/trace/events/v4l2.h TP_ARGS(q, vb) q 261 include/trace/events/v4l2.h TP_PROTO(struct vb2_queue *q, struct vb2_buffer *vb), q 262 include/trace/events/v4l2.h TP_ARGS(q, vb) q 12 include/trace/events/vb2.h TP_PROTO(struct vb2_queue *q, struct vb2_buffer *vb), q 13 include/trace/events/vb2.h TP_ARGS(q, vb), q 26 include/trace/events/vb2.h __entry->owner = q->owner; q 27 include/trace/events/vb2.h __entry->queued_count = q->queued_count; q 29 include/trace/events/vb2.h atomic_read(&q->owned_by_drv_count); q 47 include/trace/events/vb2.h TP_PROTO(struct vb2_queue *q, struct vb2_buffer *vb), q 48 include/trace/events/vb2.h TP_ARGS(q, vb) q 52 include/trace/events/vb2.h TP_PROTO(struct vb2_queue *q, struct vb2_buffer *vb), q 53 include/trace/events/vb2.h TP_ARGS(q, vb) q 57 include/trace/events/vb2.h TP_PROTO(struct vb2_queue *q, struct vb2_buffer *vb), q 58 include/trace/events/vb2.h TP_ARGS(q, vb) q 62 include/trace/events/vb2.h TP_PROTO(struct vb2_queue *q, struct vb2_buffer *vb), q 63 include/trace/events/vb2.h TP_ARGS(q, vb) q 16 include/xen/arm/interface.h typedef struct { union { type *p; uint64_aligned_t q; }; } \ q 65 init/initramfs.c struct hash **p, *q; q 77 init/initramfs.c q = kmalloc(sizeof(struct hash), GFP_KERNEL); q 78 init/initramfs.c if (!q) q 80 init/initramfs.c q->major = major; q 81 init/initramfs.c q->minor = minor; q 82 init/initramfs.c q->ino = ino; q 83 init/initramfs.c q->mode = mode; q 84 init/initramfs.c strcpy(q->name, name); q 85 init/initramfs.c q->next = NULL; q 86 init/initramfs.c *p = q; q 92 init/initramfs.c struct hash **p, *q; q 95 init/initramfs.c q = *p; q 96 init/initramfs.c *p = q->next; q 97 init/initramfs.c kfree(q); q 260 ipc/sem.c struct sem_queue *q, *tq; q 270 ipc/sem.c list_for_each_entry_safe(q, tq, &sma->pending_alter, list) { q 272 ipc/sem.c curr = &sma->sems[q->sops[0].sem_num]; q 274 ipc/sem.c list_add_tail(&q->list, &curr->pending_alter); q 625 ipc/sem.c static int perform_atomic_semop_slow(struct sem_array *sma, struct sem_queue *q) q 634 ipc/sem.c sops = q->sops; q 635 ipc/sem.c nsops = q->nsops; q 636 ipc/sem.c un = q->undo; q 665 ipc/sem.c pid = q->pid; q 678 ipc/sem.c q->blocking = sop; q 698 ipc/sem.c static int perform_atomic_semop(struct sem_array *sma, struct sem_queue *q) q 706 ipc/sem.c sops = q->sops; q 707 ipc/sem.c nsops = q->nsops; q 708 ipc/sem.c un = q->undo; q 710 ipc/sem.c if (unlikely(q->dupsop)) q 711 ipc/sem.c return perform_atomic_semop_slow(sma, q); q 756 ipc/sem.c ipc_update_pid(&curr->sempid, q->pid); q 762 ipc/sem.c q->blocking = sop; q 766 ipc/sem.c static inline void wake_up_sem_queue_prepare(struct sem_queue *q, int error, q 769 ipc/sem.c wake_q_add(wake_q, q->sleeper); q 777 ipc/sem.c WRITE_ONCE(q->status, error); q 780 ipc/sem.c static void unlink_queue(struct sem_array *sma, struct sem_queue *q) q 782 ipc/sem.c list_del(&q->list); q 783 ipc/sem.c if (q->nsops > 1) q 797 ipc/sem.c static inline int check_restart(struct sem_array *sma, struct sem_queue *q) q 804 ipc/sem.c if (q->nsops > 1) q 838 ipc/sem.c struct sem_queue *q, *tmp; q 847 ipc/sem.c list_for_each_entry_safe(q, tmp, pending_list, list) { q 848 ipc/sem.c int error = perform_atomic_semop(sma, q); q 853 ipc/sem.c unlink_queue(sma, q); q 855 ipc/sem.c wake_up_sem_queue_prepare(q, error, wake_q); q 932 ipc/sem.c struct sem_queue *q, *tmp; q 942 ipc/sem.c list_for_each_entry_safe(q, tmp, pending_list, list) { q 955 ipc/sem.c error = 
perform_atomic_semop(sma, q); q 961 ipc/sem.c unlink_queue(sma, q); q 967 ipc/sem.c do_smart_wakeup_zero(sma, q->sops, q->nsops, wake_q); q 968 ipc/sem.c restart = check_restart(sma, q); q 971 ipc/sem.c wake_up_sem_queue_prepare(q, error, wake_q); q 1053 ipc/sem.c static int check_qop(struct sem_array *sma, int semnum, struct sem_queue *q, q 1056 ipc/sem.c struct sembuf *sop = q->blocking; q 1091 ipc/sem.c struct sem_queue *q; q 1101 ipc/sem.c list_for_each_entry(q, l, list) { q 1109 ipc/sem.c list_for_each_entry(q, &sma->pending_alter, list) { q 1110 ipc/sem.c semcnt += check_qop(sma, semnum, q, count_zero); q 1113 ipc/sem.c list_for_each_entry(q, &sma->pending_const, list) { q 1114 ipc/sem.c semcnt += check_qop(sma, semnum, q, count_zero); q 1127 ipc/sem.c struct sem_queue *q, *tq; q 1144 ipc/sem.c list_for_each_entry_safe(q, tq, &sma->pending_const, list) { q 1145 ipc/sem.c unlink_queue(sma, q); q 1146 ipc/sem.c wake_up_sem_queue_prepare(q, -EIDRM, &wake_q); q 1149 ipc/sem.c list_for_each_entry_safe(q, tq, &sma->pending_alter, list) { q 1150 ipc/sem.c unlink_queue(sma, q); q 1151 ipc/sem.c wake_up_sem_queue_prepare(q, -EIDRM, &wake_q); q 1155 ipc/sem.c list_for_each_entry_safe(q, tq, &sem->pending_const, list) { q 1156 ipc/sem.c unlink_queue(sma, q); q 1157 ipc/sem.c wake_up_sem_queue_prepare(q, -EIDRM, &wake_q); q 1159 ipc/sem.c list_for_each_entry_safe(q, tq, &sem->pending_alter, list) { q 1160 ipc/sem.c unlink_queue(sma, q); q 1161 ipc/sem.c wake_up_sem_queue_prepare(q, -EIDRM, &wake_q); q 892 kernel/audit.c while ((skb = __skb_dequeue(&dest->q)) != NULL) q 229 kernel/audit.h struct sk_buff_head q; q 613 kernel/audit_tree.c struct list_head *p, *q; q 620 kernel/audit_tree.c for (p = tree->chunks.next; p != &tree->chunks; p = q) { q 622 kernel/audit_tree.c q = p->next; q 1069 kernel/auditfilter.c static void audit_list_rules(int seq, struct sk_buff_head *q) q 1088 kernel/auditfilter.c skb_queue_tail(q, skb); q 1094 kernel/auditfilter.c skb_queue_tail(q, skb); q 1181 kernel/auditfilter.c skb_queue_head_init(&dest->q); q 1184 kernel/auditfilter.c audit_list_rules(seq, &dest->q); q 1189 kernel/auditfilter.c skb_queue_purge(&dest->q); q 251 kernel/auditsc.c struct audit_tree_refs *q; q 262 kernel/auditsc.c for (q = p; q != ctx->trees; q = q->next, n = 31) { q 264 kernel/auditsc.c audit_put_chunk(q->c[n]); q 265 kernel/auditsc.c q->c[n] = NULL; q 269 kernel/auditsc.c audit_put_chunk(q->c[n]); q 270 kernel/auditsc.c q->c[n] = NULL; q 278 kernel/auditsc.c struct audit_tree_refs *p, *q; q 279 kernel/auditsc.c for (p = ctx->first_trees; p; p = q) { q 280 kernel/auditsc.c q = p->next; q 45 kernel/bpf/cpumap.c void *q[CPU_MAP_BULK_SIZE]; q 607 kernel/bpf/cpumap.c struct ptr_ring *q; q 613 kernel/bpf/cpumap.c q = rcpu->queue; q 614 kernel/bpf/cpumap.c spin_lock(&q->producer_lock); q 617 kernel/bpf/cpumap.c struct xdp_frame *xdpf = bq->q[i]; q 620 kernel/bpf/cpumap.c err = __ptr_ring_produce(q, xdpf); q 631 kernel/bpf/cpumap.c spin_unlock(&q->producer_lock); q 660 kernel/bpf/cpumap.c bq->q[bq->count++] = xdpf; q 59 kernel/bpf/devmap.c struct xdp_frame *q[DEV_MAP_BULK_SIZE]; q 361 kernel/bpf/devmap.c struct xdp_frame *xdpf = bq->q[i]; q 366 kernel/bpf/devmap.c sent = dev->netdev_ops->ndo_xdp_xmit(dev, bq->count, bq->q, flags); q 386 kernel/bpf/devmap.c struct xdp_frame *xdpf = bq->q[i]; q 453 kernel/bpf/devmap.c bq->q[bq->count++] = xdpf; q 444 kernel/cgroup/cpuset.c static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q) q 446 kernel/cgroup/cpuset.c return 
cpumask_subset(p->cpus_allowed, q->cpus_allowed) && q 447 kernel/cgroup/cpuset.c nodes_subset(p->mems_allowed, q->mems_allowed) && q 448 kernel/cgroup/cpuset.c is_cpu_exclusive(p) <= is_cpu_exclusive(q) && q 449 kernel/cgroup/cpuset.c is_mem_exclusive(p) <= is_mem_exclusive(q); q 145 kernel/cgroup/pids.c struct pids_cgroup *p, *q; q 163 kernel/cgroup/pids.c for (q = pids; q != p; q = parent_pids(q)) q 164 kernel/cgroup/pids.c pids_cancel(q, num); q 196 kernel/crash_core.c char *q; q 206 kernel/crash_core.c q = end_p - strlen(suffix_tbl[i]); q 207 kernel/crash_core.c if (!strncmp(q, suffix_tbl[i], q 213 kernel/crash_core.c q = end_p - strlen(suffix); q 214 kernel/crash_core.c if (!strncmp(q, suffix, strlen(suffix))) q 330 kernel/events/uprobes.c struct list_head *pos, *q; q 336 kernel/events/uprobes.c list_for_each_safe(pos, q, &delayed_uprobe_list) { q 1336 kernel/events/uprobes.c struct list_head *pos, *q; q 1342 kernel/events/uprobes.c list_for_each_safe(pos, q, &delayed_uprobe_list) { q 1521 kernel/futex.c static void __unqueue_futex(struct futex_q *q) q 1525 kernel/futex.c if (WARN_ON_SMP(!q->lock_ptr) || WARN_ON(plist_node_empty(&q->list))) q 1527 kernel/futex.c lockdep_assert_held(q->lock_ptr); q 1529 kernel/futex.c hb = container_of(q->lock_ptr, struct futex_hash_bucket, lock); q 1530 kernel/futex.c plist_del(&q->list, &hb->chain); q 1540 kernel/futex.c static void mark_wake_futex(struct wake_q_head *wake_q, struct futex_q *q) q 1542 kernel/futex.c struct task_struct *p = q->task; q 1544 kernel/futex.c if (WARN(q->pi_state || q->rt_waiter, "refusing to wake PI futex\n")) q 1548 kernel/futex.c __unqueue_futex(q); q 1556 kernel/futex.c smp_store_release(&q->lock_ptr, NULL); q 1872 kernel/futex.c void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1, q 1881 kernel/futex.c plist_del(&q->list, &hb1->chain); q 1884 kernel/futex.c plist_add(&q->list, &hb2->chain); q 1885 kernel/futex.c q->lock_ptr = &hb2->lock; q 1888 kernel/futex.c q->key = *key2; q 1906 kernel/futex.c void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key, q 1910 kernel/futex.c q->key = *key; q 1912 kernel/futex.c __unqueue_futex(q); q 1914 kernel/futex.c WARN_ON(!q->rt_waiter); q 1915 kernel/futex.c q->rt_waiter = NULL; q 1917 kernel/futex.c q->lock_ptr = &hb->lock; q 1919 kernel/futex.c wake_up_state(q->task, TASK_NORMAL); q 2315 kernel/futex.c static inline struct futex_hash_bucket *queue_lock(struct futex_q *q) q 2320 kernel/futex.c hb = hash_futex(&q->key); q 2332 kernel/futex.c q->lock_ptr = &hb->lock; q 2346 kernel/futex.c static inline void __queue_me(struct futex_q *q, struct futex_hash_bucket *hb) q 2360 kernel/futex.c plist_node_init(&q->list, prio); q 2361 kernel/futex.c plist_add(&q->list, &hb->chain); q 2362 kernel/futex.c q->task = current; q 2377 kernel/futex.c static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb) q 2380 kernel/futex.c __queue_me(q, hb); q 2395 kernel/futex.c static int unqueue_me(struct futex_q *q) q 2407 kernel/futex.c lock_ptr = READ_ONCE(q->lock_ptr); q 2423 kernel/futex.c if (unlikely(lock_ptr != q->lock_ptr)) { q 2427 kernel/futex.c __unqueue_futex(q); q 2429 kernel/futex.c BUG_ON(q->pi_state); q 2435 kernel/futex.c drop_futex_key_refs(&q->key); q 2444 kernel/futex.c static void unqueue_me_pi(struct futex_q *q) q 2445 kernel/futex.c __releases(q->lock_ptr) q 2447 kernel/futex.c __unqueue_futex(q); q 2449 kernel/futex.c BUG_ON(!q->pi_state); q 2450 kernel/futex.c put_pi_state(q->pi_state); q 2451 kernel/futex.c q->pi_state = NULL; q 
2453 kernel/futex.c spin_unlock(q->lock_ptr); q 2456 kernel/futex.c static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q, q 2459 kernel/futex.c struct futex_pi_state *pi_state = q->pi_state; q 2465 kernel/futex.c lockdep_assert_held(q->lock_ptr); q 2586 kernel/futex.c spin_unlock(q->lock_ptr); q 2604 kernel/futex.c spin_lock(q->lock_ptr); q 2642 kernel/futex.c static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked) q 2655 kernel/futex.c if (q->pi_state->owner != current) q 2656 kernel/futex.c ret = fixup_pi_state_owner(uaddr, q, current); q 2668 kernel/futex.c if (q->pi_state->owner == current) { q 2669 kernel/futex.c ret = fixup_pi_state_owner(uaddr, q, NULL); q 2677 kernel/futex.c if (rt_mutex_owner(&q->pi_state->pi_mutex) == current) { q 2680 kernel/futex.c q->pi_state->pi_mutex.owner, q 2681 kernel/futex.c q->pi_state->owner); q 2694 kernel/futex.c static void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q, q 2704 kernel/futex.c queue_me(q, hb); q 2714 kernel/futex.c if (likely(!plist_node_empty(&q->list))) { q 2744 kernel/futex.c struct futex_q *q, struct futex_hash_bucket **hb) q 2768 kernel/futex.c ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q->key, FUTEX_READ); q 2773 kernel/futex.c *hb = queue_lock(q); q 2787 kernel/futex.c put_futex_key(&q->key); q 2798 kernel/futex.c put_futex_key(&q->key); q 2808 kernel/futex.c struct futex_q q = futex_q_init; q 2813 kernel/futex.c q.bitset = bitset; q 2822 kernel/futex.c ret = futex_wait_setup(uaddr, val, flags, &q, &hb); q 2827 kernel/futex.c futex_wait_queue_me(hb, &q, to); q 2832 kernel/futex.c if (!unqueue_me(&q)) q 2901 kernel/futex.c struct futex_q q = futex_q_init; q 2913 kernel/futex.c ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q.key, FUTEX_WRITE); q 2918 kernel/futex.c hb = queue_lock(&q); q 2920 kernel/futex.c ret = futex_lock_pi_atomic(uaddr, hb, &q.key, &q.pi_state, current, q 2943 kernel/futex.c put_futex_key(&q.key); q 2957 kernel/futex.c WARN_ON(!q.pi_state); q 2962 kernel/futex.c __queue_me(&q, hb); q 2965 kernel/futex.c ret = rt_mutex_futex_trylock(&q.pi_state->pi_mutex); q 2986 kernel/futex.c raw_spin_lock_irq(&q.pi_state->pi_mutex.wait_lock); q 2987 kernel/futex.c spin_unlock(q.lock_ptr); q 2993 kernel/futex.c ret = __rt_mutex_start_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter, current); q 2994 kernel/futex.c raw_spin_unlock_irq(&q.pi_state->pi_mutex.wait_lock); q 3005 kernel/futex.c ret = rt_mutex_wait_proxy_lock(&q.pi_state->pi_mutex, to, &rt_waiter); q 3008 kernel/futex.c spin_lock(q.lock_ptr); q 3018 kernel/futex.c if (ret && !rt_mutex_cleanup_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter)) q 3026 kernel/futex.c res = fixup_owner(uaddr, &q, !ret); q 3038 kernel/futex.c if (ret && (rt_mutex_owner(&q.pi_state->pi_mutex) == current)) { q 3039 kernel/futex.c pi_state = q.pi_state; q 3044 kernel/futex.c unqueue_me_pi(&q); q 3057 kernel/futex.c put_futex_key(&q.key); q 3075 kernel/futex.c put_futex_key(&q.key); q 3240 kernel/futex.c struct futex_q *q, union futex_key *key2, q 3252 kernel/futex.c if (!match_futex(&q->key, key2)) { q 3253 kernel/futex.c WARN_ON(q->lock_ptr && (&hb->lock != q->lock_ptr)); q 3258 kernel/futex.c plist_del(&q->list, &hb->chain); q 3320 kernel/futex.c struct futex_q q = futex_q_init; q 3345 kernel/futex.c q.bitset = bitset; q 3346 kernel/futex.c q.rt_waiter = &rt_waiter; q 3347 kernel/futex.c q.requeue_pi_key = &key2; q 3353 kernel/futex.c ret = futex_wait_setup(uaddr, val, flags, &q, &hb); q 3361 kernel/futex.c if (match_futex(&q.key, 
&key2)) { q 3368 kernel/futex.c futex_wait_queue_me(hb, &q, to); q 3371 kernel/futex.c ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to); q 3386 kernel/futex.c if (!q.rt_waiter) { q 3391 kernel/futex.c if (q.pi_state && (q.pi_state->owner != current)) { q 3392 kernel/futex.c spin_lock(q.lock_ptr); q 3393 kernel/futex.c ret = fixup_pi_state_owner(uaddr2, &q, current); q 3394 kernel/futex.c if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current) { q 3395 kernel/futex.c pi_state = q.pi_state; q 3402 kernel/futex.c put_pi_state(q.pi_state); q 3403 kernel/futex.c spin_unlock(q.lock_ptr); q 3413 kernel/futex.c WARN_ON(!q.pi_state); q 3414 kernel/futex.c pi_mutex = &q.pi_state->pi_mutex; q 3417 kernel/futex.c spin_lock(q.lock_ptr); q 3426 kernel/futex.c res = fixup_owner(uaddr2, &q, !ret); q 3439 kernel/futex.c if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current) { q 3440 kernel/futex.c pi_state = q.pi_state; q 3445 kernel/futex.c unqueue_me_pi(&q); q 3465 kernel/futex.c put_futex_key(&q.key); q 97 kernel/latencytop.c int q, same = 1; q 105 kernel/latencytop.c for (q = 0; q < LT_BACKTRACEDEPTH; q++) { q 106 kernel/latencytop.c unsigned long record = lat->backtrace[q]; q 108 kernel/latencytop.c if (latency_record[i].backtrace[q] != record) { q 154 kernel/latencytop.c int i, q; q 182 kernel/latencytop.c for (q = 0; q < LT_BACKTRACEDEPTH; q++) { q 183 kernel/latencytop.c unsigned long record = lat.backtrace[q]; q 185 kernel/latencytop.c if (mylat->backtrace[q] != record) { q 227 kernel/latencytop.c int q; q 230 kernel/latencytop.c for (q = 0; q < LT_BACKTRACEDEPTH; q++) { q 231 kernel/latencytop.c unsigned long bt = lr->backtrace[q]; q 712 kernel/ptrace.c struct sigqueue *q; q 741 kernel/ptrace.c list_for_each_entry(q, &pending->list, list) { q 744 kernel/ptrace.c copy_siginfo(&info, &q->info); q 7 kernel/sched/swait.c void __init_swait_queue_head(struct swait_queue_head *q, const char *name, q 10 kernel/sched/swait.c raw_spin_lock_init(&q->lock); q 11 kernel/sched/swait.c lockdep_set_class_and_name(&q->lock, key, name); q 12 kernel/sched/swait.c INIT_LIST_HEAD(&q->task_list); q 22 kernel/sched/swait.c void swake_up_locked(struct swait_queue_head *q) q 26 kernel/sched/swait.c if (list_empty(&q->task_list)) q 29 kernel/sched/swait.c curr = list_first_entry(&q->task_list, typeof(*curr), task_list); q 35 kernel/sched/swait.c void swake_up_one(struct swait_queue_head *q) q 39 kernel/sched/swait.c raw_spin_lock_irqsave(&q->lock, flags); q 40 kernel/sched/swait.c swake_up_locked(q); q 41 kernel/sched/swait.c raw_spin_unlock_irqrestore(&q->lock, flags); q 49 kernel/sched/swait.c void swake_up_all(struct swait_queue_head *q) q 54 kernel/sched/swait.c raw_spin_lock_irq(&q->lock); q 55 kernel/sched/swait.c list_splice_init(&q->task_list, &tmp); q 65 kernel/sched/swait.c raw_spin_unlock_irq(&q->lock); q 66 kernel/sched/swait.c raw_spin_lock_irq(&q->lock); q 68 kernel/sched/swait.c raw_spin_unlock_irq(&q->lock); q 72 kernel/sched/swait.c static void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait) q 76 kernel/sched/swait.c list_add_tail(&wait->task_list, &q->task_list); q 79 kernel/sched/swait.c void prepare_to_swait_exclusive(struct swait_queue_head *q, struct swait_queue *wait, int state) q 83 kernel/sched/swait.c raw_spin_lock_irqsave(&q->lock, flags); q 84 kernel/sched/swait.c __prepare_to_swait(q, wait); q 86 kernel/sched/swait.c raw_spin_unlock_irqrestore(&q->lock, flags); q 90 kernel/sched/swait.c long prepare_to_swait_event(struct swait_queue_head *q, struct 
swait_queue *wait, int state) q 95 kernel/sched/swait.c raw_spin_lock_irqsave(&q->lock, flags); q 104 kernel/sched/swait.c __prepare_to_swait(q, wait); q 107 kernel/sched/swait.c raw_spin_unlock_irqrestore(&q->lock, flags); q 113 kernel/sched/swait.c void __finish_swait(struct swait_queue_head *q, struct swait_queue *wait) q 120 kernel/sched/swait.c void finish_swait(struct swait_queue_head *q, struct swait_queue *wait) q 127 kernel/sched/swait.c raw_spin_lock_irqsave(&q->lock, flags); q 129 kernel/sched/swait.c raw_spin_unlock_irqrestore(&q->lock, flags); q 414 kernel/signal.c struct sigqueue *q = NULL; q 434 kernel/signal.c q = kmem_cache_alloc(sigqueue_cachep, flags); q 439 kernel/signal.c if (unlikely(q == NULL)) { q 443 kernel/signal.c INIT_LIST_HEAD(&q->list); q 444 kernel/signal.c q->flags = 0; q 445 kernel/signal.c q->user = user; q 448 kernel/signal.c return q; q 451 kernel/signal.c static void __sigqueue_free(struct sigqueue *q) q 453 kernel/signal.c if (q->flags & SIGQUEUE_PREALLOC) q 455 kernel/signal.c if (atomic_dec_and_test(&q->user->sigpending)) q 456 kernel/signal.c free_uid(q->user); q 457 kernel/signal.c kmem_cache_free(sigqueue_cachep, q); q 462 kernel/signal.c struct sigqueue *q; q 466 kernel/signal.c q = list_entry(queue->list.next, struct sigqueue , list); q 467 kernel/signal.c list_del_init(&q->list); q 468 kernel/signal.c __sigqueue_free(q); q 491 kernel/signal.c struct sigqueue *q, *n; q 496 kernel/signal.c list_for_each_entry_safe(q, n, &pending->list, list) { q 497 kernel/signal.c int sig = q->info.si_signo; q 499 kernel/signal.c if (likely(q->info.si_code != SI_TIMER)) { q 503 kernel/signal.c list_del_init(&q->list); q 504 kernel/signal.c __sigqueue_free(q); q 570 kernel/signal.c struct sigqueue *q, *first = NULL; q 576 kernel/signal.c list_for_each_entry(q, &list->list, list) { q 577 kernel/signal.c if (q->info.si_signo == sig) { q 580 kernel/signal.c first = q; q 710 kernel/signal.c struct sigqueue *q, *sync = NULL; q 721 kernel/signal.c list_for_each_entry(q, &pending->list, list) { q 723 kernel/signal.c if ((q->info.si_code > SI_USER) && q 724 kernel/signal.c (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) { q 725 kernel/signal.c sync = q; q 734 kernel/signal.c list_for_each_entry_continue(q, &pending->list, list) { q 735 kernel/signal.c if (q->info.si_signo == sync->info.si_signo) q 781 kernel/signal.c struct sigqueue *q, *n; q 789 kernel/signal.c list_for_each_entry_safe(q, n, &s->list, list) { q 790 kernel/signal.c if (sigismember(mask, q->info.si_signo)) { q 791 kernel/signal.c list_del_init(&q->list); q 792 kernel/signal.c __sigqueue_free(q); q 1074 kernel/signal.c struct sigqueue *q; q 1115 kernel/signal.c q = __sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit); q 1116 kernel/signal.c if (q) { q 1117 kernel/signal.c list_add_tail(&q->list, &pending->list); q 1120 kernel/signal.c clear_siginfo(&q->info); q 1121 kernel/signal.c q->info.si_signo = sig; q 1122 kernel/signal.c q->info.si_errno = 0; q 1123 kernel/signal.c q->info.si_code = SI_USER; q 1124 kernel/signal.c q->info.si_pid = task_tgid_nr_ns(current, q 1127 kernel/signal.c q->info.si_uid = q 1133 kernel/signal.c clear_siginfo(&q->info); q 1134 kernel/signal.c q->info.si_signo = sig; q 1135 kernel/signal.c q->info.si_errno = 0; q 1136 kernel/signal.c q->info.si_code = SI_KERNEL; q 1137 kernel/signal.c q->info.si_pid = 0; q 1138 kernel/signal.c q->info.si_uid = 0; q 1141 kernel/signal.c copy_siginfo(&q->info, info); q 1813 kernel/signal.c struct sigqueue *q = __sigqueue_alloc(-1, current, 
GFP_KERNEL, 0); q 1815 kernel/signal.c if (q) q 1816 kernel/signal.c q->flags |= SIGQUEUE_PREALLOC; q 1818 kernel/signal.c return q; q 1821 kernel/signal.c void sigqueue_free(struct sigqueue *q) q 1826 kernel/signal.c BUG_ON(!(q->flags & SIGQUEUE_PREALLOC)); q 1833 kernel/signal.c q->flags &= ~SIGQUEUE_PREALLOC; q 1838 kernel/signal.c if (!list_empty(&q->list)) q 1839 kernel/signal.c q = NULL; q 1842 kernel/signal.c if (q) q 1843 kernel/signal.c __sigqueue_free(q); q 1846 kernel/signal.c int send_sigqueue(struct sigqueue *q, struct pid *pid, enum pid_type type) q 1848 kernel/signal.c int sig = q->info.si_signo; q 1854 kernel/signal.c BUG_ON(!(q->flags & SIGQUEUE_PREALLOC)); q 1868 kernel/signal.c if (unlikely(!list_empty(&q->list))) { q 1873 kernel/signal.c BUG_ON(q->info.si_code != SI_TIMER); q 1874 kernel/signal.c q->info.si_overrun++; q 1878 kernel/signal.c q->info.si_overrun = 0; q 1882 kernel/signal.c list_add_tail(&q->list, &pending->list); q 1887 kernel/signal.c trace_signal_generate(sig, &q->info, t, type != PIDTYPE_PID, result); q 344 kernel/trace/blktrace.c static int __blk_trace_remove(struct request_queue *q) q 348 kernel/trace/blktrace.c bt = xchg(&q->blk_trace, NULL); q 358 kernel/trace/blktrace.c int blk_trace_remove(struct request_queue *q) q 362 kernel/trace/blktrace.c mutex_lock(&q->blk_trace_mutex); q 363 kernel/trace/blktrace.c ret = __blk_trace_remove(q); q 364 kernel/trace/blktrace.c mutex_unlock(&q->blk_trace_mutex); q 475 kernel/trace/blktrace.c static int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev, q 548 kernel/trace/blktrace.c if (cmpxchg(&q->blk_trace, NULL, bt)) q 562 kernel/trace/blktrace.c static int __blk_trace_setup(struct request_queue *q, char *name, dev_t dev, q 572 kernel/trace/blktrace.c ret = do_blk_trace_setup(q, name, dev, bdev, &buts); q 577 kernel/trace/blktrace.c __blk_trace_remove(q); q 583 kernel/trace/blktrace.c int blk_trace_setup(struct request_queue *q, char *name, dev_t dev, q 589 kernel/trace/blktrace.c mutex_lock(&q->blk_trace_mutex); q 590 kernel/trace/blktrace.c ret = __blk_trace_setup(q, name, dev, bdev, arg); q 591 kernel/trace/blktrace.c mutex_unlock(&q->blk_trace_mutex); q 598 kernel/trace/blktrace.c static int compat_blk_trace_setup(struct request_queue *q, char *name, q 618 kernel/trace/blktrace.c ret = do_blk_trace_setup(q, name, dev, bdev, &buts); q 623 kernel/trace/blktrace.c __blk_trace_remove(q); q 631 kernel/trace/blktrace.c static int __blk_trace_startstop(struct request_queue *q, int start) q 636 kernel/trace/blktrace.c bt = rcu_dereference_protected(q->blk_trace, q 637 kernel/trace/blktrace.c lockdep_is_held(&q->blk_trace_mutex)); q 673 kernel/trace/blktrace.c int blk_trace_startstop(struct request_queue *q, int start) q 677 kernel/trace/blktrace.c mutex_lock(&q->blk_trace_mutex); q 678 kernel/trace/blktrace.c ret = __blk_trace_startstop(q, start); q 679 kernel/trace/blktrace.c mutex_unlock(&q->blk_trace_mutex); q 700 kernel/trace/blktrace.c struct request_queue *q; q 704 kernel/trace/blktrace.c q = bdev_get_queue(bdev); q 705 kernel/trace/blktrace.c if (!q) q 708 kernel/trace/blktrace.c mutex_lock(&q->blk_trace_mutex); q 713 kernel/trace/blktrace.c ret = __blk_trace_setup(q, b, bdev->bd_dev, bdev, arg); q 718 kernel/trace/blktrace.c ret = compat_blk_trace_setup(q, b, bdev->bd_dev, bdev, arg); q 725 kernel/trace/blktrace.c ret = __blk_trace_startstop(q, start); q 728 kernel/trace/blktrace.c ret = __blk_trace_remove(q); q 735 kernel/trace/blktrace.c mutex_unlock(&q->blk_trace_mutex); q 744 
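The kernel/trace/blktrace.c entries above install and remove the per-queue trace with atomic pointer operations: cmpxchg(&q->blk_trace, NULL, bt) publishes a trace only when none is attached, and xchg(&q->blk_trace, NULL) detaches whatever is there. A minimal standalone sketch of that install-once/detach pattern, using C11 atomics in place of the kernel primitives (struct trace and the function names are illustrative, not kernel identifiers):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct trace { int id; };                 /* stand-in for struct blk_trace */
static _Atomic(struct trace *) active;    /* stand-in for q->blk_trace */

/* Install tr only if no trace is active; mirrors cmpxchg(&q->blk_trace, NULL, bt). */
static int trace_setup(struct trace *tr)
{
	struct trace *expected = NULL;
	if (!atomic_compare_exchange_strong(&active, &expected, tr))
		return -1;                /* one is already installed */
	return 0;
}

/* Detach whatever is active; mirrors bt = xchg(&q->blk_trace, NULL). */
static struct trace *trace_remove(void)
{
	return atomic_exchange(&active, NULL);
}

int main(void)
{
	struct trace *t = malloc(sizeof(*t));
	t->id = 1;
	printf("setup: %d\n", trace_setup(t));       /* 0: installed */
	printf("setup again: %d\n", trace_setup(t)); /* -1: already present */
	free(trace_remove());
	return 0;
}
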
kernel/trace/blktrace.c void blk_trace_shutdown(struct request_queue *q) q 746 kernel/trace/blktrace.c mutex_lock(&q->blk_trace_mutex); q 747 kernel/trace/blktrace.c if (rcu_dereference_protected(q->blk_trace, q 748 kernel/trace/blktrace.c lockdep_is_held(&q->blk_trace_mutex))) { q 749 kernel/trace/blktrace.c __blk_trace_startstop(q, 0); q 750 kernel/trace/blktrace.c __blk_trace_remove(q); q 753 kernel/trace/blktrace.c mutex_unlock(&q->blk_trace_mutex); q 758 kernel/trace/blktrace.c blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio) q 763 kernel/trace/blktrace.c bt = rcu_dereference_protected(q->blk_trace, 1); q 773 kernel/trace/blktrace.c blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio) q 780 kernel/trace/blktrace.c blk_trace_request_get_cgid(struct request_queue *q, struct request *rq) q 785 kernel/trace/blktrace.c return blk_trace_bio_get_cgid(q, rq->bio); q 811 kernel/trace/blktrace.c bt = rcu_dereference(rq->q->blk_trace); q 828 kernel/trace/blktrace.c struct request_queue *q, struct request *rq) q 831 kernel/trace/blktrace.c blk_trace_request_get_cgid(q, rq)); q 835 kernel/trace/blktrace.c struct request_queue *q, struct request *rq) q 838 kernel/trace/blktrace.c blk_trace_request_get_cgid(q, rq)); q 842 kernel/trace/blktrace.c struct request_queue *q, q 846 kernel/trace/blktrace.c blk_trace_request_get_cgid(q, rq)); q 853 kernel/trace/blktrace.c blk_trace_request_get_cgid(rq->q, rq)); q 867 kernel/trace/blktrace.c static void blk_add_trace_bio(struct request_queue *q, struct bio *bio, q 873 kernel/trace/blktrace.c bt = rcu_dereference(q->blk_trace); q 881 kernel/trace/blktrace.c blk_trace_bio_get_cgid(q, bio)); q 886 kernel/trace/blktrace.c struct request_queue *q, struct bio *bio) q 888 kernel/trace/blktrace.c blk_add_trace_bio(q, bio, BLK_TA_BOUNCE, 0); q 892 kernel/trace/blktrace.c struct request_queue *q, struct bio *bio, q 895 kernel/trace/blktrace.c blk_add_trace_bio(q, bio, BLK_TA_COMPLETE, error); q 899 kernel/trace/blktrace.c struct request_queue *q, q 903 kernel/trace/blktrace.c blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE, 0); q 907 kernel/trace/blktrace.c struct request_queue *q, q 911 kernel/trace/blktrace.c blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE, 0); q 915 kernel/trace/blktrace.c struct request_queue *q, struct bio *bio) q 917 kernel/trace/blktrace.c blk_add_trace_bio(q, bio, BLK_TA_QUEUE, 0); q 921 kernel/trace/blktrace.c struct request_queue *q, q 925 kernel/trace/blktrace.c blk_add_trace_bio(q, bio, BLK_TA_GETRQ, 0); q 930 kernel/trace/blktrace.c bt = rcu_dereference(q->blk_trace); q 940 kernel/trace/blktrace.c struct request_queue *q, q 944 kernel/trace/blktrace.c blk_add_trace_bio(q, bio, BLK_TA_SLEEPRQ, 0); q 949 kernel/trace/blktrace.c bt = rcu_dereference(q->blk_trace); q 957 kernel/trace/blktrace.c static void blk_add_trace_plug(void *ignore, struct request_queue *q) q 962 kernel/trace/blktrace.c bt = rcu_dereference(q->blk_trace); q 968 kernel/trace/blktrace.c static void blk_add_trace_unplug(void *ignore, struct request_queue *q, q 974 kernel/trace/blktrace.c bt = rcu_dereference(q->blk_trace); q 990 kernel/trace/blktrace.c struct request_queue *q, struct bio *bio, q 996 kernel/trace/blktrace.c bt = rcu_dereference(q->blk_trace); q 1003 kernel/trace/blktrace.c &rpdu, blk_trace_bio_get_cgid(q, bio)); q 1022 kernel/trace/blktrace.c struct request_queue *q, struct bio *bio, q 1029 kernel/trace/blktrace.c bt = rcu_dereference(q->blk_trace); q 1041 kernel/trace/blktrace.c sizeof(r), &r, blk_trace_bio_get_cgid(q, bio)); q 1059 
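The blk_add_trace_* hooks above all begin with bt = rcu_dereference(q->blk_trace) and return immediately when no trace is attached, which keeps the disabled path cheap. A sketch of that guarded-emit pattern, with a C11 acquire load standing in for rcu_dereference() (real RCU additionally defers freeing until readers are done; the names here are illustrative):

#include <stdatomic.h>
#include <stdio.h>

struct trace { const char *name; };
static _Atomic(struct trace *) active;

/* Hot-path guard: load the (possibly absent) trace once, bail out cheaply
 * when tracing is off. */
static void add_trace_event(const char *what)
{
	struct trace *bt = atomic_load_explicit(&active, memory_order_acquire);
	if (!bt)
		return;                /* tracing disabled: no work, no locking */
	printf("[%s] %s\n", bt->name, what);
}

int main(void)
{
	add_trace_event("bio queue");      /* dropped: no trace installed */
	static struct trace t = { "blk" };
	atomic_store_explicit(&active, &t, memory_order_release);
	add_trace_event("bio complete");   /* now emitted */
	return 0;
}
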
kernel/trace/blktrace.c struct request_queue *q, q 1067 kernel/trace/blktrace.c bt = rcu_dereference(q->blk_trace); q 1079 kernel/trace/blktrace.c sizeof(r), &r, blk_trace_request_get_cgid(q, rq)); q 1094 kernel/trace/blktrace.c void blk_add_driver_data(struct request_queue *q, q 1101 kernel/trace/blktrace.c bt = rcu_dereference(q->blk_trace); q 1109 kernel/trace/blktrace.c blk_trace_request_get_cgid(q, rq)); q 1628 kernel/trace/blktrace.c static int blk_trace_remove_queue(struct request_queue *q) q 1632 kernel/trace/blktrace.c bt = xchg(&q->blk_trace, NULL); q 1645 kernel/trace/blktrace.c static int blk_trace_setup_queue(struct request_queue *q, q 1665 kernel/trace/blktrace.c if (cmpxchg(&q->blk_trace, NULL, bt)) q 1797 kernel/trace/blktrace.c struct request_queue *q; q 1806 kernel/trace/blktrace.c q = blk_trace_get_queue(bdev); q 1807 kernel/trace/blktrace.c if (q == NULL) q 1810 kernel/trace/blktrace.c mutex_lock(&q->blk_trace_mutex); q 1812 kernel/trace/blktrace.c bt = rcu_dereference_protected(q->blk_trace, q 1813 kernel/trace/blktrace.c lockdep_is_held(&q->blk_trace_mutex)); q 1831 kernel/trace/blktrace.c mutex_unlock(&q->blk_trace_mutex); q 1843 kernel/trace/blktrace.c struct request_queue *q; q 1870 kernel/trace/blktrace.c q = blk_trace_get_queue(bdev); q 1871 kernel/trace/blktrace.c if (q == NULL) q 1874 kernel/trace/blktrace.c mutex_lock(&q->blk_trace_mutex); q 1876 kernel/trace/blktrace.c bt = rcu_dereference_protected(q->blk_trace, q 1877 kernel/trace/blktrace.c lockdep_is_held(&q->blk_trace_mutex)); q 1884 kernel/trace/blktrace.c ret = blk_trace_setup_queue(q, bdev); q 1886 kernel/trace/blktrace.c ret = blk_trace_remove_queue(q); q 1892 kernel/trace/blktrace.c ret = blk_trace_setup_queue(q, bdev); q 1893 kernel/trace/blktrace.c bt = rcu_dereference_protected(q->blk_trace, q 1894 kernel/trace/blktrace.c lockdep_is_held(&q->blk_trace_mutex)); q 1909 kernel/trace/blktrace.c mutex_unlock(&q->blk_trace_mutex); q 1161 kernel/trace/trace_events_filter.c char q; q 1244 kernel/trace/trace_events_filter.c q = str[i]; q 1246 kernel/trace/trace_events_filter.c q = 0; q 1249 kernel/trace/trace_events_filter.c if (q && str[i] == q) q 1251 kernel/trace/trace_events_filter.c if (!q && (str[i] == ')' || str[i] == '&' || q 1256 kernel/trace/trace_events_filter.c if (q) q 1270 kernel/trace/trace_events_filter.c char q = str[i]; q 1292 kernel/trace/trace_events_filter.c if (str[i] == q) q 759 lib/bch.c const struct gf_poly *b, struct gf_poly *q) q 762 lib/bch.c q->deg = a->deg-b->deg; q 766 lib/bch.c memcpy(q->c, &a->c[b->deg], (1+q->deg)*sizeof(unsigned int)); q 768 lib/bch.c q->deg = 0; q 769 lib/bch.c q->c[0] = 0; q 852 lib/bch.c struct gf_poly *q = bch->poly_2t[1]; q 871 lib/bch.c gf_poly_div(bch, f, gcd, q); q 875 lib/bch.c gf_poly_copy(*h, q); q 62 lib/crc32.c # define DO_CRC4 (t3[(q) & 255] ^ t2[(q >> 8) & 255] ^ \ q 63 lib/crc32.c t1[(q >> 16) & 255] ^ t0[(q >> 24) & 255]) q 64 lib/crc32.c # define DO_CRC8 (t7[(q) & 255] ^ t6[(q >> 8) & 255] ^ \ q 65 lib/crc32.c t5[(q >> 16) & 255] ^ t4[(q >> 24) & 255]) q 68 lib/crc32.c # define DO_CRC4 (t0[(q) & 255] ^ t1[(q >> 8) & 255] ^ \ q 69 lib/crc32.c t2[(q >> 16) & 255] ^ t3[(q >> 24) & 255]) q 70 lib/crc32.c # define DO_CRC8 (t4[(q) & 255] ^ t5[(q >> 8) & 255] ^ \ q 71 lib/crc32.c t6[(q >> 16) & 255] ^ t7[(q >> 24) & 255]) q 82 lib/crc32.c u32 q; q 106 lib/crc32.c q = crc ^ *++b; /* use pre increment for speed */ q 111 lib/crc32.c q = *++b; q 346 lib/inflate.c register struct huft *q; /* points to current table */ q 449 lib/inflate.c q = 
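The lib/crc32.c DO_CRC4/DO_CRC8 macros above index four or eight 256-entry tables so that each step consumes a whole 32-bit word. A sketch of the underlying table-driven method with a single table, one byte per step (reflected CRC-32, polynomial 0xEDB88320); the printed check value for "123456789" should be cbf43926:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t crc_table[256];

/* Build the single lookup table; the DO_CRC4/DO_CRC8 variants above use
 * four or eight such tables to process a word at a time. */
static void crc32_init(void)
{
	for (uint32_t i = 0; i < 256; i++) {
		uint32_t c = i;
		for (int k = 0; k < 8; k++)
			c = (c & 1) ? 0xEDB88320u ^ (c >> 1) : c >> 1;
		crc_table[i] = c;
	}
}

static uint32_t crc32(uint32_t crc, const unsigned char *p, size_t len)
{
	crc = ~crc;
	while (len--)
		crc = crc_table[(crc ^ *p++) & 0xff] ^ (crc >> 8);
	return ~crc;
}

int main(void)
{
	crc32_init();
	const char *s = "123456789";
	printf("%08x\n", crc32(0, (const unsigned char *)s, strlen(s)));
	return 0;
}
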
(struct huft *)NULL; /* ditto */ q 488 lib/inflate.c if ((q = (struct huft *)malloc((z + 1)*sizeof(struct huft))) == q 498 lib/inflate.c *t = q + 1; /* link to list for huft_free() */ q 499 lib/inflate.c *(t = &(q->v.t)) = (struct huft *)NULL; q 500 lib/inflate.c u[h] = ++q; /* table starts after link */ q 509 lib/inflate.c r.v.t = q; /* pointer to this table */ q 537 lib/inflate.c q[j] = r; q 574 lib/inflate.c register struct huft *p, *q; q 581 lib/inflate.c q = (--p)->v.t; q 583 lib/inflate.c p = q; q 55 lib/math/cordic.c coord.q = 0; q 73 lib/math/cordic.c valtmp = coord.i - (coord.q >> iter); q 74 lib/math/cordic.c coord.q += (coord.i >> iter); q 77 lib/math/cordic.c valtmp = coord.i + (coord.q >> iter); q 78 lib/math/cordic.c coord.q -= (coord.i >> iter); q 85 lib/math/cordic.c coord.q *= signx; q 144 lib/mpi/longlong.h #define udiv_qrnnd(q, r, n1, n0, d) \ q 146 lib/mpi/longlong.h : "=r" ((USItype)(q)), \ q 162 lib/mpi/longlong.h #define udiv_qrnnd(q, r, n1, n0, d) \ q 164 lib/mpi/longlong.h (q) = __udiv_qrnnd(&__r, (n1), (n0), (d)); \ q 287 lib/mpi/longlong.h #define udiv_qrnnd(q, r, nh, nl, d) \ q 289 lib/mpi/longlong.h : "=g" ((USItype)(q)), \ q 339 lib/mpi/longlong.h #define udiv_qrnnd(q, r, n1, n0, d) \ q 341 lib/mpi/longlong.h (q) = __udiv_qrnnd(&__r, (n1), (n0), (d)); \ q 379 lib/mpi/longlong.h #define sdiv_qrnnd(q, r, n1, n0, d) \ q 388 lib/mpi/longlong.h (q) = __xx.__i.__l; (r) = __xx.__i.__h; \ q 421 lib/mpi/longlong.h #define udiv_qrnnd(q, r, n1, n0, d) \ q 423 lib/mpi/longlong.h : "=a" (q), \ q 486 lib/mpi/longlong.h #define udiv_qrnnd(q, r, nh, nl, d) \ q 496 lib/mpi/longlong.h (r) = __rq.__i.__l; (q) = __rq.__i.__h; \ q 541 lib/mpi/longlong.h #define udiv_qrnnd(q, r, n1, n0, d) \ q 543 lib/mpi/longlong.h : "=d" ((USItype)(q)), \ q 549 lib/mpi/longlong.h #define sdiv_qrnnd(q, r, n1, n0, d) \ q 551 lib/mpi/longlong.h : "=d" ((USItype)(q)), \ q 622 lib/mpi/longlong.h #define udiv_qrnnd(q, r, n1, n0, d) \ q 629 lib/mpi/longlong.h (r) = (n0) - __q.__l * (d); (q) = __q.__l; }) q 705 lib/mpi/longlong.h #define udiv_qrnnd(q, r, n1, n0, d) \ q 714 lib/mpi/longlong.h (r) = __xx.__i.__l; (q) = __xx.__i.__h; }) q 827 lib/mpi/longlong.h #define sdiv_qrnnd(q, r, nh, nl, d) \ q 829 lib/mpi/longlong.h : "=r" ((SItype)(q)), "=q" ((SItype)(r)) \ q 983 lib/mpi/longlong.h #define udiv_qrnnd(q, r, n1, n0, d) \ q 992 lib/mpi/longlong.h (q) = __q; \ q 1007 lib/mpi/longlong.h #define udiv_qrnnd(q, r, n1, n0, d) \ q 1047 lib/mpi/longlong.h : "=r" ((USItype)(q)), \ q 1107 lib/mpi/longlong.h #define udiv_qrnnd(q, r, n1, n0, d) \ q 1132 lib/mpi/longlong.h : "=&r" ((USItype)(q)), \ q 1177 lib/mpi/longlong.h #define sdiv_qrnnd(q, r, n1, n0, d) \ q 1184 lib/mpi/longlong.h : "=g" (q), "=g" (r) \ q 1308 lib/mpi/longlong.h #define __udiv_qrnnd_c(q, r, n1, n0, d) \ q 1338 lib/mpi/longlong.h (q) = (UWtype) __q1 * __ll_B | __q0; \ q 1345 lib/mpi/longlong.h #define udiv_qrnnd(q, r, nh, nl, d) \ q 1348 lib/mpi/longlong.h (q) = __MPN(udiv_w_sdiv) (&__r, nh, nl, d); \ q 104 lib/mpi/mpih-div.c mpi_limb_t q; q 116 lib/mpi/mpih-div.c q = ~(mpi_limb_t) 0; q 122 lib/mpi/mpih-div.c qp[i] = q; q 128 lib/mpi/mpih-div.c udiv_qrnnd(q, r, n1, n0, d1); q 129 lib/mpi/mpih-div.c umul_ppmm(n1, n0, d0, q); q 136 lib/mpi/mpih-div.c q--; q 143 lib/mpi/mpih-div.c qp[i] = q; q 171 lib/mpi/mpih-div.c mpi_limb_t q; q 187 lib/mpi/mpih-div.c q = ~(mpi_limb_t) 0; q 191 lib/mpi/mpih-div.c udiv_qrnnd(q, r, n0, np[dsize - 1], dX); q 192 lib/mpi/mpih-div.c umul_ppmm(n1, n0, d1, q); q 197 lib/mpi/mpih-div.c q--; q 209 lib/mpi/mpih-div.c 
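udiv_qrnnd(q, r, n1, n0, d), used throughout the lib/mpi entries above, divides the two-word value (n1:n0) by d and yields a one-word quotient and remainder; lib/mpi/longlong.h supplies it as per-architecture inline assembly. A portable model of the contract, assuming GCC/Clang __int128 on a 64-bit host:

#include <stdint.h>
#include <stdio.h>

/* Divide the 128-bit value (n1:n0) by d. Requires n1 < d, which the
 * normalized callers in lib/mpi/mpih-div.c ensure, so the quotient fits
 * in one 64-bit word. */
static void udiv_qrnnd(uint64_t *q, uint64_t *r,
		       uint64_t n1, uint64_t n0, uint64_t d)
{
	unsigned __int128 n = ((unsigned __int128)n1 << 64) | n0;
	*q = (uint64_t)(n / d);
	*r = (uint64_t)(n % d);
}

int main(void)
{
	uint64_t q, r;
	udiv_qrnnd(&q, &r, 1, 0, 3);   /* 2^64 / 3 */
	printf("q=%llu r=%llu\n", (unsigned long long)q, (unsigned long long)r);
	return 0;
}
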
cy_limb = mpihelp_submul_1(np, dp, dsize, q); q 213 lib/mpi/mpih-div.c q--; q 216 lib/mpi/mpih-div.c qp[i] = q; q 39 lib/raid6/avx2.c u8 *p, *q; q 44 lib/raid6/avx2.c q = dptr[z0+2]; /* RS syndrome */ q 76 lib/raid6/avx2.c asm volatile("vmovntdq %%ymm4,%0" : "=m" (q[d])); q 88 lib/raid6/avx2.c u8 *p, *q; q 93 lib/raid6/avx2.c q = dptr[disks-1]; /* RS syndrome */ q 122 lib/raid6/avx2.c asm volatile("vpxor %0,%%ymm4,%%ymm4" : : "m" (q[d])); q 124 lib/raid6/avx2.c asm volatile("vmovdqa %%ymm4,%0" : "=m" (q[d])); q 146 lib/raid6/avx2.c u8 *p, *q; q 151 lib/raid6/avx2.c q = dptr[z0+2]; /* RS syndrome */ q 186 lib/raid6/avx2.c asm volatile("vmovntdq %%ymm4,%0" : "=m" (q[d])); q 187 lib/raid6/avx2.c asm volatile("vmovntdq %%ymm6,%0" : "=m" (q[d+32])); q 198 lib/raid6/avx2.c u8 *p, *q; q 203 lib/raid6/avx2.c q = dptr[disks-1]; /* RS syndrome */ q 249 lib/raid6/avx2.c asm volatile("vpxor %0,%%ymm4,%%ymm4" : : "m" (q[d])); q 250 lib/raid6/avx2.c asm volatile("vpxor %0,%%ymm6,%%ymm6" : : "m" (q[d+32])); q 252 lib/raid6/avx2.c asm volatile("vmovdqa %%ymm4,%0" : "=m" (q[d])); q 253 lib/raid6/avx2.c asm volatile("vmovdqa %%ymm6,%0" : "=m" (q[d+32])); q 278 lib/raid6/avx2.c u8 *p, *q; q 283 lib/raid6/avx2.c q = dptr[z0+2]; /* RS syndrome */ q 341 lib/raid6/avx2.c asm volatile("vmovntdq %%ymm4,%0" : "=m" (q[d])); q 343 lib/raid6/avx2.c asm volatile("vmovntdq %%ymm6,%0" : "=m" (q[d+32])); q 345 lib/raid6/avx2.c asm volatile("vmovntdq %%ymm12,%0" : "=m" (q[d+64])); q 347 lib/raid6/avx2.c asm volatile("vmovntdq %%ymm14,%0" : "=m" (q[d+96])); q 359 lib/raid6/avx2.c u8 *p, *q; q 364 lib/raid6/avx2.c q = dptr[disks-1]; /* RS syndrome */ q 423 lib/raid6/avx2.c asm volatile("prefetchnta %0" :: "m" (q[d])); q 424 lib/raid6/avx2.c asm volatile("prefetchnta %0" :: "m" (q[d+64])); q 452 lib/raid6/avx2.c asm volatile("vpxor %0,%%ymm4,%%ymm4" : : "m" (q[d])); q 453 lib/raid6/avx2.c asm volatile("vpxor %0,%%ymm6,%%ymm6" : : "m" (q[d+32])); q 454 lib/raid6/avx2.c asm volatile("vpxor %0,%%ymm12,%%ymm12" : : "m" (q[d+64])); q 455 lib/raid6/avx2.c asm volatile("vpxor %0,%%ymm14,%%ymm14" : : "m" (q[d+96])); q 456 lib/raid6/avx2.c asm volatile("vmovntdq %%ymm4,%0" : "=m" (q[d])); q 457 lib/raid6/avx2.c asm volatile("vmovntdq %%ymm6,%0" : "=m" (q[d+32])); q 458 lib/raid6/avx2.c asm volatile("vmovntdq %%ymm12,%0" : "=m" (q[d+64])); q 459 lib/raid6/avx2.c asm volatile("vmovntdq %%ymm14,%0" : "=m" (q[d+96])); q 47 lib/raid6/avx512.c u8 *p, *q; q 52 lib/raid6/avx512.c q = dptr[z0+2]; /* RS syndrome */ q 94 lib/raid6/avx512.c : "m" (p[d]), "m" (q[d])); q 105 lib/raid6/avx512.c u8 *p, *q; q 110 lib/raid6/avx512.c q = dptr[disks-1]; /* RS syndrome */ q 153 lib/raid6/avx512.c : "m" (q[d]), "m" (p[d])); q 174 lib/raid6/avx512.c u8 *p, *q; q 179 lib/raid6/avx512.c q = dptr[z0+2]; /* RS syndrome */ q 225 lib/raid6/avx512.c : "m" (p[d]), "m" (p[d+64]), "m" (q[d]), q 226 lib/raid6/avx512.c "m" (q[d+64])); q 237 lib/raid6/avx512.c u8 *p, *q; q 242 lib/raid6/avx512.c q = dptr[disks-1]; /* RS syndrome */ q 309 lib/raid6/avx512.c : "m" (q[d]), "m" (q[d+64]), "m" (p[d]), q 333 lib/raid6/avx512.c u8 *p, *q; q 338 lib/raid6/avx512.c q = dptr[z0+2]; /* RS syndrome */ q 415 lib/raid6/avx512.c "m" (p[d+192]), "m" (q[d]), "m" (q[d+64]), q 416 lib/raid6/avx512.c "m" (q[d+128]), "m" (q[d+192])); q 427 lib/raid6/avx512.c u8 *p, *q; q 432 lib/raid6/avx512.c q = dptr[disks-1]; /* RS syndrome */ q 505 lib/raid6/avx512.c : "m" (q[d]), "m" (q[d+128])); q 549 lib/raid6/avx512.c "m" (p[d+192]), "m" (q[d]), "m" (q[d+64]), q 550 lib/raid6/avx512.c "m" 
(q[d+128]), "m" (q[d+192])); q 38 lib/raid6/mmx.c u8 *p, *q; q 43 lib/raid6/mmx.c q = dptr[z0+2]; /* RS syndrome */ q 65 lib/raid6/mmx.c asm volatile("movq %%mm4,%0" : "=m" (q[d])); q 86 lib/raid6/mmx.c u8 *p, *q; q 91 lib/raid6/mmx.c q = dptr[z0+2]; /* RS syndrome */ q 124 lib/raid6/mmx.c asm volatile("movq %%mm4,%0" : "=m" (q[d])); q 125 lib/raid6/mmx.c asm volatile("movq %%mm6,%0" : "=m" (q[d+8])); q 23 lib/raid6/recov.c u8 *p, *q, *dp, *dq; q 29 lib/raid6/recov.c q = (u8 *)ptrs[disks-1]; q 47 lib/raid6/recov.c ptrs[disks-1] = q; q 56 lib/raid6/recov.c qx = qmul[*q ^ *dq]; q 59 lib/raid6/recov.c p++; q++; q 67 lib/raid6/recov.c u8 *p, *q, *dq; q 71 lib/raid6/recov.c q = (u8 *)ptrs[disks-1]; q 83 lib/raid6/recov.c ptrs[disks-1] = q; q 90 lib/raid6/recov.c *p++ ^= *dq = qmul[*q ^ *dq]; q 91 lib/raid6/recov.c q++; dq++; q 21 lib/raid6/recov_avx2.c u8 *p, *q, *dp, *dq; q 27 lib/raid6/recov_avx2.c q = (u8 *)ptrs[disks-1]; q 45 lib/raid6/recov_avx2.c ptrs[disks-1] = q; q 59 lib/raid6/recov_avx2.c asm volatile("vmovdqa %0, %%ymm1" : : "m" (q[0])); q 60 lib/raid6/recov_avx2.c asm volatile("vmovdqa %0, %%ymm9" : : "m" (q[32])); q 132 lib/raid6/recov_avx2.c q += 64; q 136 lib/raid6/recov_avx2.c asm volatile("vmovdqa %0, %%ymm1" : : "m" (*q)); q 179 lib/raid6/recov_avx2.c q += 32; q 191 lib/raid6/recov_avx2.c u8 *p, *q, *dq; q 196 lib/raid6/recov_avx2.c q = (u8 *)ptrs[disks-1]; q 208 lib/raid6/recov_avx2.c ptrs[disks-1] = q; q 221 lib/raid6/recov_avx2.c asm volatile("vpxor %0, %%ymm3, %%ymm3" : : "m" (q[0])); q 222 lib/raid6/recov_avx2.c asm volatile("vpxor %0, %%ymm8, %%ymm8" : : "m" (q[32])); q 267 lib/raid6/recov_avx2.c q += 64; q 271 lib/raid6/recov_avx2.c asm volatile("vpxor %0, %%ymm3, %%ymm3" : : "m" (q[0])); q 297 lib/raid6/recov_avx2.c q += 32; q 27 lib/raid6/recov_avx512.c u8 *p, *q, *dp, *dq; q 33 lib/raid6/recov_avx512.c q = (u8 *)ptrs[disks-1]; q 54 lib/raid6/recov_avx512.c ptrs[disks-1] = q; q 77 lib/raid6/recov_avx512.c : "m" (q[0]), "m" (q[64]), "m" (p[0]), q 157 lib/raid6/recov_avx512.c q += 128; q 166 lib/raid6/recov_avx512.c : "m" (*q), "m" (*p), "m"(*dq), "m" (*dp)); q 218 lib/raid6/recov_avx512.c q += 64; q 230 lib/raid6/recov_avx512.c u8 *p, *q, *dq; q 235 lib/raid6/recov_avx512.c q = (u8 *)ptrs[disks-1]; q 250 lib/raid6/recov_avx512.c ptrs[disks-1] = q; q 266 lib/raid6/recov_avx512.c : "m" (dq[0]), "m" (dq[64]), "m" (q[0]), q 267 lib/raid6/recov_avx512.c "m" (q[64])); q 321 lib/raid6/recov_avx512.c q += 128; q 327 lib/raid6/recov_avx512.c : "m" (dq[0]), "m" (q[0])); q 361 lib/raid6/recov_avx512.c q += 64; q 22 lib/raid6/recov_neon.c void __raid6_2data_recov_neon(int bytes, uint8_t *p, uint8_t *q, uint8_t *dp, q 26 lib/raid6/recov_neon.c void __raid6_datap_recov_neon(int bytes, uint8_t *p, uint8_t *q, uint8_t *dq, q 32 lib/raid6/recov_neon.c u8 *p, *q, *dp, *dq; q 37 lib/raid6/recov_neon.c q = (u8 *)ptrs[disks - 1]; q 57 lib/raid6/recov_neon.c ptrs[disks - 1] = q; q 65 lib/raid6/recov_neon.c __raid6_2data_recov_neon(bytes, p, q, dp, dq, pbmul, qmul); q 72 lib/raid6/recov_neon.c u8 *p, *q, *dq; q 76 lib/raid6/recov_neon.c q = (u8 *)ptrs[disks - 1]; q 90 lib/raid6/recov_neon.c ptrs[disks - 1] = q; q 96 lib/raid6/recov_neon.c __raid6_datap_recov_neon(bytes, p, q, dq, qmul); q 27 lib/raid6/recov_neon_inner.c void __raid6_2data_recov_neon(int bytes, uint8_t *p, uint8_t *q, uint8_t *dp, q 53 lib/raid6/recov_neon_inner.c vx = veorq_u8(vld1q_u8(q), vld1q_u8(dq)); q 71 lib/raid6/recov_neon_inner.c q += 16; q 77 lib/raid6/recov_neon_inner.c void __raid6_datap_recov_neon(int bytes, 
uint8_t *p, uint8_t *q, uint8_t *dq, q 94 lib/raid6/recov_neon_inner.c vx = veorq_u8(vld1q_u8(q), vld1q_u8(dq)); q 107 lib/raid6/recov_neon_inner.c q += 16; q 26 lib/raid6/recov_s390xc.c u8 *p, *q, *dp, *dq; q 32 lib/raid6/recov_s390xc.c q = (u8 *)ptrs[disks-1]; q 50 lib/raid6/recov_s390xc.c ptrs[disks-1] = q; q 59 lib/raid6/recov_s390xc.c xor_block(dq, q); q 64 lib/raid6/recov_s390xc.c q += 256; q 75 lib/raid6/recov_s390xc.c u8 *p, *q, *dq; q 80 lib/raid6/recov_s390xc.c q = (u8 *)ptrs[disks-1]; q 92 lib/raid6/recov_s390xc.c ptrs[disks-1] = q; q 99 lib/raid6/recov_s390xc.c xor_block(dq, q); q 104 lib/raid6/recov_s390xc.c q += 256; q 21 lib/raid6/recov_ssse3.c u8 *p, *q, *dp, *dq; q 29 lib/raid6/recov_ssse3.c q = (u8 *)ptrs[disks-1]; q 47 lib/raid6/recov_ssse3.c ptrs[disks-1] = q; q 69 lib/raid6/recov_ssse3.c asm volatile("movdqa %0,%%xmm1" : : "m" (q[0])); q 70 lib/raid6/recov_ssse3.c asm volatile("movdqa %0,%%xmm9" : : "m" (q[16])); q 136 lib/raid6/recov_ssse3.c q += 32; q 140 lib/raid6/recov_ssse3.c asm volatile("movdqa %0,%%xmm1" : : "m" (*q)); q 183 lib/raid6/recov_ssse3.c q += 16; q 196 lib/raid6/recov_ssse3.c u8 *p, *q, *dq; q 203 lib/raid6/recov_ssse3.c q = (u8 *)ptrs[disks-1]; q 215 lib/raid6/recov_ssse3.c ptrs[disks-1] = q; q 228 lib/raid6/recov_ssse3.c asm volatile("pxor %0, %%xmm3" : : "m" (q[0])); q 233 lib/raid6/recov_ssse3.c asm volatile("pxor %0, %%xmm4" : : "m" (q[16])); q 281 lib/raid6/recov_ssse3.c q += 32; q 287 lib/raid6/recov_ssse3.c asm volatile("pxor %0, %%xmm3" : : "m" (q[0])); q 312 lib/raid6/recov_ssse3.c q += 16; q 43 lib/raid6/sse1.c u8 *p, *q; q 48 lib/raid6/sse1.c q = dptr[z0+2]; /* RS syndrome */ q 81 lib/raid6/sse1.c asm volatile("movntq %%mm4,%0" : "=m" (q[d])); q 102 lib/raid6/sse1.c u8 *p, *q; q 107 lib/raid6/sse1.c q = dptr[z0+2]; /* RS syndrome */ q 143 lib/raid6/sse1.c asm volatile("movntq %%mm4,%0" : "=m" (q[d])); q 144 lib/raid6/sse1.c asm volatile("movntq %%mm6,%0" : "=m" (q[d+8])); q 39 lib/raid6/sse2.c u8 *p, *q; q 44 lib/raid6/sse2.c q = dptr[z0+2]; /* RS syndrome */ q 78 lib/raid6/sse2.c asm volatile("movntdq %%xmm4,%0" : "=m" (q[d])); q 91 lib/raid6/sse2.c u8 *p, *q; q 96 lib/raid6/sse2.c q = dptr[disks-1]; /* RS syndrome */ q 125 lib/raid6/sse2.c asm volatile("pxor %0,%%xmm4" : : "m" (q[d])); q 127 lib/raid6/sse2.c asm volatile("movdqa %%xmm4,%0" : "=m" (q[d])); q 149 lib/raid6/sse2.c u8 *p, *q; q 154 lib/raid6/sse2.c q = dptr[z0+2]; /* RS syndrome */ q 190 lib/raid6/sse2.c asm volatile("movntdq %%xmm4,%0" : "=m" (q[d])); q 191 lib/raid6/sse2.c asm volatile("movntdq %%xmm6,%0" : "=m" (q[d+16])); q 202 lib/raid6/sse2.c u8 *p, *q; q 207 lib/raid6/sse2.c q = dptr[disks-1]; /* RS syndrome */ q 252 lib/raid6/sse2.c asm volatile("pxor %0,%%xmm4" : : "m" (q[d])); q 253 lib/raid6/sse2.c asm volatile("pxor %0,%%xmm6" : : "m" (q[d+16])); q 255 lib/raid6/sse2.c asm volatile("movdqa %%xmm4,%0" : "=m" (q[d])); q 256 lib/raid6/sse2.c asm volatile("movdqa %%xmm6,%0" : "=m" (q[d+16])); q 281 lib/raid6/sse2.c u8 *p, *q; q 286 lib/raid6/sse2.c q = dptr[z0+2]; /* RS syndrome */ q 350 lib/raid6/sse2.c asm volatile("movntdq %%xmm4,%0" : "=m" (q[d])); q 352 lib/raid6/sse2.c asm volatile("movntdq %%xmm6,%0" : "=m" (q[d+16])); q 354 lib/raid6/sse2.c asm volatile("movntdq %%xmm12,%0" : "=m" (q[d+32])); q 356 lib/raid6/sse2.c asm volatile("movntdq %%xmm14,%0" : "=m" (q[d+48])); q 368 lib/raid6/sse2.c u8 *p, *q; q 373 lib/raid6/sse2.c q = dptr[disks-1]; /* RS syndrome */ q 429 lib/raid6/sse2.c asm volatile("prefetchnta %0" :: "m" (q[d])); q 430 lib/raid6/sse2.c asm 
volatile("prefetchnta %0" :: "m" (q[d+32])); q 458 lib/raid6/sse2.c asm volatile("pxor %0,%%xmm4" : : "m" (q[d])); q 459 lib/raid6/sse2.c asm volatile("pxor %0,%%xmm6" : : "m" (q[d+16])); q 460 lib/raid6/sse2.c asm volatile("pxor %0,%%xmm12" : : "m" (q[d+32])); q 461 lib/raid6/sse2.c asm volatile("pxor %0,%%xmm14" : : "m" (q[d+48])); q 462 lib/raid6/sse2.c asm volatile("movntdq %%xmm4,%0" : "=m" (q[d])); q 463 lib/raid6/sse2.c asm volatile("movntdq %%xmm6,%0" : "=m" (q[d+16])); q 464 lib/raid6/sse2.c asm volatile("movntdq %%xmm12,%0" : "=m" (q[d+32])); q 465 lib/raid6/sse2.c asm volatile("movntdq %%xmm14,%0" : "=m" (q[d+48])); q 23 lib/reed_solomon/decode_rs.c uint16_t u, q, tmp, num1, num2, den, discr_r, syn_error; q 201 lib/reed_solomon/decode_rs.c q = 1; /* lambda[0] is always 0 */ q 205 lib/reed_solomon/decode_rs.c q ^= alpha_to[reg[j]]; q 208 lib/reed_solomon/decode_rs.c if (q != 0) q 135 lib/string_helpers.c char *p = *dst, *q = *src; q 137 lib/string_helpers.c switch (*q) { q 163 lib/string_helpers.c char *p = *dst, *q = *src; q 166 lib/string_helpers.c if (isodigit(*q) == 0) q 169 lib/string_helpers.c num = (*q++) & 7; q 170 lib/string_helpers.c while (num < 32 && isodigit(*q) && (q - *src < 3)) { q 172 lib/string_helpers.c num += (*q++) & 7; q 176 lib/string_helpers.c *src = q; q 182 lib/string_helpers.c char *p = *dst, *q = *src; q 186 lib/string_helpers.c if (*q++ != 'x') q 189 lib/string_helpers.c num = digit = hex_to_bin(*q++); q 193 lib/string_helpers.c digit = hex_to_bin(*q); q 195 lib/string_helpers.c q++; q 200 lib/string_helpers.c *src = q; q 206 lib/string_helpers.c char *p = *dst, *q = *src; q 208 lib/string_helpers.c switch (*q) { q 99 lib/test_hexdump.c const char *q = *result++; q 100 lib/test_hexdump.c size_t amount = strlen(q); q 102 lib/test_hexdump.c memcpy(p, q, amount); q 45 lib/ts_kmp.c unsigned int i, q = 0, text_len, consumed = state->offset; q 56 lib/ts_kmp.c while (q > 0 && kmp->pattern[q] q 58 lib/ts_kmp.c q = kmp->prefix_tbl[q - 1]; q 59 lib/ts_kmp.c if (kmp->pattern[q] q 61 lib/ts_kmp.c q++; q 62 lib/ts_kmp.c if (unlikely(q == kmp->pattern_len)) { q 77 lib/ts_kmp.c unsigned int k, q; q 80 lib/ts_kmp.c for (k = 0, q = 1; q < len; q++) { q 82 lib/ts_kmp.c != (icase ? toupper(pattern[q]) : pattern[q])) q 85 lib/ts_kmp.c == (icase ? 
toupper(pattern[q]) : pattern[q])) q 87 lib/ts_kmp.c prefix_tbl[q] = k; q 187 lib/vsprintf.c unsigned q; q 194 lib/vsprintf.c q = (r * (u64)0x28f5c29) >> 32; q 195 lib/vsprintf.c *((u16 *)buf) = decpair[r - 100*q]; q 199 lib/vsprintf.c if (q < 100) q 203 lib/vsprintf.c r = (q * (u64)0x28f5c29) >> 32; q 204 lib/vsprintf.c *((u16 *)buf) = decpair[q - 100*r]; q 212 lib/vsprintf.c q = (r * 0x147b) >> 19; q 213 lib/vsprintf.c *((u16 *)buf) = decpair[r - 100*q]; q 217 lib/vsprintf.c r = q; q 229 lib/vsprintf.c unsigned q; q 232 lib/vsprintf.c q = (r * (u64)0x28f5c29) >> 32; q 233 lib/vsprintf.c *((u16 *)buf) = decpair[r - 100*q]; q 237 lib/vsprintf.c r = (q * (u64)0x28f5c29) >> 32; q 238 lib/vsprintf.c *((u16 *)buf) = decpair[q - 100*r]; q 242 lib/vsprintf.c q = (r * 0x147b) >> 19; q 243 lib/vsprintf.c *((u16 *)buf) = decpair[r - 100*q]; q 247 lib/vsprintf.c *((u16 *)buf) = decpair[q]; q 269 lib/vsprintf.c unsigned q; q 272 lib/vsprintf.c q = (r * 0x147b) >> 19; q 273 lib/vsprintf.c *((u16 *)buf) = decpair[r - 100*q]; q 276 lib/vsprintf.c *((u16 *)buf) = decpair[q]; q 289 lib/vsprintf.c uint32_t q = (x * (uint64_t)0x346DC5D7) >> 43; q 291 lib/vsprintf.c put_dec_full4(buf, x - q * 10000); q 292 lib/vsprintf.c return q; q 303 lib/vsprintf.c uint32_t d3, d2, d1, q, h; q 315 lib/vsprintf.c q = 656 * d3 + 7296 * d2 + 5536 * d1 + ((uint32_t)n & 0xffff); q 316 lib/vsprintf.c q = put_dec_helper4(buf, q); q 318 lib/vsprintf.c q += 7671 * d3 + 9496 * d2 + 6 * d1; q 319 lib/vsprintf.c q = put_dec_helper4(buf+4, q); q 321 lib/vsprintf.c q += 4749 * d3 + 42 * d2; q 322 lib/vsprintf.c q = put_dec_helper4(buf+8, q); q 324 lib/vsprintf.c q += 281 * d3; q 326 lib/vsprintf.c if (q) q 327 lib/vsprintf.c buf = put_dec_trunc8(buf, q); q 231 lib/zlib_inflate/inflate.c #define REVERSE(q) \ q 232 lib/zlib_inflate/inflate.c ((((q) >> 24) & 0xff) + (((q) >> 8) & 0xff00) + \ q 233 lib/zlib_inflate/inflate.c (((q) & 0xff00) << 8) + (((q) & 0xff) << 24)) q 1057 mm/filemap.c wait_queue_head_t *q = page_waitqueue(page); q 1071 mm/filemap.c spin_lock_irqsave(&q->lock, flags); q 1072 mm/filemap.c __wake_up_locked_key_bookmark(q, TASK_NORMAL, &key, &bookmark); q 1081 mm/filemap.c spin_unlock_irqrestore(&q->lock, flags); q 1083 mm/filemap.c spin_lock_irqsave(&q->lock, flags); q 1084 mm/filemap.c __wake_up_locked_key_bookmark(q, TASK_NORMAL, &key, &bookmark); q 1096 mm/filemap.c if (!waitqueue_active(q) || !key.page_match) { q 1106 mm/filemap.c spin_unlock_irqrestore(&q->lock, flags); q 1131 mm/filemap.c static inline int wait_on_page_bit_common(wait_queue_head_t *q, q 1159 mm/filemap.c spin_lock_irq(&q->lock); q 1162 mm/filemap.c __add_wait_queue_entry_tail(q, wait); q 1168 mm/filemap.c spin_unlock_irq(&q->lock); q 1202 mm/filemap.c finish_wait(q, wait); q 1223 mm/filemap.c wait_queue_head_t *q = page_waitqueue(page); q 1224 mm/filemap.c wait_on_page_bit_common(q, page, bit_nr, TASK_UNINTERRUPTIBLE, SHARED); q 1230 mm/filemap.c wait_queue_head_t *q = page_waitqueue(page); q 1231 mm/filemap.c return wait_on_page_bit_common(q, page, bit_nr, TASK_KILLABLE, SHARED); q 1247 mm/filemap.c wait_queue_head_t *q; q 1250 mm/filemap.c q = page_waitqueue(page); q 1251 mm/filemap.c wait_on_page_bit_common(q, page, PG_locked, TASK_UNINTERRUPTIBLE, DROP); q 1263 mm/filemap.c wait_queue_head_t *q = page_waitqueue(page); q 1266 mm/filemap.c spin_lock_irqsave(&q->lock, flags); q 1267 mm/filemap.c __add_wait_queue_entry_tail(q, waiter); q 1269 mm/filemap.c spin_unlock_irqrestore(&q->lock, flags); q 1382 mm/filemap.c wait_queue_head_t *q = 
page_waitqueue(page); q 1383 mm/filemap.c wait_on_page_bit_common(q, page, PG_locked, TASK_UNINTERRUPTIBLE, q 1391 mm/filemap.c wait_queue_head_t *q = page_waitqueue(page); q 1392 mm/filemap.c return wait_on_page_bit_common(q, page, PG_locked, TASK_KILLABLE, q 50 mm/kasan/quarantine.c static bool qlist_empty(struct qlist_head *q) q 52 mm/kasan/quarantine.c return !q->head; q 55 mm/kasan/quarantine.c static void qlist_init(struct qlist_head *q) q 57 mm/kasan/quarantine.c q->head = q->tail = NULL; q 58 mm/kasan/quarantine.c q->bytes = 0; q 61 mm/kasan/quarantine.c static void qlist_put(struct qlist_head *q, struct qlist_node *qlink, q 64 mm/kasan/quarantine.c if (unlikely(qlist_empty(q))) q 65 mm/kasan/quarantine.c q->head = qlink; q 67 mm/kasan/quarantine.c q->tail->next = qlink; q 68 mm/kasan/quarantine.c q->tail = qlink; q 70 mm/kasan/quarantine.c q->bytes += size; q 154 mm/kasan/quarantine.c static void qlist_free_all(struct qlist_head *q, struct kmem_cache *cache) q 158 mm/kasan/quarantine.c if (unlikely(qlist_empty(q))) q 161 mm/kasan/quarantine.c qlink = q->head; q 170 mm/kasan/quarantine.c qlist_init(q); q 176 mm/kasan/quarantine.c struct qlist_head *q; q 189 mm/kasan/quarantine.c q = this_cpu_ptr(&cpu_quarantine); q 190 mm/kasan/quarantine.c qlist_put(q, &info->quarantine_link, cache->size); q 191 mm/kasan/quarantine.c if (unlikely(q->bytes > QUARANTINE_PERCPU_SIZE)) { q 192 mm/kasan/quarantine.c qlist_move_all(q, &temp); q 292 mm/kasan/quarantine.c struct qlist_head *q; q 294 mm/kasan/quarantine.c q = this_cpu_ptr(&cpu_quarantine); q 295 mm/kasan/quarantine.c qlist_move_cache(q, &to_free, cache); q 1162 mm/swapfile.c struct swap_info_struct *q) q 1168 mm/swapfile.c if (p != q) { q 1169 mm/swapfile.c if (q != NULL) q 1170 mm/swapfile.c spin_unlock(&q->lock); q 3090 mm/swapfile.c struct request_queue *q = bdev_get_queue(si->bdev); q 3092 mm/swapfile.c if (!q || !blk_queue_discard(q)) q 877 net/atm/lec.c int q; q 879 net/atm/lec.c for (q = state->misc_table; q < ARRAY_SIZE(lec_misc_tables); q++) { q 880 net/atm/lec.c v = lec_tbl_walk(state, lec_misc_tables[q], l); q 884 net/atm/lec.c state->misc_table = q; q 3850 net/bluetooth/hci_core.c int cnt, q; q 3868 net/bluetooth/hci_core.c q = cnt / num; q 3869 net/bluetooth/hci_core.c *quote = q ? q : 1; q 3905 net/bluetooth/hci_core.c int cnt, q, conn_num = 0; q 3974 net/bluetooth/hci_core.c q = cnt / num; q 3975 net/bluetooth/hci_core.c *quote = q ? 
q : 1; q 201 net/caif/caif_dev.c if (!(sch->flags & TCQ_F_NOLOCK) && likely(sch->q.qlen < high)) q 2030 net/core/dev.c int q = netdev_get_prio_tc_map(dev, i); q 2032 net/core/dev.c tc = &dev->tc_to_txq[q]; q 2035 net/core/dev.c i, q); q 2659 net/core/dev.c static void __netif_reschedule(struct Qdisc *q) q 2666 net/core/dev.c q->next_sched = NULL; q 2667 net/core/dev.c *sd->output_queue_tailp = q; q 2668 net/core/dev.c sd->output_queue_tailp = &q->next_sched; q 2673 net/core/dev.c void __netif_schedule(struct Qdisc *q) q 2675 net/core/dev.c if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state)) q 2676 net/core/dev.c __netif_reschedule(q); q 2693 net/core/dev.c struct Qdisc *q = rcu_dereference(txq->qdisc); q 2695 net/core/dev.c __netif_schedule(q); q 2704 net/core/dev.c struct Qdisc *q; q 2707 net/core/dev.c q = rcu_dereference(dev_queue->qdisc); q 2708 net/core/dev.c __netif_schedule(q); q 3378 net/core/dev.c static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q, q 3382 net/core/dev.c spinlock_t *root_lock = qdisc_lock(q); q 3387 net/core/dev.c qdisc_calculate_pkt_len(skb, q); q 3389 net/core/dev.c if (q->flags & TCQ_F_NOLOCK) { q 3390 net/core/dev.c rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK; q 3391 net/core/dev.c qdisc_run(q); q 3404 net/core/dev.c contended = qdisc_is_running(q); q 3406 net/core/dev.c spin_lock(&q->busylock); q 3409 net/core/dev.c if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) { q 3412 net/core/dev.c } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) && q 3413 net/core/dev.c qdisc_run_begin(q)) { q 3420 net/core/dev.c qdisc_bstats_update(q, skb); q 3422 net/core/dev.c if (sch_direct_xmit(skb, q, dev, txq, root_lock, true)) { q 3424 net/core/dev.c spin_unlock(&q->busylock); q 3427 net/core/dev.c __qdisc_run(q); q 3430 net/core/dev.c qdisc_run_end(q); q 3433 net/core/dev.c rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK; q 3434 net/core/dev.c if (qdisc_run_begin(q)) { q 3436 net/core/dev.c spin_unlock(&q->busylock); q 3439 net/core/dev.c __qdisc_run(q); q 3440 net/core/dev.c qdisc_run_end(q); q 3447 net/core/dev.c spin_unlock(&q->busylock); q 3704 net/core/dev.c struct Qdisc *q; q 3740 net/core/dev.c q = rcu_dereference_bh(txq->qdisc); q 3743 net/core/dev.c if (q->enqueue) { q 3744 net/core/dev.c rc = __dev_xmit_skb(skb, q, dev, txq); q 4521 net/core/dev.c struct Qdisc *q = head; q 4526 net/core/dev.c if (!(q->flags & TCQ_F_NOLOCK)) { q 4527 net/core/dev.c root_lock = qdisc_lock(q); q 4534 net/core/dev.c clear_bit(__QDISC_STATE_SCHED, &q->state); q 4535 net/core/dev.c qdisc_run(q); q 283 net/core/gen_stats.c const struct gnet_stats_queue __percpu *q) q 288 net/core/gen_stats.c const struct gnet_stats_queue *qcpu = per_cpu_ptr(q, i); q 300 net/core/gen_stats.c const struct gnet_stats_queue *q, q 306 net/core/gen_stats.c qstats->qlen = q->qlen; q 307 net/core/gen_stats.c qstats->backlog = q->backlog; q 308 net/core/gen_stats.c qstats->drops = q->drops; q 309 net/core/gen_stats.c qstats->requeues = q->requeues; q 310 net/core/gen_stats.c qstats->overlimits = q->overlimits; q 334 net/core/gen_stats.c struct gnet_stats_queue *q, __u32 qlen) q 338 net/core/gen_stats.c __gnet_stats_copy_queue(&qstats, cpu_q, q, qlen); q 3231 net/core/pktgen.c struct list_head *q, *n; q 3236 net/core/pktgen.c list_for_each_safe(q, n, &t->if_list) { q 3237 net/core/pktgen.c cur = list_entry(q, struct pktgen_dev, list); q 3253 net/core/pktgen.c struct list_head *q, *n; q 3260 net/core/pktgen.c list_for_each_safe(q, n, &t->if_list) { q 3261 
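The net/bluetooth/hci_core.c entries straddling this point compute a fair TX quota: split the free controller buffers over the active connections, but never hand out less than one so every connection can make progress. The same computation standalone (the num == 0 guard is added here for safety; the kernel callers guarantee num > 0):

#include <stdio.h>

static int tx_quote(int cnt, int num)
{
	int q = num ? cnt / num : cnt;
	return q ? q : 1;        /* mirrors *quote = q ? q : 1 above */
}

int main(void)
{
	printf("%d\n", tx_quote(10, 3)); /* 3 */
	printf("%d\n", tx_quote(2, 5));  /* 0 -> clamped to 1 */
	return 0;
}
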
net/core/pktgen.c cur = list_entry(q, struct pktgen_dev, list); q 3736 net/core/pktgen.c struct list_head *q, *n; q 3740 net/core/pktgen.c list_for_each_safe(q, n, &t->if_list) { q 3741 net/core/pktgen.c p = list_entry(q, struct pktgen_dev, list); q 3832 net/core/pktgen.c struct list_head *q, *n; q 3842 net/core/pktgen.c list_for_each_safe(q, n, &list) { q 3843 net/core/pktgen.c t = list_entry(q, struct pktgen_thread, th_list); q 1201 net/core/skbuff.c struct sk_buff_head *q; q 1227 net/core/skbuff.c q = &sk->sk_error_queue; q 1228 net/core/skbuff.c spin_lock_irqsave(&q->lock, flags); q 1229 net/core/skbuff.c tail = skb_peek_tail(q); q 1232 net/core/skbuff.c __skb_queue_tail(q, skb); q 1235 net/core/skbuff.c spin_unlock_irqrestore(&q->lock, flags); q 4445 net/core/skbuff.c struct sk_buff_head *q = &sk->sk_error_queue; q 4450 net/core/skbuff.c spin_lock_irqsave(&q->lock, flags); q 4451 net/core/skbuff.c skb = __skb_dequeue(q); q 4452 net/core/skbuff.c if (skb && (skb_next = skb_peek(q))) { q 4457 net/core/skbuff.c spin_unlock_irqrestore(&q->lock, flags); q 1640 net/decnet/af_decnet.c static int dn_data_ready(struct sock *sk, struct sk_buff_head *q, int flags, int target) q 1646 net/decnet/af_decnet.c return !skb_queue_empty(q) ? 1 : 0; q 1648 net/decnet/af_decnet.c skb_queue_walk(q, skb) { q 369 net/decnet/dn_nsp_out.c int dn_nsp_check_xmit_queue(struct sock *sk, struct sk_buff *skb, struct sk_buff_head *q, unsigned short acknum) q 381 net/decnet/dn_nsp_out.c skb_queue_walk_safe(q, skb2, n) { q 403 net/decnet/dn_nsp_out.c skb_unlink(ack, q); q 30 net/ieee802154/6lowpan/6lowpan_i.h struct inet_frag_queue q; q 36 net/ieee802154/6lowpan/reassembly.c static void lowpan_frag_init(struct inet_frag_queue *q, const void *a) q 40 net/ieee802154/6lowpan/reassembly.c BUILD_BUG_ON(sizeof(*key) > sizeof(q->key)); q 41 net/ieee802154/6lowpan/reassembly.c memcpy(&q->key, key, sizeof(*key)); q 49 net/ieee802154/6lowpan/reassembly.c fq = container_of(frag, struct frag_queue, q); q 51 net/ieee802154/6lowpan/reassembly.c spin_lock(&fq->q.lock); q 53 net/ieee802154/6lowpan/reassembly.c if (fq->q.flags & INET_FRAG_COMPLETE) q 56 net/ieee802154/6lowpan/reassembly.c inet_frag_kill(&fq->q); q 58 net/ieee802154/6lowpan/reassembly.c spin_unlock(&fq->q.lock); q 59 net/ieee802154/6lowpan/reassembly.c inet_frag_put(&fq->q); q 70 net/ieee802154/6lowpan/reassembly.c struct inet_frag_queue *q; q 77 net/ieee802154/6lowpan/reassembly.c q = inet_frag_find(ieee802154_lowpan->fqdir, &key); q 78 net/ieee802154/6lowpan/reassembly.c if (!q) q 81 net/ieee802154/6lowpan/reassembly.c return container_of(q, struct lowpan_frag_queue, q); q 97 net/ieee802154/6lowpan/reassembly.c if (fq->q.flags & INET_FRAG_COMPLETE) q 108 net/ieee802154/6lowpan/reassembly.c if (end < fq->q.len || q 109 net/ieee802154/6lowpan/reassembly.c ((fq->q.flags & INET_FRAG_LAST_IN) && end != fq->q.len)) q 111 net/ieee802154/6lowpan/reassembly.c fq->q.flags |= INET_FRAG_LAST_IN; q 112 net/ieee802154/6lowpan/reassembly.c fq->q.len = end; q 114 net/ieee802154/6lowpan/reassembly.c if (end > fq->q.len) { q 116 net/ieee802154/6lowpan/reassembly.c if (fq->q.flags & INET_FRAG_LAST_IN) q 118 net/ieee802154/6lowpan/reassembly.c fq->q.len = end; q 127 net/ieee802154/6lowpan/reassembly.c prev_tail = fq->q.fragments_tail; q 128 net/ieee802154/6lowpan/reassembly.c err = inet_frag_queue_insert(&fq->q, skb, offset, end); q 132 net/ieee802154/6lowpan/reassembly.c fq->q.stamp = skb->tstamp; q 134 net/ieee802154/6lowpan/reassembly.c fq->q.flags |= INET_FRAG_FIRST_IN; q 136 
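The net/core/pktgen.c walks above use list_for_each_safe(q, n, ...) so the current entry may be unlinked and freed mid-traversal: the second cursor holds the successor before the loop body runs. The same idea on a plain singly linked list, using a pointer-to-pointer so unlinking needs no prev bookkeeping:

#include <stdio.h>
#include <stdlib.h>

struct node { int val; struct node *next; };

/* Deletion-safe traversal: save the successor before the current node
 * may be freed, exactly the hazard list_for_each_safe() guards against. */
static void remove_matching(struct node **head, int val)
{
	struct node **pp = head;
	while (*pp) {
		struct node *cur = *pp;
		struct node *next = cur->next;  /* saved before a potential free */
		if (cur->val == val) {
			*pp = next;
			free(cur);
		} else {
			pp = &cur->next;
		}
	}
}

static struct node *push(struct node *head, int val)
{
	struct node *n = malloc(sizeof(*n));
	n->val = val;
	n->next = head;
	return n;
}

int main(void)
{
	struct node *head = NULL;
	for (int i = 0; i < 6; i++)
		head = push(head, i % 2);       /* 1 0 1 0 1 0 */
	remove_matching(&head, 1);
	for (struct node *n = head; n; n = n->next)
		printf("%d ", n->val);          /* 0 0 0 */
	printf("\n");
	return 0;
}
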
net/ieee802154/6lowpan/reassembly.c fq->q.meat += skb->len; q 137 net/ieee802154/6lowpan/reassembly.c add_frag_mem_limit(fq->q.fqdir, skb->truesize); q 139 net/ieee802154/6lowpan/reassembly.c if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) && q 140 net/ieee802154/6lowpan/reassembly.c fq->q.meat == fq->q.len) { q 168 net/ieee802154/6lowpan/reassembly.c inet_frag_kill(&fq->q); q 170 net/ieee802154/6lowpan/reassembly.c reasm_data = inet_frag_reasm_prepare(&fq->q, skb, prev_tail); q 173 net/ieee802154/6lowpan/reassembly.c inet_frag_reasm_finish(&fq->q, skb, reasm_data, false); q 176 net/ieee802154/6lowpan/reassembly.c skb->tstamp = fq->q.stamp; q 177 net/ieee802154/6lowpan/reassembly.c fq->q.rb_fragments = RB_ROOT; q 178 net/ieee802154/6lowpan/reassembly.c fq->q.fragments_tail = NULL; q 179 net/ieee802154/6lowpan/reassembly.c fq->q.last_run_head = NULL; q 306 net/ieee802154/6lowpan/reassembly.c spin_lock(&fq->q.lock); q 308 net/ieee802154/6lowpan/reassembly.c spin_unlock(&fq->q.lock); q 310 net/ieee802154/6lowpan/reassembly.c inet_frag_put(&fq->q); q 1911 net/ipv4/af_inet.c struct inet_protosw *q; q 1962 net/ipv4/af_inet.c for (q = inetsw_array; q < &inetsw_array[INETSW_ARRAY_LEN]; ++q) q 1963 net/ipv4/af_inet.c inet_register_protosw(q); q 54 net/ipv4/inet_fragment.c static void fragrun_append_to_last(struct inet_frag_queue *q, q 59 net/ipv4/inet_fragment.c FRAG_CB(q->last_run_head)->frag_run_len += skb->len; q 60 net/ipv4/inet_fragment.c FRAG_CB(q->fragments_tail)->next_frag = skb; q 61 net/ipv4/inet_fragment.c q->fragments_tail = skb; q 65 net/ipv4/inet_fragment.c static void fragrun_create(struct inet_frag_queue *q, struct sk_buff *skb) q 70 net/ipv4/inet_fragment.c if (q->last_run_head) q 71 net/ipv4/inet_fragment.c rb_link_node(&skb->rbnode, &q->last_run_head->rbnode, q 72 net/ipv4/inet_fragment.c &q->last_run_head->rbnode.rb_right); q 74 net/ipv4/inet_fragment.c rb_link_node(&skb->rbnode, NULL, &q->rb_fragments.rb_node); q 75 net/ipv4/inet_fragment.c rb_insert_color(&skb->rbnode, &q->rb_fragments); q 77 net/ipv4/inet_fragment.c q->fragments_tail = skb; q 78 net/ipv4/inet_fragment.c q->last_run_head = skb; q 223 net/ipv4/inet_fragment.c struct inet_frag_queue *q = container_of(head, struct inet_frag_queue, q 225 net/ipv4/inet_fragment.c struct inet_frags *f = q->fqdir->f; q 228 net/ipv4/inet_fragment.c f->destructor(q); q 229 net/ipv4/inet_fragment.c kmem_cache_free(f->frags_cachep, q); q 254 net/ipv4/inet_fragment.c void inet_frag_destroy(struct inet_frag_queue *q) q 260 net/ipv4/inet_fragment.c WARN_ON(!(q->flags & INET_FRAG_COMPLETE)); q 261 net/ipv4/inet_fragment.c WARN_ON(del_timer(&q->timer) != 0); q 264 net/ipv4/inet_fragment.c fqdir = q->fqdir; q 266 net/ipv4/inet_fragment.c sum_truesize = inet_frag_rbtree_purge(&q->rb_fragments); q 269 net/ipv4/inet_fragment.c call_rcu(&q->rcu, inet_frag_destroy_rcu); q 279 net/ipv4/inet_fragment.c struct inet_frag_queue *q; q 281 net/ipv4/inet_fragment.c q = kmem_cache_zalloc(f->frags_cachep, GFP_ATOMIC); q 282 net/ipv4/inet_fragment.c if (!q) q 285 net/ipv4/inet_fragment.c q->fqdir = fqdir; q 286 net/ipv4/inet_fragment.c f->constructor(q, arg); q 289 net/ipv4/inet_fragment.c timer_setup(&q->timer, f->frag_expire, 0); q 290 net/ipv4/inet_fragment.c spin_lock_init(&q->lock); q 291 net/ipv4/inet_fragment.c refcount_set(&q->refcnt, 3); q 293 net/ipv4/inet_fragment.c return q; q 301 net/ipv4/inet_fragment.c struct inet_frag_queue *q; q 303 net/ipv4/inet_fragment.c q = inet_frag_alloc(fqdir, f, arg); q 304 net/ipv4/inet_fragment.c if (!q) 
{ q 308 net/ipv4/inet_fragment.c mod_timer(&q->timer, jiffies + fqdir->timeout); q 310 net/ipv4/inet_fragment.c *prev = rhashtable_lookup_get_insert_key(&fqdir->rhashtable, &q->key, q 311 net/ipv4/inet_fragment.c &q->node, f->rhash_params); q 313 net/ipv4/inet_fragment.c q->flags |= INET_FRAG_COMPLETE; q 314 net/ipv4/inet_fragment.c inet_frag_kill(q); q 315 net/ipv4/inet_fragment.c inet_frag_destroy(q); q 318 net/ipv4/inet_fragment.c return q; q 344 net/ipv4/inet_fragment.c int inet_frag_queue_insert(struct inet_frag_queue *q, struct sk_buff *skb, q 347 net/ipv4/inet_fragment.c struct sk_buff *last = q->fragments_tail; q 359 net/ipv4/inet_fragment.c fragrun_create(q, skb); /* First fragment. */ q 366 net/ipv4/inet_fragment.c fragrun_append_to_last(q, skb); q 368 net/ipv4/inet_fragment.c fragrun_create(q, skb); q 375 net/ipv4/inet_fragment.c rbn = &q->rb_fragments.rb_node; q 399 net/ipv4/inet_fragment.c rb_insert_color(&skb->rbnode, &q->rb_fragments); q 408 net/ipv4/inet_fragment.c void *inet_frag_reasm_prepare(struct inet_frag_queue *q, struct sk_buff *skb, q 411 net/ipv4/inet_fragment.c struct sk_buff *fp, *head = skb_rb_first(&q->rb_fragments); q 424 net/ipv4/inet_fragment.c &q->rb_fragments); q 425 net/ipv4/inet_fragment.c if (q->fragments_tail == skb) q 426 net/ipv4/inet_fragment.c q->fragments_tail = fp; q 430 net/ipv4/inet_fragment.c &q->rb_fragments); q 444 net/ipv4/inet_fragment.c add_frag_mem_limit(q->fqdir, delta); q 466 net/ipv4/inet_fragment.c add_frag_mem_limit(q->fqdir, clone->truesize); q 477 net/ipv4/inet_fragment.c void inet_frag_reasm_finish(struct inet_frag_queue *q, struct sk_buff *head, q 490 net/ipv4/inet_fragment.c rb_erase(&head->rbnode, &q->rb_fragments); q 532 net/ipv4/inet_fragment.c rb_erase(rbn, &q->rb_fragments); q 536 net/ipv4/inet_fragment.c sub_frag_mem_limit(q->fqdir, sum_truesize); q 541 net/ipv4/inet_fragment.c head->tstamp = q->stamp; q 545 net/ipv4/inet_fragment.c struct sk_buff *inet_frag_pull_head(struct inet_frag_queue *q) q 549 net/ipv4/inet_fragment.c head = skb_rb_first(&q->rb_fragments); q 555 net/ipv4/inet_fragment.c &q->rb_fragments); q 557 net/ipv4/inet_fragment.c rb_erase(&head->rbnode, &q->rb_fragments); q 561 net/ipv4/inet_fragment.c if (head == q->fragments_tail) q 562 net/ipv4/inet_fragment.c q->fragments_tail = NULL; q 564 net/ipv4/inet_fragment.c sub_frag_mem_limit(q->fqdir, head->truesize); q 62 net/ipv4/ip_fragment.c struct inet_frag_queue q; q 82 net/ipv4/ip_fragment.c static void ip4_frag_init(struct inet_frag_queue *q, const void *a) q 84 net/ipv4/ip_fragment.c struct ipq *qp = container_of(q, struct ipq, q); q 85 net/ipv4/ip_fragment.c struct net *net = q->fqdir->net; q 89 net/ipv4/ip_fragment.c q->key.v4 = *key; q 91 net/ipv4/ip_fragment.c qp->peer = q->fqdir->max_dist ? 
q 96 net/ipv4/ip_fragment.c static void ip4_frag_free(struct inet_frag_queue *q) q 100 net/ipv4/ip_fragment.c qp = container_of(q, struct ipq, q); q 110 net/ipv4/ip_fragment.c inet_frag_put(&ipq->q); q 118 net/ipv4/ip_fragment.c inet_frag_kill(&ipq->q); q 142 net/ipv4/ip_fragment.c qp = container_of(frag, struct ipq, q); q 143 net/ipv4/ip_fragment.c net = qp->q.fqdir->net; q 147 net/ipv4/ip_fragment.c if (qp->q.fqdir->dead) q 150 net/ipv4/ip_fragment.c spin_lock(&qp->q.lock); q 152 net/ipv4/ip_fragment.c if (qp->q.flags & INET_FRAG_COMPLETE) q 159 net/ipv4/ip_fragment.c if (!(qp->q.flags & INET_FRAG_FIRST_IN)) q 166 net/ipv4/ip_fragment.c head = inet_frag_pull_head(&qp->q); q 184 net/ipv4/ip_fragment.c if (frag_expire_skip_icmp(qp->q.key.v4.user) && q 188 net/ipv4/ip_fragment.c spin_unlock(&qp->q.lock); q 193 net/ipv4/ip_fragment.c spin_unlock(&qp->q.lock); q 214 net/ipv4/ip_fragment.c struct inet_frag_queue *q; q 216 net/ipv4/ip_fragment.c q = inet_frag_find(net->ipv4.fqdir, &key); q 217 net/ipv4/ip_fragment.c if (!q) q 220 net/ipv4/ip_fragment.c return container_of(q, struct ipq, q); q 227 net/ipv4/ip_fragment.c unsigned int max = qp->q.fqdir->max_dist; q 239 net/ipv4/ip_fragment.c rc = qp->q.fragments_tail && (end - start) > max; q 242 net/ipv4/ip_fragment.c __IP_INC_STATS(qp->q.fqdir->net, IPSTATS_MIB_REASMFAILS); q 251 net/ipv4/ip_fragment.c if (!mod_timer(&qp->q.timer, jiffies + qp->q.fqdir->timeout)) { q 252 net/ipv4/ip_fragment.c refcount_inc(&qp->q.refcnt); q 256 net/ipv4/ip_fragment.c sum_truesize = inet_frag_rbtree_purge(&qp->q.rb_fragments); q 257 net/ipv4/ip_fragment.c sub_frag_mem_limit(qp->q.fqdir, sum_truesize); q 259 net/ipv4/ip_fragment.c qp->q.flags = 0; q 260 net/ipv4/ip_fragment.c qp->q.len = 0; q 261 net/ipv4/ip_fragment.c qp->q.meat = 0; q 262 net/ipv4/ip_fragment.c qp->q.rb_fragments = RB_ROOT; q 263 net/ipv4/ip_fragment.c qp->q.fragments_tail = NULL; q 264 net/ipv4/ip_fragment.c qp->q.last_run_head = NULL; q 274 net/ipv4/ip_fragment.c struct net *net = qp->q.fqdir->net; q 282 net/ipv4/ip_fragment.c if (qp->q.flags & INET_FRAG_COMPLETE) q 308 net/ipv4/ip_fragment.c if (end < qp->q.len || q 309 net/ipv4/ip_fragment.c ((qp->q.flags & INET_FRAG_LAST_IN) && end != qp->q.len)) q 311 net/ipv4/ip_fragment.c qp->q.flags |= INET_FRAG_LAST_IN; q 312 net/ipv4/ip_fragment.c qp->q.len = end; q 319 net/ipv4/ip_fragment.c if (end > qp->q.len) { q 321 net/ipv4/ip_fragment.c if (qp->q.flags & INET_FRAG_LAST_IN) q 323 net/ipv4/ip_fragment.c qp->q.len = end; q 342 net/ipv4/ip_fragment.c prev_tail = qp->q.fragments_tail; q 343 net/ipv4/ip_fragment.c err = inet_frag_queue_insert(&qp->q, skb, offset, end); q 350 net/ipv4/ip_fragment.c qp->q.stamp = skb->tstamp; q 351 net/ipv4/ip_fragment.c qp->q.meat += skb->len; q 353 net/ipv4/ip_fragment.c add_frag_mem_limit(qp->q.fqdir, skb->truesize); q 355 net/ipv4/ip_fragment.c qp->q.flags |= INET_FRAG_FIRST_IN; q 359 net/ipv4/ip_fragment.c if (fragsize > qp->q.max_size) q 360 net/ipv4/ip_fragment.c qp->q.max_size = fragsize; q 366 net/ipv4/ip_fragment.c if (qp->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) && q 367 net/ipv4/ip_fragment.c qp->q.meat == qp->q.len) { q 374 net/ipv4/ip_fragment.c inet_frag_kill(&qp->q); q 389 net/ipv4/ip_fragment.c inet_frag_kill(&qp->q); q 398 net/ipv4/ip_fragment.c return qp->q.key.v4.user == IP_DEFRAG_LOCAL_DELIVER; q 405 net/ipv4/ip_fragment.c struct net *net = qp->q.fqdir->net; q 420 net/ipv4/ip_fragment.c reasm_data = inet_frag_reasm_prepare(&qp->q, skb, prev_tail); q 424 net/ipv4/ip_fragment.c len = 
ip_hdrlen(skb) + qp->q.len; q 429 net/ipv4/ip_fragment.c inet_frag_reasm_finish(&qp->q, skb, reasm_data, q 433 net/ipv4/ip_fragment.c IPCB(skb)->frag_max_size = max(qp->max_df_size, qp->q.max_size); q 447 net/ipv4/ip_fragment.c if (qp->max_df_size == qp->q.max_size) { q 457 net/ipv4/ip_fragment.c qp->q.rb_fragments = RB_ROOT; q 458 net/ipv4/ip_fragment.c qp->q.fragments_tail = NULL; q 459 net/ipv4/ip_fragment.c qp->q.last_run_head = NULL; q 467 net/ipv4/ip_fragment.c net_info_ratelimited("Oversized IP packet from %pI4\n", &qp->q.key.v4.saddr); q 488 net/ipv4/ip_fragment.c spin_lock(&qp->q.lock); q 492 net/ipv4/ip_fragment.c spin_unlock(&qp->q.lock); q 73 net/ipv4/tcp_fastopen.c struct fastopen_queue *q; q 94 net/ipv4/tcp_fastopen.c q = &inet_csk(sk)->icsk_accept_queue.fastopenq; q 95 net/ipv4/tcp_fastopen.c octx = rcu_dereference_protected(q->ctx, q 97 net/ipv4/tcp_fastopen.c rcu_assign_pointer(q->ctx, ctx); q 828 net/ipv4/tcp_output.c struct list_head *q, *n; q 836 net/ipv4/tcp_output.c list_for_each_safe(q, n, &list) { q 837 net/ipv4/tcp_output.c tp = list_entry(q, struct tcp_sock, tsq_node); q 143 net/ipv6/netfilter/nf_conntrack_reasm.c fq = container_of(frag, struct frag_queue, q); q 145 net/ipv6/netfilter/nf_conntrack_reasm.c ip6frag_expire_frag_queue(fq->q.fqdir->net, fq); q 159 net/ipv6/netfilter/nf_conntrack_reasm.c struct inet_frag_queue *q; q 161 net/ipv6/netfilter/nf_conntrack_reasm.c q = inet_frag_find(net->nf_frag.fqdir, &key); q 162 net/ipv6/netfilter/nf_conntrack_reasm.c if (!q) q 165 net/ipv6/netfilter/nf_conntrack_reasm.c return container_of(q, struct frag_queue, q); q 178 net/ipv6/netfilter/nf_conntrack_reasm.c if (fq->q.flags & INET_FRAG_COMPLETE) { q 208 net/ipv6/netfilter/nf_conntrack_reasm.c if (end < fq->q.len || q 209 net/ipv6/netfilter/nf_conntrack_reasm.c ((fq->q.flags & INET_FRAG_LAST_IN) && end != fq->q.len)) { q 213 net/ipv6/netfilter/nf_conntrack_reasm.c fq->q.flags |= INET_FRAG_LAST_IN; q 214 net/ipv6/netfilter/nf_conntrack_reasm.c fq->q.len = end; q 224 net/ipv6/netfilter/nf_conntrack_reasm.c inet_frag_kill(&fq->q); q 227 net/ipv6/netfilter/nf_conntrack_reasm.c if (end > fq->q.len) { q 229 net/ipv6/netfilter/nf_conntrack_reasm.c if (fq->q.flags & INET_FRAG_LAST_IN) { q 233 net/ipv6/netfilter/nf_conntrack_reasm.c fq->q.len = end; q 255 net/ipv6/netfilter/nf_conntrack_reasm.c prev = fq->q.fragments_tail; q 256 net/ipv6/netfilter/nf_conntrack_reasm.c err = inet_frag_queue_insert(&fq->q, skb, offset, end); q 269 net/ipv6/netfilter/nf_conntrack_reasm.c fq->q.stamp = skb->tstamp; q 270 net/ipv6/netfilter/nf_conntrack_reasm.c fq->q.meat += skb->len; q 272 net/ipv6/netfilter/nf_conntrack_reasm.c if (payload_len > fq->q.max_size) q 273 net/ipv6/netfilter/nf_conntrack_reasm.c fq->q.max_size = payload_len; q 274 net/ipv6/netfilter/nf_conntrack_reasm.c add_frag_mem_limit(fq->q.fqdir, skb->truesize); q 281 net/ipv6/netfilter/nf_conntrack_reasm.c fq->q.flags |= INET_FRAG_FIRST_IN; q 284 net/ipv6/netfilter/nf_conntrack_reasm.c if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) && q 285 net/ipv6/netfilter/nf_conntrack_reasm.c fq->q.meat == fq->q.len) { q 302 net/ipv6/netfilter/nf_conntrack_reasm.c inet_frag_kill(&fq->q); q 322 net/ipv6/netfilter/nf_conntrack_reasm.c inet_frag_kill(&fq->q); q 328 net/ipv6/netfilter/nf_conntrack_reasm.c reasm_data = inet_frag_reasm_prepare(&fq->q, skb, prev_tail); q 333 net/ipv6/netfilter/nf_conntrack_reasm.c sizeof(struct ipv6hdr) + fq->q.len - q 351 net/ipv6/netfilter/nf_conntrack_reasm.c inet_frag_reasm_finish(&fq->q, skb, 
reasm_data, false); q 357 net/ipv6/netfilter/nf_conntrack_reasm.c IP6CB(skb)->frag_max_size = sizeof(struct ipv6hdr) + fq->q.max_size; q 365 net/ipv6/netfilter/nf_conntrack_reasm.c fq->q.rb_fragments = RB_ROOT; q 366 net/ipv6/netfilter/nf_conntrack_reasm.c fq->q.fragments_tail = NULL; q 367 net/ipv6/netfilter/nf_conntrack_reasm.c fq->q.last_run_head = NULL; q 372 net/ipv6/netfilter/nf_conntrack_reasm.c inet_frag_kill(&fq->q); q 472 net/ipv6/netfilter/nf_conntrack_reasm.c spin_lock_bh(&fq->q.lock); q 480 net/ipv6/netfilter/nf_conntrack_reasm.c spin_unlock_bh(&fq->q.lock); q 481 net/ipv6/netfilter/nf_conntrack_reasm.c inet_frag_put(&fq->q); q 76 net/ipv6/reassembly.c fq = container_of(frag, struct frag_queue, q); q 78 net/ipv6/reassembly.c ip6frag_expire_frag_queue(fq->q.fqdir->net, fq); q 91 net/ipv6/reassembly.c struct inet_frag_queue *q; q 97 net/ipv6/reassembly.c q = inet_frag_find(net->ipv6.fqdir, &key); q 98 net/ipv6/reassembly.c if (!q) q 101 net/ipv6/reassembly.c return container_of(q, struct frag_queue, q); q 115 net/ipv6/reassembly.c if (fq->q.flags & INET_FRAG_COMPLETE) q 145 net/ipv6/reassembly.c if (end < fq->q.len || q 146 net/ipv6/reassembly.c ((fq->q.flags & INET_FRAG_LAST_IN) && end != fq->q.len)) q 148 net/ipv6/reassembly.c fq->q.flags |= INET_FRAG_LAST_IN; q 149 net/ipv6/reassembly.c fq->q.len = end; q 161 net/ipv6/reassembly.c if (end > fq->q.len) { q 163 net/ipv6/reassembly.c if (fq->q.flags & INET_FRAG_LAST_IN) q 165 net/ipv6/reassembly.c fq->q.len = end; q 186 net/ipv6/reassembly.c prev_tail = fq->q.fragments_tail; q 187 net/ipv6/reassembly.c err = inet_frag_queue_insert(&fq->q, skb, offset, end); q 194 net/ipv6/reassembly.c fq->q.stamp = skb->tstamp; q 195 net/ipv6/reassembly.c fq->q.meat += skb->len; q 197 net/ipv6/reassembly.c add_frag_mem_limit(fq->q.fqdir, skb->truesize); q 200 net/ipv6/reassembly.c if (fragsize > fq->q.max_size) q 201 net/ipv6/reassembly.c fq->q.max_size = fragsize; q 208 net/ipv6/reassembly.c fq->q.flags |= INET_FRAG_FIRST_IN; q 211 net/ipv6/reassembly.c if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) && q 212 net/ipv6/reassembly.c fq->q.meat == fq->q.len) { q 233 net/ipv6/reassembly.c inet_frag_kill(&fq->q); q 251 net/ipv6/reassembly.c struct net *net = fq->q.fqdir->net; q 257 net/ipv6/reassembly.c inet_frag_kill(&fq->q); q 263 net/ipv6/reassembly.c reasm_data = inet_frag_reasm_prepare(&fq->q, skb, prev_tail); q 268 net/ipv6/reassembly.c sizeof(struct ipv6hdr) + fq->q.len - q 285 net/ipv6/reassembly.c inet_frag_reasm_finish(&fq->q, skb, reasm_data, true); q 292 net/ipv6/reassembly.c IP6CB(skb)->frag_max_size = fq->q.max_size; q 301 net/ipv6/reassembly.c fq->q.rb_fragments = RB_ROOT; q 302 net/ipv6/reassembly.c fq->q.fragments_tail = NULL; q 303 net/ipv6/reassembly.c fq->q.last_run_head = NULL; q 315 net/ipv6/reassembly.c inet_frag_kill(&fq->q); q 360 net/ipv6/reassembly.c spin_lock(&fq->q.lock); q 366 net/ipv6/reassembly.c spin_unlock(&fq->q.lock); q 367 net/ipv6/reassembly.c inet_frag_put(&fq->q); q 345 net/mac80211/debugfs.c int q, res = 0; q 348 net/mac80211/debugfs.c for (q = 0; q < local->hw.queues; q++) q 349 net/mac80211/debugfs.c res += sprintf(buf + res, "%02d: %#.8lx/%d\n", q, q 350 net/mac80211/debugfs.c local->queue_stop_reasons[q], q 351 net/mac80211/debugfs.c skb_queue_len(&local->pending[q])); q 75 net/mac80211/ethtool.c int i, q; q 157 net/mac80211/ethtool.c q = 0; q 160 net/mac80211/ethtool.c if (drv_get_survey(local, q, &survey) != 0) { q 164 net/mac80211/ethtool.c q++; q 1746 net/mac80211/mlme.c int q; q 1772 
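The reassembly entries above (net/ipv4/inet_fragment.c, net/ipv4/ip_fragment.c, net/ipv6/reassembly.c, 6lowpan, nf_conntrack_reasm) share one bookkeeping scheme: meat counts payload bytes queued so far, len becomes fixed once the final fragment arrives, and the datagram completes when both INET_FRAG_FIRST_IN and INET_FRAG_LAST_IN are set and meat == len. A sketch of just that state machine (the rbtree of fragment runs and overlap trimming are omitted; offsets are assumed non-overlapping):

#include <stdio.h>
#include <stdbool.h>

#define FRAG_FIRST_IN 0x1
#define FRAG_LAST_IN  0x2

struct frag_queue { unsigned flags, len, meat; };

/* Returns true when the datagram is complete and can be reassembled. */
static bool frag_add(struct frag_queue *q, unsigned offset, unsigned size,
		     bool last)
{
	unsigned end = offset + size;

	if (last) {
		/* A "last" fragment ending before data already seen, or
		 * disagreeing with a previous last fragment, is bogus. */
		if (end < q->len ||
		    ((q->flags & FRAG_LAST_IN) && end != q->len))
			return false;
		q->flags |= FRAG_LAST_IN;
		q->len = end;
	} else if (end > q->len) {
		if (q->flags & FRAG_LAST_IN)
			return false;   /* grows past the announced total */
		q->len = end;
	}
	if (offset == 0)
		q->flags |= FRAG_FIRST_IN;
	q->meat += size;

	return (q->flags & (FRAG_FIRST_IN | FRAG_LAST_IN)) ==
	       (FRAG_FIRST_IN | FRAG_LAST_IN) && q->meat == q->len;
}

int main(void)
{
	struct frag_queue q = { 0, 0, 0 };
	printf("%d\n", frag_add(&q, 0, 1000, false));  /* 0: more expected */
	printf("%d\n", frag_add(&q, 1000, 200, true)); /* 1: complete */
	return 0;
}
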
q 1773 net/mac80211/mlme.c if (local->queue_stop_reasons[q]) {
q 2674 net/mac80211/rx.c u16 ac, q, hdrlen;
q 2747 net/mac80211/rx.c q = sdata->vif.hw_queue[ac];
q 2748 net/mac80211/rx.c if (ieee80211_queue_stopped(&local->hw, q)) {
q 2752 net/mac80211/rx.c skb_set_queue_mapping(skb, q);
q 1636 net/mac80211/tx.c int q = info->hw_queue;
q 1639 net/mac80211/tx.c if (WARN_ON_ONCE(q >= local->hw.queues)) {
q 1647 net/mac80211/tx.c if (local->queue_stop_reasons[q] ||
q 1648 net/mac80211/tx.c (!txpending && !skb_queue_empty(&local->pending[q]))) {
q 1651 net/mac80211/tx.c if (local->queue_stop_reasons[q] &
q 1675 net/mac80211/tx.c &local->pending[q]);
q 1678 net/mac80211/tx.c &local->pending[q]);
q 102 net/netfilter/nfnetlink_queue.c instance_lookup(struct nfnl_queue_net *q, u_int16_t queue_num)
q 107 net/netfilter/nfnetlink_queue.c head = &q->instance_table[instance_hashfn(queue_num)];
q 116 net/netfilter/nfnetlink_queue.c instance_create(struct nfnl_queue_net *q, u_int16_t queue_num, u32 portid)
q 122 net/netfilter/nfnetlink_queue.c spin_lock(&q->instances_lock);
q 123 net/netfilter/nfnetlink_queue.c if (instance_lookup(q, queue_num)) {
q 148 net/netfilter/nfnetlink_queue.c hlist_add_head_rcu(&inst->hlist, &q->instance_table[h]);
q 150 net/netfilter/nfnetlink_queue.c spin_unlock(&q->instances_lock);
q 157 net/netfilter/nfnetlink_queue.c spin_unlock(&q->instances_lock);
q 183 net/netfilter/nfnetlink_queue.c instance_destroy(struct nfnl_queue_net *q, struct nfqnl_instance *inst)
q 185 net/netfilter/nfnetlink_queue.c spin_lock(&q->instances_lock);
q 187 net/netfilter/nfnetlink_queue.c spin_unlock(&q->instances_lock);
q 784 net/netfilter/nfnetlink_queue.c struct nfnl_queue_net *q = nfnl_queue_pernet(net);
q 787 net/netfilter/nfnetlink_queue.c queue = instance_lookup(q, queuenum);
q 928 net/netfilter/nfnetlink_queue.c struct nfnl_queue_net *q = nfnl_queue_pernet(net);
q 934 net/netfilter/nfnetlink_queue.c struct hlist_head *head = &q->instance_table[i];
q 961 net/netfilter/nfnetlink_queue.c struct nfnl_queue_net *q = nfnl_queue_pernet(net);
q 966 net/netfilter/nfnetlink_queue.c struct hlist_head *head = &q->instance_table[i];
q 978 net/netfilter/nfnetlink_queue.c struct nfnl_queue_net *q = nfnl_queue_pernet(n->net);
q 984 net/netfilter/nfnetlink_queue.c spin_lock(&q->instances_lock);
q 988 net/netfilter/nfnetlink_queue.c struct hlist_head *head = &q->instance_table[i];
q 995 net/netfilter/nfnetlink_queue.c spin_unlock(&q->instances_lock);
q 1024 net/netfilter/nfnetlink_queue.c verdict_instance_lookup(struct nfnl_queue_net *q, u16 queue_num, u32 nlportid)
q 1028 net/netfilter/nfnetlink_queue.c queue = instance_lookup(q, queue_num);
q 1072 net/netfilter/nfnetlink_queue.c struct nfnl_queue_net *q = nfnl_queue_pernet(net);
q 1074 net/netfilter/nfnetlink_queue.c queue = verdict_instance_lookup(q, queue_num,
q 1182 net/netfilter/nfnetlink_queue.c struct nfnl_queue_net *q = nfnl_queue_pernet(net);
q 1185 net/netfilter/nfnetlink_queue.c queue = verdict_instance_lookup(q, queue_num,
q 1263 net/netfilter/nfnetlink_queue.c struct nfnl_queue_net *q = nfnl_queue_pernet(net);
q 1312 net/netfilter/nfnetlink_queue.c queue = instance_lookup(q, queue_num);
q 1325 net/netfilter/nfnetlink_queue.c queue = instance_create(q, queue_num,
q 1337 net/netfilter/nfnetlink_queue.c instance_destroy(q, queue);
q 1412 net/netfilter/nfnetlink_queue.c struct nfnl_queue_net *q;
q 1418 net/netfilter/nfnetlink_queue.c q = nfnl_queue_pernet(net);
q 1420 net/netfilter/nfnetlink_queue.c if (!hlist_empty(&q->instance_table[st->bucket]))
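The nfnetlink_queue entries above hash queue instances into per-netns buckets and walk one bucket on lookup. A simplified userspace sketch of instance_lookup, using a plain singly linked list and a placeholder hash where the kernel uses hlists under RCU plus a spinlock:

#include <stdio.h>
#include <stdint.h>

#define INSTANCE_BUCKETS 16	/* illustrative table size */

struct nfqnl_instance {
	uint16_t queue_num;
	struct nfqnl_instance *next;
};

struct nfnl_queue_net {
	struct nfqnl_instance *instance_table[INSTANCE_BUCKETS];
};

static unsigned int instance_hashfn(uint16_t queue_num)
{
	return queue_num % INSTANCE_BUCKETS;	/* placeholder hash */
}

static struct nfqnl_instance *
instance_lookup(struct nfnl_queue_net *q, uint16_t queue_num)
{
	struct nfqnl_instance *inst;

	/* Walk only the bucket the queue number hashes to. */
	for (inst = q->instance_table[instance_hashfn(queue_num)];
	     inst; inst = inst->next)
		if (inst->queue_num == queue_num)
			return inst;
	return NULL;
}

int main(void)
{
	struct nfnl_queue_net net = { 0 };
	struct nfqnl_instance a = { .queue_num = 3 };

	net.instance_table[instance_hashfn(3)] = &a;
	printf("found: %d\n", instance_lookup(&net, 3) == &a);	/* 1 */
	return 0;
}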
q 1421 net/netfilter/nfnetlink_queue.c return q->instance_table[st->bucket].first;
q 1433 net/netfilter/nfnetlink_queue.c struct nfnl_queue_net *q;
q 1438 net/netfilter/nfnetlink_queue.c q = nfnl_queue_pernet(net);
q 1439 net/netfilter/nfnetlink_queue.c h = q->instance_table[st->bucket].first;
q 1498 net/netfilter/nfnetlink_queue.c struct nfnl_queue_net *q = nfnl_queue_pernet(net);
q 1501 net/netfilter/nfnetlink_queue.c INIT_HLIST_HEAD(&q->instance_table[i]);
q 1503 net/netfilter/nfnetlink_queue.c spin_lock_init(&q->instances_lock);
q 1516 net/netfilter/nfnetlink_queue.c struct nfnl_queue_net *q = nfnl_queue_pernet(net);
q 1524 net/netfilter/nfnetlink_queue.c WARN_ON_ONCE(!hlist_empty(&q->instance_table[i]));
q 29 net/netfilter/xt_quota.c struct xt_quota_info *q = (void *)par->matchinfo;
q 30 net/netfilter/xt_quota.c struct xt_quota_priv *priv = q->master;
q 31 net/netfilter/xt_quota.c bool ret = q->flags & XT_QUOTA_INVERT;
q 48 net/netfilter/xt_quota.c struct xt_quota_info *q = par->matchinfo;
q 50 net/netfilter/xt_quota.c if (q->flags & ~XT_QUOTA_MASK)
q 53 net/netfilter/xt_quota.c q->master = kmalloc(sizeof(*q->master), GFP_KERNEL);
q 54 net/netfilter/xt_quota.c if (q->master == NULL)
q 57 net/netfilter/xt_quota.c spin_lock_init(&q->master->lock);
q 58 net/netfilter/xt_quota.c q->master->quota = q->quota;
q 64 net/netfilter/xt_quota.c const struct xt_quota_info *q = par->matchinfo;
q 66 net/netfilter/xt_quota.c kfree(q->master);
q 75 net/rds/message.c void rds_notify_msg_zcopy_purge(struct rds_msg_zcopy_queue *q)
q 81 net/rds/message.c spin_lock_irqsave(&q->lock, flags);
q 82 net/rds/message.c list_splice(&q->zcookie_head, &copy);
q 83 net/rds/message.c INIT_LIST_HEAD(&q->zcookie_head);
q 84 net/rds/message.c spin_unlock_irqrestore(&q->lock, flags);
q 96 net/rds/message.c struct rds_msg_zcopy_queue *q;
q 103 net/rds/message.c q = &rs->rs_zcookie_queue;
q 104 net/rds/message.c spin_lock_irqsave(&q->lock, flags);
q 105 net/rds/message.c head = &q->zcookie_head;
q 110 net/rds/message.c spin_unlock_irqrestore(&q->lock, flags);
q 121 net/rds/message.c list_add_tail(&q->zcookie_head, &info->rs_zcookie_next);
q 123 net/rds/message.c spin_unlock_irqrestore(&q->lock, flags);
q 391 net/rds/rds.h static inline void rds_message_zcopy_queue_init(struct rds_msg_zcopy_queue *q)
q 393 net/rds/rds.h spin_lock_init(&q->lock);
q 394 net/rds/rds.h INIT_LIST_HEAD(&q->zcookie_head);
q 599 net/rds/recv.c struct rds_msg_zcopy_queue *q = &rs->rs_zcookie_queue;
q 611 net/rds/recv.c spin_lock_irqsave(&q->lock, flags);
q 612 net/rds/recv.c if (!list_empty(&q->zcookie_head)) {
q 613 net/rds/recv.c info = list_entry(q->zcookie_head.next,
q 617 net/rds/recv.c spin_unlock_irqrestore(&q->lock, flags);
q 623 net/rds/recv.c spin_lock_irqsave(&q->lock, flags);
q 624 net/rds/recv.c list_add(&info->rs_zcookie_next, &q->zcookie_head);
q 625 net/rds/recv.c spin_unlock_irqrestore(&q->lock, flags);
q 101 net/rose/rose_in.c static int rose_state3_machine(struct sock *sk, struct sk_buff *skb, int frametype, int ns, int nr, int q, int d, int m)
q 265 net/rose/rose_in.c int queued = 0, frametype, ns, nr, q, d, m;
q 270 net/rose/rose_in.c frametype = rose_decode(skb, &ns, &nr, &q, &d, &m);
q 280 net/rose/rose_in.c queued = rose_state3_machine(sk, skb, frametype, ns, nr, q, d, m);
q 201 net/rose/rose_subr.c int rose_decode(struct sk_buff *skb, int *ns, int *nr, int *q, int *d, int *m)
q 207 net/rose/rose_subr.c *ns = *nr = *q = *d = *m = 0;
q 228 net/rose/rose_subr.c *q = (frame[0] & ROSE_Q_BIT) == ROSE_Q_BIT;
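The rose_decode entries above pull the Q, D and M flags out of the ROSE/X.25 packet header with simple mask-and-compare tests. A self-contained sketch; the bit positions follow the usual X.25 GFI layout but should be treated as illustrative here:

#include <stdio.h>

#define ROSE_Q_BIT 0x80	/* qualifier bit, first octet */
#define ROSE_D_BIT 0x40	/* delivery-confirmation bit, first octet */
#define ROSE_M_BIT 0x10	/* more-data bit, third octet */

static void rose_decode_bits(const unsigned char *frame,
			     int *q, int *d, int *m)
{
	*q = (frame[0] & ROSE_Q_BIT) == ROSE_Q_BIT;
	*d = (frame[0] & ROSE_D_BIT) == ROSE_D_BIT;
	*m = (frame[2] & ROSE_M_BIT) == ROSE_M_BIT;
}

int main(void)
{
	unsigned char frame[3] = { 0xC0, 0x00, 0x10 };
	int q, d, m;

	rose_decode_bits(frame, &q, &d, &m);
	printf("q=%d d=%d m=%d\n", q, d, m);	/* q=1 d=1 m=1 */
	return 0;
}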
q 912 net/rxrpc/rxkad.c u8 *p, *q, *name, *end;
q 954 net/rxrpc/rxkad.c q = memchr(p, 0, end - p); \
q 955 net/rxrpc/rxkad.c if (!q || q - p > (field##_SZ)) \
q 957 net/rxrpc/rxkad.c for (; p < q; p++) \
q 729 net/sched/cls_api.c static int tcf_block_offload_bind(struct tcf_block *block, struct Qdisc *q,
q 733 net/sched/cls_api.c struct net_device *dev = q->dev_queue->dev;
q 772 net/sched/cls_api.c static void tcf_block_offload_unbind(struct tcf_block *block, struct Qdisc *q,
q 775 net/sched/cls_api.c struct net_device *dev = q->dev_queue->dev;
q 894 net/sched/cls_api.c static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q,
q 919 net/sched/cls_api.c block->q = q;
q 1060 net/sched/cls_api.c static int __tcf_qdisc_find(struct net *net, struct Qdisc **q,
q 1082 net/sched/cls_api.c *q = dev->qdisc;
q 1083 net/sched/cls_api.c *parent = (*q)->handle;
q 1085 net/sched/cls_api.c *q = qdisc_lookup_rcu(dev, TC_H_MAJ(*parent));
q 1086 net/sched/cls_api.c if (!*q) {
q 1093 net/sched/cls_api.c *q = qdisc_refcount_inc_nz(*q);
q 1094 net/sched/cls_api.c if (!*q) {
q 1101 net/sched/cls_api.c cops = (*q)->ops->cl_ops;
q 1127 net/sched/cls_api.c qdisc_put(*q);
q 1129 net/sched/cls_api.c qdisc_put_unlocked(*q);
q 1130 net/sched/cls_api.c *q = NULL;
q 1135 net/sched/cls_api.c static int __tcf_qdisc_cl_find(struct Qdisc *q, u32 parent, unsigned long *cl,
q 1143 net/sched/cls_api.c const struct Qdisc_class_ops *cops = q->ops->cl_ops;
q 1145 net/sched/cls_api.c *cl = cops->find(q, parent);
q 1155 net/sched/cls_api.c static struct tcf_block *__tcf_block_find(struct net *net, struct Qdisc *q,
q 1169 net/sched/cls_api.c const struct Qdisc_class_ops *cops = q->ops->cl_ops;
q 1171 net/sched/cls_api.c block = cops->tcf_block(q, cl, extack);
q 1192 net/sched/cls_api.c static void __tcf_block_put(struct tcf_block *block, struct Qdisc *q,
q 1208 net/sched/cls_api.c if (q)
q 1209 net/sched/cls_api.c tcf_block_offload_unbind(block, q, ei);
q 1215 net/sched/cls_api.c } else if (q) {
q 1216 net/sched/cls_api.c tcf_block_offload_unbind(block, q, ei);
q 1229 net/sched/cls_api.c static struct tcf_block *tcf_block_find(struct net *net, struct Qdisc **q,
q 1239 net/sched/cls_api.c err = __tcf_qdisc_find(net, q, parent, ifindex, true, extack);
q 1243 net/sched/cls_api.c err = __tcf_qdisc_cl_find(*q, *parent, cl, ifindex, extack);
q 1247 net/sched/cls_api.c block = __tcf_block_find(net, *q, *cl, ifindex, block_index, extack);
q 1256 net/sched/cls_api.c if (*q)
q 1257 net/sched/cls_api.c qdisc_put(*q);
q 1259 net/sched/cls_api.c *q = NULL;
q 1263 net/sched/cls_api.c static void tcf_block_release(struct Qdisc *q, struct tcf_block *block,
q 1269 net/sched/cls_api.c if (q) {
q 1271 net/sched/cls_api.c qdisc_put(q);
q 1273 net/sched/cls_api.c qdisc_put_unlocked(q);
q 1279 net/sched/cls_api.c struct Qdisc *q;
q 1285 net/sched/cls_api.c struct Qdisc *q,
q 1291 net/sched/cls_api.c netif_keep_dst(qdisc_dev(q));
q 1300 net/sched/cls_api.c tcf_block_owner_netif_keep_dst(block, item->q,
q 1306 net/sched/cls_api.c struct Qdisc *q,
q 1314 net/sched/cls_api.c item->q = q;
q 1321 net/sched/cls_api.c struct Qdisc *q,
q 1327 net/sched/cls_api.c if (item->q == q && item->binder_type == binder_type) {
q 1336 net/sched/cls_api.c int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
q 1340 net/sched/cls_api.c struct net *net = qdisc_net(q);
q 1349 net/sched/cls_api.c block = tcf_block_create(net, q, ei->block_index, extack);
q 1359 net/sched/cls_api.c err = tcf_block_owner_add(block, q, ei->binder_type);
q 1363 net/sched/cls_api.c tcf_block_owner_netif_keep_dst(block, q, ei->binder_type);
q 1369 net/sched/cls_api.c err = tcf_block_offload_bind(block, q, ei, extack);
q 1379 net/sched/cls_api.c tcf_block_owner_del(block, q, ei->binder_type);
q 1395 net/sched/cls_api.c struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
q 1404 net/sched/cls_api.c return tcf_block_get_ext(p_block, q, &ei, extack);
q 1411 net/sched/cls_api.c void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
q 1417 net/sched/cls_api.c tcf_block_owner_del(block, q, ei->binder_type);
q 1419 net/sched/cls_api.c __tcf_block_put(block, q, ei, true);
q 1429 net/sched/cls_api.c tcf_block_put_ext(block, block->q, &ei);
q 1783 net/sched/cls_api.c struct Qdisc *q, u32 parent, void *fh,
q 1798 net/sched/cls_api.c if (q) {
q 1799 net/sched/cls_api.c tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
q 1828 net/sched/cls_api.c struct tcf_block *block, struct Qdisc *q,
q 1840 net/sched/cls_api.c if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
q 1860 net/sched/cls_api.c struct tcf_block *block, struct Qdisc *q,
q 1872 net/sched/cls_api.c if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
q 1900 net/sched/cls_api.c struct tcf_block *block, struct Qdisc *q,
q 1910 net/sched/cls_api.c q, parent, NULL, event, false, rtnl_held);
q 1931 net/sched/cls_api.c struct Qdisc *q = NULL;
q 1977 net/sched/cls_api.c err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
q 1992 net/sched/cls_api.c (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
q 1998 net/sched/cls_api.c err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
q 2002 net/sched/cls_api.c block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
q 2108 net/sched/cls_api.c tfilter_notify(net, skb, n, tp, block, q, parent, fh,
q 2112 net/sched/cls_api.c if (q)
q 2113 net/sched/cls_api.c q->flags &= ~TCQ_F_CAN_BYPASS;
q 2126 net/sched/cls_api.c tcf_block_release(q, block, rtnl_held);
q 2157 net/sched/cls_api.c struct Qdisc *q = NULL;
q 2187 net/sched/cls_api.c err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
q 2201 net/sched/cls_api.c (q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
q 2207 net/sched/cls_api.c err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
q 2211 net/sched/cls_api.c block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
q 2239 net/sched/cls_api.c tfilter_notify_chain(net, skb, block, q, parent, n,
q 2263 net/sched/cls_api.c tfilter_notify(net, skb, n, tp, block, q, parent, fh,
q 2279 net/sched/cls_api.c q, parent, fh, false, &last,
q 2294 net/sched/cls_api.c tcf_block_release(q, block, rtnl_held);
q 2317 net/sched/cls_api.c struct Qdisc *q = NULL;
q 2344 net/sched/cls_api.c err = __tcf_qdisc_find(net, &q, &parent, t->tcm_ifindex, false, extack);
q 2357 net/sched/cls_api.c if ((q && !(q->ops->cl_ops->flags & QDISC_CLASS_OPS_DOIT_UNLOCKED)) ||
q 2363 net/sched/cls_api.c err = __tcf_qdisc_cl_find(q, parent, &cl, t->tcm_ifindex, extack);
q 2367 net/sched/cls_api.c block = __tcf_block_find(net, q, cl, t->tcm_ifindex, t->tcm_block_index,
q 2407 net/sched/cls_api.c err = tfilter_notify(net, skb, n, tp, block, q, parent,
q 2420 net/sched/cls_api.c tcf_block_release(q, block, rtnl_held);
q 2433 net/sched/cls_api.c struct Qdisc *q;
q 2442 net/sched/cls_api.c return tcf_fill_node(net, a->skb, tp, a->block, a->q, a->parent,
q 2448 net/sched/cls_api.c static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent,
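The __tcf_qdisc_find entries above (q 1093 in net/sched/cls_api.c) only use a qdisc found under RCU if its reference count can be raised from a non-zero value, so a concurrently dying qdisc is never revived. A sketch of that guarded increment, using C11 atomics as a stand-in for the kernel's refcount_t:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct qdisc { atomic_int refcnt; };

static bool refcount_inc_not_zero(struct qdisc *q)
{
	int old = atomic_load(&q->refcnt);

	while (old != 0)	/* never resurrect an object at zero */
		if (atomic_compare_exchange_weak(&q->refcnt, &old, old + 1))
			return true;	/* reference safely taken */
	return false;
}

int main(void)
{
	struct qdisc live = { 1 };
	struct qdisc dead = { 0 };

	printf("live: %d\n", refcount_inc_not_zero(&live));	/* 1 */
	printf("dead: %d\n", refcount_inc_not_zero(&dead));	/* 0 */
	return 0;
}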
q 2476 net/sched/cls_api.c if (tcf_fill_node(net, skb, tp, block, q, parent, NULL,
q 2489 net/sched/cls_api.c arg.q = q;
q 2514 net/sched/cls_api.c struct Qdisc *q = NULL;
q 2552 net/sched/cls_api.c q = dev->qdisc;
q 2554 net/sched/cls_api.c q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
q 2555 net/sched/cls_api.c if (!q)
q 2557 net/sched/cls_api.c cops = q->ops->cl_ops;
q 2563 net/sched/cls_api.c cl = cops->find(q, tcm->tcm_parent);
q 2567 net/sched/cls_api.c block = cops->tcf_block(q, cl, NULL);
q 2572 net/sched/cls_api.c q = NULL;
q 2586 net/sched/cls_api.c if (!tcf_chain_dump(chain, q, parent, skb, cb,
q 2628 net/sched/cls_api.c if (block->q) {
q 2629 net/sched/cls_api.c tcm->tcm_ifindex = qdisc_dev(block->q)->ifindex;
q 2630 net/sched/cls_api.c tcm->tcm_parent = block->q->handle;
q 2767 net/sched/cls_api.c struct Qdisc *q = NULL;
q 2787 net/sched/cls_api.c block = tcf_block_find(net, &q, &parent, &cl,
q 2858 net/sched/cls_api.c tfilter_notify_chain(net, skb, block, q, parent, n,
q 2882 net/sched/cls_api.c tcf_block_release(q, block, true);
q 2898 net/sched/cls_api.c struct Qdisc *q = NULL;
q 2937 net/sched/cls_api.c q = dev->qdisc;
q 2938 net/sched/cls_api.c parent = q->handle;
q 2940 net/sched/cls_api.c q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
q 2942 net/sched/cls_api.c if (!q)
q 2944 net/sched/cls_api.c cops = q->ops->cl_ops;
q 2950 net/sched/cls_api.c cl = cops->find(q, tcm->tcm_parent);
q 2954 net/sched/cls_api.c block = cops->tcf_block(q, cl, NULL);
q 2958 net/sched/cls_api.c q = NULL;
q 266 net/sched/cls_basic.c static void basic_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
q 273 net/sched/cls_basic.c __tcf_bind_filter(q, &f->res, base);
q 275 net/sched/cls_basic.c __tcf_unbind_filter(q, &f->res);
q 635 net/sched/cls_bpf.c void *q, unsigned long base)
q 641 net/sched/cls_bpf.c __tcf_bind_filter(q, &prog->res, base);
q 643 net/sched/cls_bpf.c __tcf_unbind_filter(q, &prog->res);
q 503 net/sched/cls_flow.c struct Qdisc *q = tcf_block_q(tp->chain->block);
q 505 net/sched/cls_flow.c baseclass = TC_H_MAKE(q->handle, baseclass);
q 2516 net/sched/cls_flower.c static void fl_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
q 2523 net/sched/cls_flower.c __tcf_bind_filter(q, &f->res, base);
q 2525 net/sched/cls_flower.c __tcf_unbind_filter(q, &f->res);
q 75 net/sched/cls_fw.c struct Qdisc *q = tcf_block_q(tp->chain->block);
q 79 net/sched/cls_fw.c !(TC_H_MAJ(id ^ q->handle)))) {
q 422 net/sched/cls_fw.c static void fw_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
q 429 net/sched/cls_fw.c __tcf_bind_filter(q, &f->res, base);
q 431 net/sched/cls_fw.c __tcf_unbind_filter(q, &f->res);
q 397 net/sched/cls_matchall.c static void mall_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
q 404 net/sched/cls_matchall.c __tcf_bind_filter(q, &head->res, base);
q 406 net/sched/cls_matchall.c __tcf_unbind_filter(q, &head->res);
q 644 net/sched/cls_route.c static void route4_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
q 651 net/sched/cls_route.c __tcf_bind_filter(q, &f->res, base);
q 653 net/sched/cls_route.c __tcf_unbind_filter(q, &f->res);
q 739 net/sched/cls_rsvp.h static void rsvp_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
q 746 net/sched/cls_rsvp.h __tcf_bind_filter(q, &f->res, base);
q 748 net/sched/cls_rsvp.h __tcf_unbind_filter(q, &f->res);
q 114 net/sched/cls_tcindex.c struct Qdisc *q = tcf_block_q(tp->chain->block);
q 118 net/sched/cls_tcindex.c res->classid = TC_H_MAKE(TC_H_MAJ(q->handle), key);
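The cls_flow, cls_fw and cls_tcindex entries above build class IDs with TC_H_MAKE/TC_H_MAJ, which pack a 16-bit qdisc major and a 16-bit class minor into one 32-bit handle. A self-contained rendering of that arithmetic (the macro bodies mirror the uapi definitions, reproduced here for illustration):

#include <stdio.h>
#include <stdint.h>

#define TC_H_MAJ(h)		((h) & 0xFFFF0000U)
#define TC_H_MIN(h)		((h) & 0x0000FFFFU)
#define TC_H_MAKE(maj, min)	(TC_H_MAJ(maj) | TC_H_MIN(min))

int main(void)
{
	uint32_t qdisc_handle = 0x00010000;	/* "1:" in tc syntax */
	uint16_t key = 0x002a;

	/* Same construction as the cls_tcindex entry above. */
	uint32_t classid = TC_H_MAKE(TC_H_MAJ(qdisc_handle), key);

	printf("classid %x:%x\n", classid >> 16, classid & 0xFFFF);	/* 1:2a */
	return 0;
}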
q 693 net/sched/cls_tcindex.c void *q, unsigned long base)
q 699 net/sched/cls_tcindex.c __tcf_bind_filter(q, &r->res, base);
q 701 net/sched/cls_tcindex.c __tcf_unbind_filter(q, &r->res);
q 332 net/sched/cls_u32.c return block->q;
q 1258 net/sched/cls_u32.c static void u32_bind_class(void *fh, u32 classid, unsigned long cl, void *q,
q 1265 net/sched/cls_u32.c __tcf_bind_filter(q, &n->res, base);
q 1267 net/sched/cls_u32.c __tcf_unbind_filter(q, &n->res);
q 130 net/sched/sch_api.c struct Qdisc_ops *q, **qp;
q 134 net/sched/sch_api.c for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
q 135 net/sched/sch_api.c if (!strcmp(qops->id, q->id))
q 174 net/sched/sch_api.c struct Qdisc_ops *q, **qp;
q 178 net/sched/sch_api.c for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
q 179 net/sched/sch_api.c if (q == qops)
q 181 net/sched/sch_api.c if (q) {
q 182 net/sched/sch_api.c *qp = q->next;
q 183 net/sched/sch_api.c q->next = NULL;
q 201 net/sched/sch_api.c struct Qdisc_ops *q = NULL;
q 203 net/sched/sch_api.c for (q = qdisc_base; q; q = q->next) {
q 204 net/sched/sch_api.c if (!strcmp(name, q->id)) {
q 205 net/sched/sch_api.c if (!try_module_get(q->owner))
q 206 net/sched/sch_api.c q = NULL;
q 211 net/sched/sch_api.c return q;
q 259 net/sched/sch_api.c struct Qdisc *q;
q 268 net/sched/sch_api.c hash_for_each_possible_rcu(qdisc_dev(root)->qdisc_hash, q, hash, handle) {
q 269 net/sched/sch_api.c if (q->handle == handle)
q 270 net/sched/sch_api.c return q;
q 275 net/sched/sch_api.c void qdisc_hash_add(struct Qdisc *q, bool invisible)
q 277 net/sched/sch_api.c if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
q 279 net/sched/sch_api.c hash_add_rcu(qdisc_dev(q)->qdisc_hash, &q->hash, q->handle);
q 281 net/sched/sch_api.c q->flags |= TCQ_F_INVISIBLE;
q 286 net/sched/sch_api.c void qdisc_hash_del(struct Qdisc *q)
q 288 net/sched/sch_api.c if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
q 290 net/sched/sch_api.c hash_del_rcu(&q->hash);
q 297 net/sched/sch_api.c struct Qdisc *q;
q 301 net/sched/sch_api.c q = qdisc_match_from_root(dev->qdisc, handle);
q 302 net/sched/sch_api.c if (q)
q 306 net/sched/sch_api.c q = qdisc_match_from_root(
q 310 net/sched/sch_api.c return q;
q 316 net/sched/sch_api.c struct Qdisc *q;
q 320 net/sched/sch_api.c q = qdisc_match_from_root(dev->qdisc, handle);
q 321 net/sched/sch_api.c if (q)
q 326 net/sched/sch_api.c q = qdisc_match_from_root(nq->qdisc_sleeping, handle);
q 328 net/sched/sch_api.c return q;
q 349 net/sched/sch_api.c struct Qdisc_ops *q = NULL;
q 353 net/sched/sch_api.c for (q = qdisc_base; q; q = q->next) {
q 354 net/sched/sch_api.c if (nla_strcmp(kind, q->id) == 0) {
q 355 net/sched/sch_api.c if (!try_module_get(q->owner))
q 356 net/sched/sch_api.c q = NULL;
q 362 net/sched/sch_api.c return q;
q 781 net/sched/sch_api.c notify = !sch->q.qlen && !WARN_ON_ONCE(!n &&
q 794 net/sched/sch_api.c sch->q.qlen -= n;
q 867 net/sched/sch_api.c static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
q 888 net/sched/sch_api.c tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
q 890 net/sched/sch_api.c tcm->tcm_handle = q->handle;
q 891 net/sched/sch_api.c tcm->tcm_info = refcount_read(&q->refcnt);
q 892 net/sched/sch_api.c if (nla_put_string(skb, TCA_KIND, q->ops->id))
q 894 net/sched/sch_api.c if (q->ops->ingress_block_get) {
q 895 net/sched/sch_api.c block_index = q->ops->ingress_block_get(q);
q 900 net/sched/sch_api.c if (q->ops->egress_block_get) {
q 901 net/sched/sch_api.c block_index = q->ops->egress_block_get(q);
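The register/unregister loops in the sch_api entries above (q 130-183) walk the qdisc_base list through a struct Qdisc_ops **qp, which lets one loop both detect duplicates and append at the tail without special-casing the list head. A userspace sketch of register_qdisc built on that idiom:

#include <stdio.h>
#include <string.h>

struct Qdisc_ops {
	const char *id;
	struct Qdisc_ops *next;
};

static struct Qdisc_ops *qdisc_base;	/* singly linked registry */

static int register_qdisc(struct Qdisc_ops *qops)
{
	struct Qdisc_ops *q, **qp;

	/* qp ends up pointing at the NULL tail slot if no duplicate found. */
	for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next)
		if (!strcmp(qops->id, q->id))
			return -1;	/* already registered */
	qops->next = NULL;
	*qp = qops;			/* append at the tail */
	return 0;
}

int main(void)
{
	static struct Qdisc_ops pfifo = { .id = "pfifo" };
	static struct Qdisc_ops cake  = { .id = "cake" };

	register_qdisc(&pfifo);
	register_qdisc(&cake);
	printf("first: %s, dup: %d\n", qdisc_base->id,
	       register_qdisc(&pfifo));	/* first: pfifo, dup: -1 */
	return 0;
}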
q 906 net/sched/sch_api.c if (q->ops->dump && q->ops->dump(q, skb) < 0)
q 908 net/sched/sch_api.c if (nla_put_u8(skb, TCA_HW_OFFLOAD, !!(q->flags & TCQ_F_OFFLOADED)))
q 910 net/sched/sch_api.c qlen = qdisc_qlen_sum(q);
q 912 net/sched/sch_api.c stab = rtnl_dereference(q->stab);
q 920 net/sched/sch_api.c if (q->ops->dump_stats && q->ops->dump_stats(q, &d) < 0)
q 923 net/sched/sch_api.c if (qdisc_is_percpu_stats(q)) {
q 924 net/sched/sch_api.c cpu_bstats = q->cpu_bstats;
q 925 net/sched/sch_api.c cpu_qstats = q->cpu_qstats;
q 928 net/sched/sch_api.c if (gnet_stats_copy_basic(qdisc_root_sleeping_running(q),
q 929 net/sched/sch_api.c &d, cpu_bstats, &q->bstats) < 0 ||
q 930 net/sched/sch_api.c gnet_stats_copy_rate_est(&d, &q->rate_est) < 0 ||
q 931 net/sched/sch_api.c gnet_stats_copy_queue(&d, cpu_qstats, &q->qstats, qlen) < 0)
q 946 net/sched/sch_api.c static bool tc_qdisc_dump_ignore(struct Qdisc *q, bool dump_invisible)
q 948 net/sched/sch_api.c if (q->flags & TCQ_F_BUILTIN)
q 950 net/sched/sch_api.c if ((q->flags & TCQ_F_INVISIBLE) && !dump_invisible)
q 1025 net/sched/sch_api.c struct Qdisc *q = old;
q 1033 net/sched/sch_api.c if ((q && q->flags & TCQ_F_INGRESS) ||
q 1358 net/sched/sch_api.c static int check_loop_fn(struct Qdisc *q, unsigned long cl,
q 1361 net/sched/sch_api.c static int check_loop(struct Qdisc *q, struct Qdisc *p, int depth)
q 1365 net/sched/sch_api.c if (q->ops->cl_ops == NULL)
q 1372 net/sched/sch_api.c q->ops->cl_ops->walk(q, &arg.w);
q 1377 net/sched/sch_api.c check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w)
q 1380 net/sched/sch_api.c const struct Qdisc_class_ops *cops = q->ops->cl_ops;
q 1383 net/sched/sch_api.c leaf = cops->leaf(q, cl);
q 1415 net/sched/sch_api.c struct Qdisc *q = NULL;
q 1441 net/sched/sch_api.c q = qdisc_leaf(p, clid);
q 1443 net/sched/sch_api.c q = dev_ingress_queue(dev)->qdisc_sleeping;
q 1446 net/sched/sch_api.c q = dev->qdisc;
q 1448 net/sched/sch_api.c if (!q) {
q 1453 net/sched/sch_api.c if (tcm->tcm_handle && q->handle != tcm->tcm_handle) {
q 1458 net/sched/sch_api.c q = qdisc_lookup(dev, tcm->tcm_handle);
q 1459 net/sched/sch_api.c if (!q) {
q 1465 net/sched/sch_api.c if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id)) {
q 1475 net/sched/sch_api.c if (q->handle == 0) {
q 1479 net/sched/sch_api.c err = qdisc_graft(dev, p, skb, n, clid, NULL, q, extack);
q 1483 net/sched/sch_api.c qdisc_notify(net, skb, n, clid, NULL, q);
q 1500 net/sched/sch_api.c struct Qdisc *q, *p;
q 1515 net/sched/sch_api.c q = p = NULL;
q 1530 net/sched/sch_api.c q = qdisc_leaf(p, clid);
q 1532 net/sched/sch_api.c q = dev_ingress_queue(dev)->qdisc_sleeping;
q 1535 net/sched/sch_api.c q = dev->qdisc;
q 1539 net/sched/sch_api.c if (q && q->handle == 0)
q 1540 net/sched/sch_api.c q = NULL;
q 1542 net/sched/sch_api.c if (!q || !tcm->tcm_handle || q->handle != tcm->tcm_handle) {
q 1544 net/sched/sch_api.c if (q && !(n->nlmsg_flags & NLM_F_REPLACE)) {
q 1552 net/sched/sch_api.c q = qdisc_lookup(dev, tcm->tcm_handle);
q 1553 net/sched/sch_api.c if (!q)
q 1560 net/sched/sch_api.c nla_strcmp(tca[TCA_KIND], q->ops->id)) {
q 1564 net/sched/sch_api.c if (q == p ||
q 1565 net/sched/sch_api.c (p && check_loop(q, p, 0))) {
q 1569 net/sched/sch_api.c qdisc_refcount_inc(q);
q 1572 net/sched/sch_api.c if (!q)
q 1598 net/sched/sch_api.c nla_strcmp(tca[TCA_KIND], q->ops->id))))
q 1607 net/sched/sch_api.c q = qdisc_lookup(dev, tcm->tcm_handle);
q 1611 net/sched/sch_api.c if (!q) {
q 1619 net/sched/sch_api.c if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id)) {
q 1623 net/sched/sch_api.c err = qdisc_change(q, tca, extack);
q 1625 net/sched/sch_api.c qdisc_notify(net, skb, n, clid, NULL, q);
q 1635 net/sched/sch_api.c q = qdisc_create(dev, dev_ingress_queue(dev), p,
q 1652 net/sched/sch_api.c q = qdisc_create(dev, dev_queue, p,
q 1656 net/sched/sch_api.c if (q == NULL) {
q 1663 net/sched/sch_api.c err = qdisc_graft(dev, p, skb, n, clid, q, NULL, extack);
q 1665 net/sched/sch_api.c if (q)
q 1666 net/sched/sch_api.c qdisc_put(q);
q 1679 net/sched/sch_api.c struct Qdisc *q;
q 1685 net/sched/sch_api.c q = root;
q 1689 net/sched/sch_api.c if (!tc_qdisc_dump_ignore(q, dump_invisible) &&
q 1690 net/sched/sch_api.c tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
q 1706 net/sched/sch_api.c hash_for_each(qdisc_dev(root)->qdisc_hash, b, q, hash) {
q 1711 net/sched/sch_api.c if (!tc_qdisc_dump_ignore(q, dump_invisible) &&
q 1712 net/sched/sch_api.c tc_fill_qdisc(skb, q, q->parent, NETLINK_CB(cb->skb).portid,
q 1785 net/sched/sch_api.c static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q,
q 1793 net/sched/sch_api.c const struct Qdisc_class_ops *cl_ops = q->ops->cl_ops;
q 1803 net/sched/sch_api.c tcm->tcm_ifindex = qdisc_dev(q)->ifindex;
q 1804 net/sched/sch_api.c tcm->tcm_parent = q->handle;
q 1805 net/sched/sch_api.c tcm->tcm_handle = q->handle;
q 1807 net/sched/sch_api.c if (nla_put_string(skb, TCA_KIND, q->ops->id))
q 1809 net/sched/sch_api.c if (cl_ops->dump && cl_ops->dump(q, cl, skb, tcm) < 0)
q 1816 net/sched/sch_api.c if (cl_ops->dump_stats && cl_ops->dump_stats(q, cl, &d) < 0)
q 1832 net/sched/sch_api.c struct nlmsghdr *n, struct Qdisc *q,
q 1843 net/sched/sch_api.c if (tc_fill_tclass(skb, q, cl, portid, n->nlmsg_seq, 0, event) < 0) {
q 1858 net/sched/sch_api.c struct Qdisc *q, unsigned long cl)
q 1871 net/sched/sch_api.c if (tc_fill_tclass(skb, q, cl, portid, n->nlmsg_seq, 0,
q 1877 net/sched/sch_api.c err = cops->delete(q, cl);
q 1904 net/sched/sch_api.c struct Qdisc *q = tcf_block_q(tp->chain->block);
q 1906 net/sched/sch_api.c sch_tree_lock(q);
q 1907 net/sched/sch_api.c tp->ops->bind_class(n, a->classid, a->cl, q, a->base);
q 1908 net/sched/sch_api.c sch_tree_unlock(q);
q 1920 net/sched/sch_api.c static int tc_bind_class_walker(struct Qdisc *q, unsigned long cl,
q 1924 net/sched/sch_api.c const struct Qdisc_class_ops *cops = q->ops->cl_ops;
q 1928 net/sched/sch_api.c block = cops->tcf_block(q, cl, NULL);
q 1951 net/sched/sch_api.c static void tc_bind_tclass(struct Qdisc *q, u32 portid, u32 clid,
q 1954 net/sched/sch_api.c const struct Qdisc_class_ops *cops = q->ops->cl_ops;
q 1963 net/sched/sch_api.c q->ops->cl_ops->walk(q, &args.w);
q 1968 net/sched/sch_api.c static void tc_bind_tclass(struct Qdisc *q, u32 portid, u32 clid,
q 1982 net/sched/sch_api.c struct Qdisc *q = NULL;
q 2048 net/sched/sch_api.c q = qdisc_lookup(dev, qid);
q 2049 net/sched/sch_api.c if (!q)
q 2053 net/sched/sch_api.c cops = q->ops->cl_ops;
q 2065 net/sched/sch_api.c cl = cops->find(q, clid);
q 2080 net/sched/sch_api.c err = tclass_del_notify(net, cops, skb, n, q, cl);
q 2082 net/sched/sch_api.c tc_bind_tclass(q, portid, clid, 0);
q 2085 net/sched/sch_api.c err = tclass_notify(net, skb, n, q, cl, RTM_NEWTCLASS);
q 2101 net/sched/sch_api.c err = cops->change(q, clid, portid, tca, &new_cl, extack);
q 2103 net/sched/sch_api.c tclass_notify(net, skb, n, q, new_cl, RTM_NEWTCLASS);
q 2106 net/sched/sch_api.c tc_bind_tclass(q, portid, clid, new_cl);
q 2118 net/sched/sch_api.c static int qdisc_class_dump(struct Qdisc *q, unsigned long cl,
q 2123 net/sched/sch_api.c return tc_fill_tclass(a->skb, q, cl, NETLINK_CB(a->cb->skb).portid,
q 2128 net/sched/sch_api.c static int tc_dump_tclass_qdisc(struct Qdisc *q, struct sk_buff *skb,
q 2134 net/sched/sch_api.c if (tc_qdisc_dump_ignore(q, false) ||
q 2135 net/sched/sch_api.c *t_p < s_t || !q->ops->cl_ops ||
q 2137 net/sched/sch_api.c TC_H_MAJ(tcm->tcm_parent) != q->handle)) {
q 2149 net/sched/sch_api.c q->ops->cl_ops->walk(q, &arg.w);
q 2161 net/sched/sch_api.c struct Qdisc *q;
q 2174 net/sched/sch_api.c q = qdisc_match_from_root(root, TC_H_MAJ(tcm->tcm_parent));
q 2175 net/sched/sch_api.c if (q && q != root &&
q 2176 net/sched/sch_api.c tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)
q 2180 net/sched/sch_api.c hash_for_each(qdisc_dev(root)->qdisc_hash, b, q, hash) {
q 2181 net/sched/sch_api.c if (tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)
q 46 net/sched/sch_atm.c struct Qdisc *q; /* FIFO, TBF, etc. */
q 98 net/sched/sch_atm.c *old = flow->q;
q 99 net/sched/sch_atm.c flow->q = new;
q 110 net/sched/sch_atm.c return flow ? flow->q : NULL;
q 153 net/sched/sch_atm.c pr_debug("atm_tc_put: qdisc %p\n", flow->q);
q 154 net/sched/sch_atm.c qdisc_put(flow->q);
q 295 net/sched/sch_atm.c flow->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, classid,
q 297 net/sched/sch_atm.c if (!flow->q)
q 298 net/sched/sch_atm.c flow->q = &noop_qdisc;
q 299 net/sched/sch_atm.c pr_debug("atm_tc_change: qdisc %p\n", flow->q);
q 435 net/sched/sch_atm.c ret = qdisc_enqueue(skb, flow->q, to_free);
q 455 net/sched/sch_atm.c sch->q.qlen++;
q 484 net/sched/sch_atm.c while ((skb = flow->q->ops->peek(flow->q))) {
q 488 net/sched/sch_atm.c skb = qdisc_dequeue_peeked(flow->q);
q 526 net/sched/sch_atm.c skb = qdisc_dequeue_peeked(p->link.q);
q 528 net/sched/sch_atm.c sch->q.qlen--;
q 538 net/sched/sch_atm.c return p->link.q->ops->peek(p->link.q);
q 551 net/sched/sch_atm.c p->link.q = qdisc_create_dflt(sch->dev_queue,
q 553 net/sched/sch_atm.c if (!p->link.q)
q 554 net/sched/sch_atm.c p->link.q = &noop_qdisc;
q 555 net/sched/sch_atm.c pr_debug("atm_tc_init: link (%p) qdisc %p\n", &p->link, p->link.q);
q 577 net/sched/sch_atm.c qdisc_reset(flow->q);
q 578 net/sched/sch_atm.c sch->q.qlen = 0;
q 612 net/sched/sch_atm.c tcm->tcm_info = flow->q->handle;
q 656 net/sched/sch_atm.c gnet_stats_copy_queue(d, NULL, &flow->qstats, flow->q->q.qlen) < 0)
q 625 net/sched/sch_cake.c static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb,
q 706 net/sched/sch_cake.c if (likely(q->tags[reduced_hash] == flow_hash &&
q 707 net/sched/sch_cake.c q->flows[reduced_hash].set)) {
q 708 net/sched/sch_cake.c q->way_directs++;
q 721 net/sched/sch_cake.c if (q->tags[outer_hash + k] == flow_hash) {
q 723 net/sched/sch_cake.c q->way_hits++;
q 725 net/sched/sch_cake.c if (!q->flows[outer_hash + k].set) {
q 740 net/sched/sch_cake.c if (!q->flows[outer_hash + k].set) {
q 741 net/sched/sch_cake.c q->way_misses++;
q 751 net/sched/sch_cake.c q->way_collisions++;
q 752 net/sched/sch_cake.c if (q->flows[outer_hash + k].set == CAKE_SET_BULK) {
q 753 net/sched/sch_cake.c q->hosts[q->flows[reduced_hash].srchost].srchost_bulk_flow_count--;
q 754 net/sched/sch_cake.c q->hosts[q->flows[reduced_hash].dsthost].dsthost_bulk_flow_count--;
q 761 net/sched/sch_cake.c q->tags[reduced_hash] = flow_hash;
q 769 net/sched/sch_cake.c if (q->hosts[outer_hash + k].srchost_tag ==
q 775 net/sched/sch_cake.c if (!q->hosts[outer_hash + k].srchost_bulk_flow_count)
q 778 net/sched/sch_cake.c q->hosts[outer_hash + k].srchost_tag = srchost_hash;
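The cake_hash entries above implement a set-associative flow table: the reduced hash picks a set, each slot keeps the full flow tag, and a probe walks CAKE_SET_WAYS slots counting direct hits, way hits, misses and collisions. A simplified sketch of the probe, with illustrative sizes and none of the host accounting:

#include <stdio.h>
#include <stdint.h>

#define QUEUES	16
#define WAYS	4	/* slots probed per set, like CAKE_SET_WAYS */

static uint32_t tags[QUEUES];	/* full flow tag per slot */
static uint8_t  used[QUEUES];

static uint32_t flow_lookup(uint32_t flow_hash)
{
	uint32_t reduced = flow_hash % QUEUES;
	uint32_t outer = reduced - (reduced % WAYS);	/* start of the set */
	uint32_t k, idx = reduced;

	if (used[reduced] && tags[reduced] == flow_hash)
		return reduced;				/* direct hit */

	for (k = 0; k < WAYS; k++) {			/* probe the set */
		idx = outer + (reduced + k) % WAYS;
		if (used[idx] && tags[idx] == flow_hash)
			return idx;			/* way hit */
		if (!used[idx])
			break;				/* free slot: a miss */
	}
	tags[idx] = flow_hash;	/* claim the slot (on a full set: evict) */
	used[idx] = 1;
	return idx;
}

int main(void)
{
	printf("%u\n", flow_lookup(0xdeadbeef));
	printf("%u\n", flow_lookup(0xdeadbeef));	/* same slot again */
	return 0;
}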
q 781 net/sched/sch_cake.c if (q->flows[reduced_hash].set == CAKE_SET_BULK)
q 782 net/sched/sch_cake.c q->hosts[srchost_idx].srchost_bulk_flow_count++;
q 783 net/sched/sch_cake.c q->flows[reduced_hash].srchost = srchost_idx;
q 792 net/sched/sch_cake.c if (q->hosts[outer_hash + k].dsthost_tag ==
q 798 net/sched/sch_cake.c if (!q->hosts[outer_hash + k].dsthost_bulk_flow_count)
q 801 net/sched/sch_cake.c q->hosts[outer_hash + k].dsthost_tag = dsthost_hash;
q 804 net/sched/sch_cake.c if (q->flows[reduced_hash].set == CAKE_SET_BULK)
q 805 net/sched/sch_cake.c q->hosts[dsthost_idx].dsthost_bulk_flow_count++;
q 806 net/sched/sch_cake.c q->flows[reduced_hash].dsthost = dsthost_idx;
q 1111 net/sched/sch_cake.c static struct sk_buff *cake_ack_filter(struct cake_sched_data *q,
q 1114 net/sched/sch_cake.c bool aggressive = q->ack_filter == CAKE_ACK_AGGRESSIVE;
q 1274 net/sched/sch_cake.c static u32 cake_calc_overhead(struct cake_sched_data *q, u32 len, u32 off)
q 1276 net/sched/sch_cake.c if (q->rate_flags & CAKE_FLAG_OVERHEAD)
q 1279 net/sched/sch_cake.c if (q->max_netlen < len)
q 1280 net/sched/sch_cake.c q->max_netlen = len;
q 1281 net/sched/sch_cake.c if (q->min_netlen > len)
q 1282 net/sched/sch_cake.c q->min_netlen = len;
q 1284 net/sched/sch_cake.c len += q->rate_overhead;
q 1286 net/sched/sch_cake.c if (len < q->rate_mpu)
q 1287 net/sched/sch_cake.c len = q->rate_mpu;
q 1289 net/sched/sch_cake.c if (q->atm_mode == CAKE_ATM_ATM) {
q 1293 net/sched/sch_cake.c } else if (q->atm_mode == CAKE_ATM_PTM) {
q 1301 net/sched/sch_cake.c if (q->max_adjlen < len)
q 1302 net/sched/sch_cake.c q->max_adjlen = len;
q 1303 net/sched/sch_cake.c if (q->min_adjlen > len)
q 1304 net/sched/sch_cake.c q->min_adjlen = len;
q 1309 net/sched/sch_cake.c static u32 cake_overhead(struct cake_sched_data *q, const struct sk_buff *skb)
q 1317 net/sched/sch_cake.c q->avg_netoff = cake_ewma(q->avg_netoff, off << 16, 8);
q 1320 net/sched/sch_cake.c return cake_calc_overhead(q, len, off);
q 1352 net/sched/sch_cake.c return (cake_calc_overhead(q, len, off) * (segs - 1) +
q 1353 net/sched/sch_cake.c cake_calc_overhead(q, last_len, off));
q 1356 net/sched/sch_cake.c static void cake_heap_swap(struct cake_sched_data *q, u16 i, u16 j)
q 1358 net/sched/sch_cake.c struct cake_heap_entry ii = q->overflow_heap[i];
q 1359 net/sched/sch_cake.c struct cake_heap_entry jj = q->overflow_heap[j];
q 1361 net/sched/sch_cake.c q->overflow_heap[i] = jj;
q 1362 net/sched/sch_cake.c q->overflow_heap[j] = ii;
q 1364 net/sched/sch_cake.c q->tins[ii.t].overflow_idx[ii.b] = j;
q 1365 net/sched/sch_cake.c q->tins[jj.t].overflow_idx[jj.b] = i;
q 1368 net/sched/sch_cake.c static u32 cake_heap_get_backlog(const struct cake_sched_data *q, u16 i)
q 1370 net/sched/sch_cake.c struct cake_heap_entry ii = q->overflow_heap[i];
q 1372 net/sched/sch_cake.c return q->tins[ii.t].backlogs[ii.b];
q 1375 net/sched/sch_cake.c static void cake_heapify(struct cake_sched_data *q, u16 i)
q 1378 net/sched/sch_cake.c u32 mb = cake_heap_get_backlog(q, i);
q 1386 net/sched/sch_cake.c u32 lb = cake_heap_get_backlog(q, l);
q 1395 net/sched/sch_cake.c u32 rb = cake_heap_get_backlog(q, r);
q 1404 net/sched/sch_cake.c cake_heap_swap(q, i, m);
q 1412 net/sched/sch_cake.c static void cake_heapify_up(struct cake_sched_data *q, u16 i)
q 1416 net/sched/sch_cake.c u32 ib = cake_heap_get_backlog(q, i);
q 1417 net/sched/sch_cake.c u32 pb = cake_heap_get_backlog(q, p);
q 1420 net/sched/sch_cake.c cake_heap_swap(q, i, p);
q 1428 net/sched/sch_cake.c static int cake_advance_shaper(struct cake_sched_data *q,
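The cake_heapify entries above maintain a max-heap over per-(tin, flow) backlogs so the fattest queue sits at the root and is the first to shed packets under memory pressure. A standalone sift-down over a plain array, assuming the heap is keyed directly by backlog:

#include <stdio.h>

#define HEAP_SIZE 7

static unsigned int backlog[HEAP_SIZE] = { 10, 80, 30, 5, 60, 20, 1 };

static void heap_swap(unsigned int *h, int i, int j)
{
	unsigned int t = h[i]; h[i] = h[j]; h[j] = t;
}

/* Sift index i down until both children are smaller, like cake_heapify. */
static void heapify(unsigned int *h, int i)
{
	for (;;) {
		int l = 2 * i + 1, r = 2 * i + 2, m = i;

		if (l < HEAP_SIZE && h[l] > h[m])
			m = l;
		if (r < HEAP_SIZE && h[r] > h[m])
			m = r;
		if (m == i)
			break;		/* heap property restored */
		heap_swap(h, i, m);
		i = m;
	}
}

int main(void)
{
	heapify(backlog, 0);
	printf("root backlog: %u\n", backlog[0]);	/* 80 */
	return 0;
}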
q 1438 net/sched/sch_cake.c if (q->rate_ns) {
q 1440 net/sched/sch_cake.c u64 global_dur = (len * q->rate_ns) >> q->rate_shft;
q 1451 net/sched/sch_cake.c q->time_next_packet = ktime_add_ns(q->time_next_packet,
q 1454 net/sched/sch_cake.c q->failsafe_next_packet = \
q 1455 net/sched/sch_cake.c ktime_add_ns(q->failsafe_next_packet,
q 1463 net/sched/sch_cake.c struct cake_sched_data *q = qdisc_priv(sch);
q 1471 net/sched/sch_cake.c if (!q->overflow_timeout) {
q 1475 net/sched/sch_cake.c cake_heapify(q, i);
q 1477 net/sched/sch_cake.c q->overflow_timeout = 65535;
q 1480 net/sched/sch_cake.c qq = q->overflow_heap[0];
q 1484 net/sched/sch_cake.c b = &q->tins[tin];
q 1489 net/sched/sch_cake.c q->overflow_timeout = 0;
q 1497 net/sched/sch_cake.c q->buffer_used -= skb->truesize;
q 1507 net/sched/sch_cake.c if (q->rate_flags & CAKE_FLAG_INGRESS)
q 1508 net/sched/sch_cake.c cake_advance_shaper(q, b, skb, now, true);
q 1511 net/sched/sch_cake.c sch->q.qlen--;
q 1513 net/sched/sch_cake.c cake_heapify(q, 0);
q 1558 net/sched/sch_cake.c struct cake_sched_data *q = qdisc_priv(sch);
q 1566 net/sched/sch_cake.c q->rate_flags & CAKE_FLAG_WASH);
q 1567 net/sched/sch_cake.c mark = (skb->mark & q->fwmark_mask) >> q->fwmark_shft;
q 1569 net/sched/sch_cake.c if (q->tin_mode == CAKE_DIFFSERV_BESTEFFORT)
q 1572 net/sched/sch_cake.c else if (mark && mark <= q->tin_cnt)
q 1573 net/sched/sch_cake.c tin = q->tin_order[mark - 1];
q 1577 net/sched/sch_cake.c TC_H_MIN(skb->priority) <= q->tin_cnt)
q 1578 net/sched/sch_cake.c tin = q->tin_order[TC_H_MIN(skb->priority) - 1];
q 1581 net/sched/sch_cake.c tin = q->tin_index[dscp];
q 1583 net/sched/sch_cake.c if (unlikely(tin >= q->tin_cnt))
q 1587 net/sched/sch_cake.c return &q->tins[tin];
q 1593 net/sched/sch_cake.c struct cake_sched_data *q = qdisc_priv(sch);
q 1599 net/sched/sch_cake.c filter = rcu_dereference_bh(q->filter_list);
q 1633 net/sched/sch_cake.c struct cake_sched_data *q = qdisc_priv(sch);
q 1643 net/sched/sch_cake.c idx = cake_classify(sch, &b, skb, q->flow_mode, &ret);
q 1658 net/sched/sch_cake.c if (!sch->q.qlen) {
q 1659 net/sched/sch_cake.c if (ktime_before(q->time_next_packet, now)) {
q 1660 net/sched/sch_cake.c q->failsafe_next_packet = now;
q 1661 net/sched/sch_cake.c q->time_next_packet = now;
q 1662 net/sched/sch_cake.c } else if (ktime_after(q->time_next_packet, now) &&
q 1663 net/sched/sch_cake.c ktime_after(q->failsafe_next_packet, now)) {
q 1665 net/sched/sch_cake.c min(ktime_to_ns(q->time_next_packet),
q 1667 net/sched/sch_cake.c q->failsafe_next_packet));
q 1669 net/sched/sch_cake.c qdisc_watchdog_schedule_ns(&q->watchdog, next);
q 1677 net/sched/sch_cake.c if (skb_is_gso(skb) && q->rate_flags & CAKE_FLAG_SPLIT_GSO) {
q 1691 net/sched/sch_cake.c get_cobalt_cb(segs)->adjusted_len = cake_overhead(q,
q 1695 net/sched/sch_cake.c sch->q.qlen++;
q 1698 net/sched/sch_cake.c q->buffer_used += segs->truesize;
q 1708 net/sched/sch_cake.c q->avg_window_bytes += slen;
q 1715 net/sched/sch_cake.c get_cobalt_cb(skb)->adjusted_len = cake_overhead(q, skb);
q 1718 net/sched/sch_cake.c if (q->ack_filter)
q 1719 net/sched/sch_cake.c ack = cake_ack_filter(q, flow);
q 1726 net/sched/sch_cake.c q->buffer_used += skb->truesize - ack->truesize;
q 1727 net/sched/sch_cake.c if (q->rate_flags & CAKE_FLAG_INGRESS)
q 1728 net/sched/sch_cake.c cake_advance_shaper(q, b, ack, now, true);
q 1733 net/sched/sch_cake.c sch->q.qlen++;
q 1734 net/sched/sch_cake.c q->buffer_used += skb->truesize;
q 1743 net/sched/sch_cake.c q->avg_window_bytes += len;
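cake_advance_shaper above advances a virtual clock by converting each transmitted length into a duration via a fixed-point ns-per-byte rate (rate_ns scaled by rate_shft), pushing out the next-packet timestamp. A sketch with illustrative numbers:

#include <stdio.h>
#include <stdint.h>

struct shaper {
	uint64_t rate_ns;		/* ns per byte, scaled by rate_shft */
	unsigned int rate_shft;
	uint64_t time_next_packet;	/* absolute ns */
};

static void advance_shaper(struct shaper *q, uint32_t len)
{
	if (q->rate_ns) {
		/* duration = len * rate_ns >> rate_shft, as in the listing */
		uint64_t dur = ((uint64_t)len * q->rate_ns) >> q->rate_shft;

		q->time_next_packet += dur;
	}
}

int main(void)
{
	/* ~1 Gbit/s: 8 ns per byte, stored as 8 << 8 with shift 8 */
	struct shaper q = {
		.rate_ns = 8 << 8,
		.rate_shft = 8,
		.time_next_packet = 1000000,
	};

	advance_shaper(&q, 1500);	/* one full-size packet */
	printf("next packet at %llu ns\n",
	       (unsigned long long)q.time_next_packet);	/* 1012000 */
	return 0;
}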
q 1746 net/sched/sch_cake.c if (q->overflow_timeout)
q 1747 net/sched/sch_cake.c cake_heapify_up(q, b->overflow_idx[idx]);
q 1750 net/sched/sch_cake.c if (q->rate_flags & CAKE_FLAG_AUTORATE_INGRESS) {
q 1752 net/sched/sch_cake.c ktime_to_ns(ktime_sub(now, q->last_packet_time));
q 1758 net/sched/sch_cake.c q->avg_packet_interval = \
q 1759 net/sched/sch_cake.c cake_ewma(q->avg_packet_interval,
q 1761 net/sched/sch_cake.c (packet_interval > q->avg_packet_interval ?
q 1764 net/sched/sch_cake.c q->last_packet_time = now;
q 1766 net/sched/sch_cake.c if (packet_interval > q->avg_packet_interval) {
q 1769 net/sched/sch_cake.c q->avg_window_begin));
q 1770 net/sched/sch_cake.c u64 b = q->avg_window_bytes * (u64)NSEC_PER_SEC;
q 1773 net/sched/sch_cake.c q->avg_peak_bandwidth =
q 1774 net/sched/sch_cake.c cake_ewma(q->avg_peak_bandwidth, b,
q 1775 net/sched/sch_cake.c b > q->avg_peak_bandwidth ? 2 : 8);
q 1776 net/sched/sch_cake.c q->avg_window_bytes = 0;
q 1777 net/sched/sch_cake.c q->avg_window_begin = now;
q 1780 net/sched/sch_cake.c ktime_add_ms(q->last_reconfig_time,
q 1782 net/sched/sch_cake.c q->rate_bps = (q->avg_peak_bandwidth * 15) >> 4;
q 1787 net/sched/sch_cake.c q->avg_window_bytes = 0;
q 1788 net/sched/sch_cake.c q->last_packet_time = now;
q 1806 net/sched/sch_cake.c if (cake_dsrc(q->flow_mode))
q 1809 net/sched/sch_cake.c if (cake_ddst(q->flow_mode))
q 1825 net/sched/sch_cake.c if (cake_dsrc(q->flow_mode))
q 1828 net/sched/sch_cake.c if (cake_ddst(q->flow_mode))
q 1833 net/sched/sch_cake.c if (q->buffer_used > q->buffer_max_used)
q 1834 net/sched/sch_cake.c q->buffer_max_used = q->buffer_used;
q 1836 net/sched/sch_cake.c if (q->buffer_used > q->buffer_limit) {
q 1839 net/sched/sch_cake.c while (q->buffer_used > q->buffer_limit) {
q 1850 net/sched/sch_cake.c struct cake_sched_data *q = qdisc_priv(sch);
q 1851 net/sched/sch_cake.c struct cake_tin_data *b = &q->tins[q->cur_tin];
q 1852 net/sched/sch_cake.c struct cake_flow *flow = &b->flows[q->cur_flow];
q 1859 net/sched/sch_cake.c b->backlogs[q->cur_flow] -= len;
q 1862 net/sched/sch_cake.c q->buffer_used -= skb->truesize;
q 1863 net/sched/sch_cake.c sch->q.qlen--;
q 1865 net/sched/sch_cake.c if (q->overflow_timeout)
q 1866 net/sched/sch_cake.c cake_heapify(q, b->overflow_idx[q->cur_flow]);
q 1874 net/sched/sch_cake.c struct cake_sched_data *q = qdisc_priv(sch);
q 1877 net/sched/sch_cake.c q->cur_tin = tin;
q 1878 net/sched/sch_cake.c for (q->cur_flow = 0; q->cur_flow < CAKE_QUEUES; q->cur_flow++)
q 1885 net/sched/sch_cake.c struct cake_sched_data *q = qdisc_priv(sch);
q 1886 net/sched/sch_cake.c struct cake_tin_data *b = &q->tins[q->cur_tin];
q 1898 net/sched/sch_cake.c if (!sch->q.qlen)
q 1902 net/sched/sch_cake.c if (ktime_after(q->time_next_packet, now) &&
q 1903 net/sched/sch_cake.c ktime_after(q->failsafe_next_packet, now)) {
q 1904 net/sched/sch_cake.c u64 next = min(ktime_to_ns(q->time_next_packet),
q 1905 net/sched/sch_cake.c ktime_to_ns(q->failsafe_next_packet));
q 1908 net/sched/sch_cake.c qdisc_watchdog_schedule_ns(&q->watchdog, next);
q 1913 net/sched/sch_cake.c if (!q->rate_ns) {
q 1926 net/sched/sch_cake.c q->cur_tin++;
q 1928 net/sched/sch_cake.c if (q->cur_tin >= q->tin_cnt) {
q 1929 net/sched/sch_cake.c q->cur_tin = 0;
q 1930 net/sched/sch_cake.c b = q->tins;
q 1952 net/sched/sch_cake.c for (tin = 0; tin < q->tin_cnt; tin++) {
q 1953 net/sched/sch_cake.c b = q->tins + tin;
q 1967 net/sched/sch_cake.c q->cur_tin = best_tin;
q 1968 net/sched/sch_cake.c b = q->tins + best_tin;
q 1990 net/sched/sch_cake.c q->cur_flow = flow - b->flows;
q 2009 net/sched/sch_cake.c if (cake_dsrc(q->flow_mode))
q 2012 net/sched/sch_cake.c if (cake_ddst(q->flow_mode))
q 2025 net/sched/sch_cake.c if (cake_dsrc(q->flow_mode))
q 2028 net/sched/sch_cake.c if (cake_ddst(q->flow_mode))
q 2061 net/sched/sch_cake.c if (cake_dsrc(q->flow_mode))
q 2064 net/sched/sch_cake.c if (cake_ddst(q->flow_mode))
q 2083 net/sched/sch_cake.c if (cake_dsrc(q->flow_mode))
q 2086 net/sched/sch_cake.c if (cake_ddst(q->flow_mode))
q 2100 net/sched/sch_cake.c !!(q->rate_flags &
q 2106 net/sched/sch_cake.c if (q->rate_flags & CAKE_FLAG_INGRESS) {
q 2107 net/sched/sch_cake.c len = cake_advance_shaper(q, b, skb,
q 2117 net/sched/sch_cake.c if (q->rate_flags & CAKE_FLAG_INGRESS)
q 2132 net/sched/sch_cake.c len = cake_advance_shaper(q, b, skb, now, false);
q 2136 net/sched/sch_cake.c if (ktime_after(q->time_next_packet, now) && sch->q.qlen) {
q 2137 net/sched/sch_cake.c u64 next = min(ktime_to_ns(q->time_next_packet),
q 2138 net/sched/sch_cake.c ktime_to_ns(q->failsafe_next_packet));
q 2140 net/sched/sch_cake.c qdisc_watchdog_schedule_ns(&q->watchdog, next);
q 2141 net/sched/sch_cake.c } else if (!sch->q.qlen) {
q 2144 net/sched/sch_cake.c for (i = 0; i < q->tin_cnt; i++) {
q 2145 net/sched/sch_cake.c if (q->tins[i].decaying_flow_count) {
q 2148 net/sched/sch_cake.c q->tins[i].cparams.target);
q 2150 net/sched/sch_cake.c qdisc_watchdog_schedule_ns(&q->watchdog,
q 2157 net/sched/sch_cake.c if (q->overflow_timeout)
q 2158 net/sched/sch_cake.c q->overflow_timeout--;
q 2232 net/sched/sch_cake.c struct cake_sched_data *q = qdisc_priv(sch);
q 2233 net/sched/sch_cake.c struct cake_tin_data *b = &q->tins[0];
q 2235 net/sched/sch_cake.c u64 rate = q->rate_bps;
q 2237 net/sched/sch_cake.c q->tin_cnt = 1;
q 2239 net/sched/sch_cake.c q->tin_index = besteffort;
q 2240 net/sched/sch_cake.c q->tin_order = normal_order;
q 2243 net/sched/sch_cake.c us_to_ns(q->target), us_to_ns(q->interval));
q 2253 net/sched/sch_cake.c struct cake_sched_data *q = qdisc_priv(sch);
q 2255 net/sched/sch_cake.c u64 rate = q->rate_bps;
q 2260 net/sched/sch_cake.c q->tin_cnt = 8;
q 2261 net/sched/sch_cake.c q->tin_index = precedence;
q 2262 net/sched/sch_cake.c q->tin_order = normal_order;
q 2264 net/sched/sch_cake.c for (i = 0; i < q->tin_cnt; i++) {
q 2265 net/sched/sch_cake.c struct cake_tin_data *b = &q->tins[i];
q 2267 net/sched/sch_cake.c cake_set_rate(b, rate, mtu, us_to_ns(q->target),
q 2268 net/sched/sch_cake.c us_to_ns(q->interval));
q 2347 net/sched/sch_cake.c struct cake_sched_data *q = qdisc_priv(sch);
q 2349 net/sched/sch_cake.c u64 rate = q->rate_bps;
q 2354 net/sched/sch_cake.c q->tin_cnt = 8;
q 2357 net/sched/sch_cake.c q->tin_index = diffserv8;
q 2358 net/sched/sch_cake.c q->tin_order = normal_order;
q 2361 net/sched/sch_cake.c for (i = 0; i < q->tin_cnt; i++) {
q 2362 net/sched/sch_cake.c struct cake_tin_data *b = &q->tins[i];
q 2364 net/sched/sch_cake.c cake_set_rate(b, rate, mtu, us_to_ns(q->target),
q 2365 net/sched/sch_cake.c us_to_ns(q->interval));
q 2396 net/sched/sch_cake.c struct cake_sched_data *q = qdisc_priv(sch);
q 2398 net/sched/sch_cake.c u64 rate = q->rate_bps;
q 2401 net/sched/sch_cake.c q->tin_cnt = 4;
q 2404 net/sched/sch_cake.c q->tin_index = diffserv4;
q 2405 net/sched/sch_cake.c q->tin_order = bulk_order;
q 2408 net/sched/sch_cake.c cake_set_rate(&q->tins[0], rate, mtu,
q 2409 net/sched/sch_cake.c us_to_ns(q->target), us_to_ns(q->interval));
q 2410 net/sched/sch_cake.c cake_set_rate(&q->tins[1], rate >> 4, mtu,
q 2411 net/sched/sch_cake.c us_to_ns(q->target), us_to_ns(q->interval));
q 2412 net/sched/sch_cake.c cake_set_rate(&q->tins[2], rate >> 1, mtu,
q 2413 net/sched/sch_cake.c us_to_ns(q->target), us_to_ns(q->interval));
q 2414 net/sched/sch_cake.c cake_set_rate(&q->tins[3], rate >> 2, mtu,
q 2415 net/sched/sch_cake.c us_to_ns(q->target), us_to_ns(q->interval));
q 2418 net/sched/sch_cake.c q->tins[0].tin_quantum_prio = quantum;
q 2419 net/sched/sch_cake.c q->tins[1].tin_quantum_prio = quantum >> 4;
q 2420 net/sched/sch_cake.c q->tins[2].tin_quantum_prio = quantum << 2;
q 2421 net/sched/sch_cake.c q->tins[3].tin_quantum_prio = quantum << 4;
q 2424 net/sched/sch_cake.c q->tins[0].tin_quantum_band = quantum;
q 2425 net/sched/sch_cake.c q->tins[1].tin_quantum_band = quantum >> 4;
q 2426 net/sched/sch_cake.c q->tins[2].tin_quantum_band = quantum >> 1;
q 2427 net/sched/sch_cake.c q->tins[3].tin_quantum_band = quantum >> 2;
q 2439 net/sched/sch_cake.c struct cake_sched_data *q = qdisc_priv(sch);
q 2441 net/sched/sch_cake.c u64 rate = q->rate_bps;
q 2444 net/sched/sch_cake.c q->tin_cnt = 3;
q 2447 net/sched/sch_cake.c q->tin_index = diffserv3;
q 2448 net/sched/sch_cake.c q->tin_order = bulk_order;
q 2451 net/sched/sch_cake.c cake_set_rate(&q->tins[0], rate, mtu,
q 2452 net/sched/sch_cake.c us_to_ns(q->target), us_to_ns(q->interval));
q 2453 net/sched/sch_cake.c cake_set_rate(&q->tins[1], rate >> 4, mtu,
q 2454 net/sched/sch_cake.c us_to_ns(q->target), us_to_ns(q->interval));
q 2455 net/sched/sch_cake.c cake_set_rate(&q->tins[2], rate >> 2, mtu,
q 2456 net/sched/sch_cake.c us_to_ns(q->target), us_to_ns(q->interval));
q 2459 net/sched/sch_cake.c q->tins[0].tin_quantum_prio = quantum;
q 2460 net/sched/sch_cake.c q->tins[1].tin_quantum_prio = quantum >> 4;
q 2461 net/sched/sch_cake.c q->tins[2].tin_quantum_prio = quantum << 4;
q 2464 net/sched/sch_cake.c q->tins[0].tin_quantum_band = quantum;
q 2465 net/sched/sch_cake.c q->tins[1].tin_quantum_band = quantum >> 4;
q 2466 net/sched/sch_cake.c q->tins[2].tin_quantum_band = quantum >> 2;
q 2473 net/sched/sch_cake.c struct cake_sched_data *q = qdisc_priv(sch);
q 2476 net/sched/sch_cake.c switch (q->tin_mode) {
q 2499 net/sched/sch_cake.c for (c = q->tin_cnt; c < CAKE_MAX_TINS; c++) {
q 2501 net/sched/sch_cake.c q->tins[c].cparams.mtu_time = q->tins[ft].cparams.mtu_time;
q 2504 net/sched/sch_cake.c q->rate_ns = q->tins[ft].tin_rate_ns;
q 2505 net/sched/sch_cake.c q->rate_shft = q->tins[ft].tin_rate_shft;
q 2507 net/sched/sch_cake.c if (q->buffer_config_limit) {
q 2508 net/sched/sch_cake.c q->buffer_limit = q->buffer_config_limit;
q 2509 net/sched/sch_cake.c } else if (q->rate_bps) {
q 2510 net/sched/sch_cake.c u64 t = q->rate_bps * q->interval;
q 2513 net/sched/sch_cake.c q->buffer_limit = max_t(u32, t, 4U << 20);
q 2515 net/sched/sch_cake.c q->buffer_limit = ~0;
q 2520 net/sched/sch_cake.c q->buffer_limit = min(q->buffer_limit,
q 2522 net/sched/sch_cake.c q->buffer_config_limit));
q 2528 net/sched/sch_cake.c struct cake_sched_data *q = qdisc_priv(sch);
q 2542 net/sched/sch_cake.c q->flow_mode &= ~CAKE_FLOW_NAT_FLAG;
q 2543 net/sched/sch_cake.c q->flow_mode |= CAKE_FLOW_NAT_FLAG *
q 2553 net/sched/sch_cake.c q->rate_bps = nla_get_u64(tb[TCA_CAKE_BASE_RATE64]);
q 2556 net/sched/sch_cake.c q->tin_mode = nla_get_u32(tb[TCA_CAKE_DIFFSERV_MODE]);
q 2560 net/sched/sch_cake.c q->rate_flags |= CAKE_FLAG_WASH;
q 2562 net/sched/sch_cake.c q->rate_flags &= ~CAKE_FLAG_WASH;
q 2566 net/sched/sch_cake.c q->flow_mode = ((q->flow_mode & CAKE_FLOW_NAT_FLAG) |
q 2571 net/sched/sch_cake.c q->atm_mode = nla_get_u32(tb[TCA_CAKE_ATM]);
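cake_config_diffserv4 and cake_config_diffserv3 above derive per-tin threshold rates purely by shifting the base rate: the four tins get 100%, 1/16, 1/2 and 1/4 of the link (100%, 1/16 and 1/4 in the three-tin case). A quick numeric check of those shifts:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t rate = 100000000ULL;	/* 100 Mbit/s base rate */

	/* Same shifts as cake_config_diffserv4 above. */
	printf("tins[0]: %llu bit/s\n", (unsigned long long)rate);		/* 100M */
	printf("tins[1]: %llu bit/s\n", (unsigned long long)(rate >> 4));	/* 6.25M */
	printf("tins[2]: %llu bit/s\n", (unsigned long long)(rate >> 1));	/* 50M */
	printf("tins[3]: %llu bit/s\n", (unsigned long long)(rate >> 2));	/* 25M */
	return 0;
}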
q 2574 net/sched/sch_cake.c q->rate_overhead = nla_get_s32(tb[TCA_CAKE_OVERHEAD]);
q 2575 net/sched/sch_cake.c q->rate_flags |= CAKE_FLAG_OVERHEAD;
q 2577 net/sched/sch_cake.c q->max_netlen = 0;
q 2578 net/sched/sch_cake.c q->max_adjlen = 0;
q 2579 net/sched/sch_cake.c q->min_netlen = ~0;
q 2580 net/sched/sch_cake.c q->min_adjlen = ~0;
q 2584 net/sched/sch_cake.c q->rate_flags &= ~CAKE_FLAG_OVERHEAD;
q 2586 net/sched/sch_cake.c q->max_netlen = 0;
q 2587 net/sched/sch_cake.c q->max_adjlen = 0;
q 2588 net/sched/sch_cake.c q->min_netlen = ~0;
q 2589 net/sched/sch_cake.c q->min_adjlen = ~0;
q 2593 net/sched/sch_cake.c q->rate_mpu = nla_get_u32(tb[TCA_CAKE_MPU]);
q 2596 net/sched/sch_cake.c q->interval = nla_get_u32(tb[TCA_CAKE_RTT]);
q 2598 net/sched/sch_cake.c if (!q->interval)
q 2599 net/sched/sch_cake.c q->interval = 1;
q 2603 net/sched/sch_cake.c q->target = nla_get_u32(tb[TCA_CAKE_TARGET]);
q 2605 net/sched/sch_cake.c if (!q->target)
q 2606 net/sched/sch_cake.c q->target = 1;
q 2611 net/sched/sch_cake.c q->rate_flags |= CAKE_FLAG_AUTORATE_INGRESS;
q 2613 net/sched/sch_cake.c q->rate_flags &= ~CAKE_FLAG_AUTORATE_INGRESS;
q 2618 net/sched/sch_cake.c q->rate_flags |= CAKE_FLAG_INGRESS;
q 2620 net/sched/sch_cake.c q->rate_flags &= ~CAKE_FLAG_INGRESS;
q 2624 net/sched/sch_cake.c q->ack_filter = nla_get_u32(tb[TCA_CAKE_ACK_FILTER]);
q 2627 net/sched/sch_cake.c q->buffer_config_limit = nla_get_u32(tb[TCA_CAKE_MEMORY]);
q 2631 net/sched/sch_cake.c q->rate_flags |= CAKE_FLAG_SPLIT_GSO;
q 2633 net/sched/sch_cake.c q->rate_flags &= ~CAKE_FLAG_SPLIT_GSO;
q 2637 net/sched/sch_cake.c q->fwmark_mask = nla_get_u32(tb[TCA_CAKE_FWMARK]);
q 2638 net/sched/sch_cake.c q->fwmark_shft = q->fwmark_mask ? __ffs(q->fwmark_mask) : 0;
q 2641 net/sched/sch_cake.c if (q->tins) {
q 2652 net/sched/sch_cake.c struct cake_sched_data *q = qdisc_priv(sch);
q 2654 net/sched/sch_cake.c qdisc_watchdog_cancel(&q->watchdog);
q 2655 net/sched/sch_cake.c tcf_block_put(q->block);
q 2656 net/sched/sch_cake.c kvfree(q->tins);
q 2662 net/sched/sch_cake.c struct cake_sched_data *q = qdisc_priv(sch);
q 2666 net/sched/sch_cake.c q->tin_mode = CAKE_DIFFSERV_DIFFSERV3;
q 2667 net/sched/sch_cake.c q->flow_mode = CAKE_FLOW_TRIPLE;
q 2669 net/sched/sch_cake.c q->rate_bps = 0; /* unlimited by default */
q 2671 net/sched/sch_cake.c q->interval = 100000; /* 100ms default */
q 2672 net/sched/sch_cake.c q->target = 5000; /* 5ms: codel RFC argues
q 2675 net/sched/sch_cake.c q->rate_flags |= CAKE_FLAG_SPLIT_GSO;
q 2676 net/sched/sch_cake.c q->cur_tin = 0;
q 2677 net/sched/sch_cake.c q->cur_flow = 0;
q 2679 net/sched/sch_cake.c qdisc_watchdog_init(&q->watchdog, sch);
q 2688 net/sched/sch_cake.c err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
q 2696 net/sched/sch_cake.c q->tins = kvcalloc(CAKE_MAX_TINS, sizeof(struct cake_tin_data),
q 2698 net/sched/sch_cake.c if (!q->tins)
q 2702 net/sched/sch_cake.c struct cake_tin_data *b = q->tins + i;
q 2718 net/sched/sch_cake.c q->overflow_heap[k].t = i;
q 2719 net/sched/sch_cake.c q->overflow_heap[k].b = j;
q 2725 net/sched/sch_cake.c q->avg_peak_bandwidth = q->rate_bps;
q 2726 net/sched/sch_cake.c q->min_netlen = ~0;
q 2727 net/sched/sch_cake.c q->min_adjlen = ~0;
q 2737 net/sched/sch_cake.c struct cake_sched_data *q = qdisc_priv(sch);
q 2744 net/sched/sch_cake.c if (nla_put_u64_64bit(skb, TCA_CAKE_BASE_RATE64, q->rate_bps,
q 2749 net/sched/sch_cake.c q->flow_mode & CAKE_FLOW_MASK))
q 2752 net/sched/sch_cake.c if (nla_put_u32(skb, TCA_CAKE_RTT, q->interval))
q 2755 net/sched/sch_cake.c if (nla_put_u32(skb, TCA_CAKE_TARGET, q->target))
q 2758 net/sched/sch_cake.c if (nla_put_u32(skb, TCA_CAKE_MEMORY, q->buffer_config_limit))
q 2762 net/sched/sch_cake.c !!(q->rate_flags & CAKE_FLAG_AUTORATE_INGRESS)))
q 2766 net/sched/sch_cake.c !!(q->rate_flags & CAKE_FLAG_INGRESS)))
q 2769 net/sched/sch_cake.c if (nla_put_u32(skb, TCA_CAKE_ACK_FILTER, q->ack_filter))
q 2773 net/sched/sch_cake.c !!(q->flow_mode & CAKE_FLOW_NAT_FLAG)))
q 2776 net/sched/sch_cake.c if (nla_put_u32(skb, TCA_CAKE_DIFFSERV_MODE, q->tin_mode))
q 2780 net/sched/sch_cake.c !!(q->rate_flags & CAKE_FLAG_WASH)))
q 2783 net/sched/sch_cake.c if (nla_put_u32(skb, TCA_CAKE_OVERHEAD, q->rate_overhead))
q 2786 net/sched/sch_cake.c if (!(q->rate_flags & CAKE_FLAG_OVERHEAD))
q 2790 net/sched/sch_cake.c if (nla_put_u32(skb, TCA_CAKE_ATM, q->atm_mode))
q 2793 net/sched/sch_cake.c if (nla_put_u32(skb, TCA_CAKE_MPU, q->rate_mpu))
q 2797 net/sched/sch_cake.c !!(q->rate_flags & CAKE_FLAG_SPLIT_GSO)))
q 2800 net/sched/sch_cake.c if (nla_put_u32(skb, TCA_CAKE_FWMARK, q->fwmark_mask))
q 2812 net/sched/sch_cake.c struct cake_sched_data *q = qdisc_priv(sch);
q 2829 net/sched/sch_cake.c PUT_STAT_U64(CAPACITY_ESTIMATE64, q->avg_peak_bandwidth);
q 2830 net/sched/sch_cake.c PUT_STAT_U32(MEMORY_LIMIT, q->buffer_limit);
q 2831 net/sched/sch_cake.c PUT_STAT_U32(MEMORY_USED, q->buffer_max_used);
q 2832 net/sched/sch_cake.c PUT_STAT_U32(AVG_NETOFF, ((q->avg_netoff + 0x8000) >> 16));
q 2833 net/sched/sch_cake.c PUT_STAT_U32(MAX_NETLEN, q->max_netlen);
q 2834 net/sched/sch_cake.c PUT_STAT_U32(MAX_ADJLEN, q->max_adjlen);
q 2835 net/sched/sch_cake.c PUT_STAT_U32(MIN_NETLEN, q->min_netlen);
q 2836 net/sched/sch_cake.c PUT_STAT_U32(MIN_ADJLEN, q->min_adjlen);
q 2855 net/sched/sch_cake.c for (i = 0; i < q->tin_cnt; i++) {
q 2856 net/sched/sch_cake.c struct cake_tin_data *b = &q->tins[q->tin_order[i]];
q 2924 net/sched/sch_cake.c static void cake_unbind(struct Qdisc *q, unsigned long cl)
q 2931 net/sched/sch_cake.c struct cake_sched_data *q = qdisc_priv(sch);
q 2935 net/sched/sch_cake.c return q->block;
q 2948 net/sched/sch_cake.c struct cake_sched_data *q = qdisc_priv(sch);
q 2954 net/sched/sch_cake.c if (idx < CAKE_QUEUES * q->tin_cnt) {
q 2956 net/sched/sch_cake.c &q->tins[q->tin_order[idx / CAKE_QUEUES]];
q 3021 net/sched/sch_cake.c struct cake_sched_data *q = qdisc_priv(sch);
q 3027 net/sched/sch_cake.c for (i = 0; i < q->tin_cnt; i++) {
q 3028 net/sched/sch_cake.c struct cake_tin_data *b = &q->tins[q->tin_order[i]];
q 103 net/sched/sch_cbq.c struct Qdisc *q; /* Elementary queueing discipline */
q 166 net/sched/sch_cbq.c cbq_class_lookup(struct cbq_sched_data *q, u32 classid)
q 170 net/sched/sch_cbq.c clc = qdisc_class_find(&q->clhash, classid);
q 207 net/sched/sch_cbq.c struct cbq_sched_data *q = qdisc_priv(sch);
q 208 net/sched/sch_cbq.c struct cbq_class *head = &q->link;
q 219 net/sched/sch_cbq.c (cl = cbq_class_lookup(q, prio)) != NULL)
q 238 net/sched/sch_cbq.c cl = cbq_class_lookup(q, res.classid);
q 293 net/sched/sch_cbq.c struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
q 297 net/sched/sch_cbq.c cl_tail = q->active[prio];
q 298 net/sched/sch_cbq.c q->active[prio] = cl;
q 305 net/sched/sch_cbq.c q->activemask |= (1<<prio);
q 317 net/sched/sch_cbq.c struct cbq_sched_data *q = qdisc_priv(this->qdisc);
q 320 net/sched/sch_cbq.c struct cbq_class *cl_prev = q->active[prio];
q 328 net/sched/sch_cbq.c if (cl == q->active[prio]) {
q 329 net/sched/sch_cbq.c q->active[prio] = cl_prev;
q 330 net/sched/sch_cbq.c if (cl == q->active[prio]) {
q 331 net/sched/sch_cbq.c q->active[prio] = NULL;
q 332 net/sched/sch_cbq.c q->activemask &= ~(1<<prio);
q 338 net/sched/sch_cbq.c } while ((cl_prev = cl) != q->active[prio]);
q 342 net/sched/sch_cbq.c cbq_mark_toplevel(struct cbq_sched_data *q, struct cbq_class *cl)
q 344 net/sched/sch_cbq.c int toplevel = q->toplevel;
q 351 net/sched/sch_cbq.c q->toplevel = cl->level;
q 362 net/sched/sch_cbq.c struct cbq_sched_data *q = qdisc_priv(sch);
q 367 net/sched/sch_cbq.c q->rx_class = cl;
q 376 net/sched/sch_cbq.c ret = qdisc_enqueue(skb, cl->q, to_free);
q 378 net/sched/sch_cbq.c sch->q.qlen++;
q 379 net/sched/sch_cbq.c cbq_mark_toplevel(q, cl);
q 387 net/sched/sch_cbq.c cbq_mark_toplevel(q, cl);
q 396 net/sched/sch_cbq.c struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
q 397 net/sched/sch_cbq.c psched_tdiff_t delay = cl->undertime - q->now;
q 415 net/sched/sch_cbq.c cl->undertime = q->now + delay;
q 420 net/sched/sch_cbq.c if (q->wd_expires == 0 || q->wd_expires > delay)
q 421 net/sched/sch_cbq.c q->wd_expires = delay;
q 427 net/sched/sch_cbq.c if (q->toplevel == TC_CBQ_MAXLEVEL) {
q 429 net/sched/sch_cbq.c psched_tdiff_t base_delay = q->wd_expires;
q 432 net/sched/sch_cbq.c delay = b->undertime - q->now;
q 440 net/sched/sch_cbq.c q->wd_expires = base_delay;
q 444 net/sched/sch_cbq.c static psched_tdiff_t cbq_undelay_prio(struct cbq_sched_data *q, int prio,
q 448 net/sched/sch_cbq.c struct cbq_class *cl_prev = q->active[prio];
q 463 net/sched/sch_cbq.c if (cl == q->active[prio]) {
q 464 net/sched/sch_cbq.c q->active[prio] = cl_prev;
q 465 net/sched/sch_cbq.c if (cl == q->active[prio]) {
q 466 net/sched/sch_cbq.c q->active[prio] = NULL;
q 474 net/sched/sch_cbq.c } while ((cl_prev = cl) != q->active[prio]);
q 481 net/sched/sch_cbq.c struct cbq_sched_data *q = container_of(timer, struct cbq_sched_data,
q 483 net/sched/sch_cbq.c struct Qdisc *sch = q->watchdog.qdisc;
q 490 net/sched/sch_cbq.c pmask = q->pmask;
q 491 net/sched/sch_cbq.c q->pmask = 0;
q 499 net/sched/sch_cbq.c tmp = cbq_undelay_prio(q, prio, now);
q 501 net/sched/sch_cbq.c q->pmask |= 1<<prio;
q 512 net/sched/sch_cbq.c hrtimer_start(&q->delay_timer, time, HRTIMER_MODE_ABS_PINNED);
q 529 net/sched/sch_cbq.c cbq_update_toplevel(struct cbq_sched_data *q, struct cbq_class *cl,
q 532 net/sched/sch_cbq.c if (cl && q->toplevel >= borrowed->level) {
q 533 net/sched/sch_cbq.c if (cl->q->q.qlen > 1) {
q 536 net/sched/sch_cbq.c q->toplevel = borrowed->level;
q 545 net/sched/sch_cbq.c q->toplevel = TC_CBQ_MAXLEVEL;
q 551 net/sched/sch_cbq.c cbq_update(struct cbq_sched_data *q)
q 553 net/sched/sch_cbq.c struct cbq_class *this = q->tx_class;
q 555 net/sched/sch_cbq.c int len = q->tx_len;
q 558 net/sched/sch_cbq.c q->tx_class = NULL;
q 562 net/sched/sch_cbq.c now = q->now + L2T(&q->link, len);
q 619 net/sched/sch_cbq.c idle -= L2T(&q->link, len);
q 636 net/sched/sch_cbq.c cbq_update_toplevel(q, this, q->tx_borrowed);
q 642 net/sched/sch_cbq.c struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
q 648 net/sched/sch_cbq.c if (cl->undertime == PSCHED_PASTPERFECT || q->now >= cl->undertime) {
q 670 net/sched/sch_cbq.c if (cl->level > q->toplevel)
q 672 net/sched/sch_cbq.c } while (cl->undertime != PSCHED_PASTPERFECT && q->now < cl->undertime);
q 681 net/sched/sch_cbq.c struct cbq_sched_data *q = qdisc_priv(sch);
q 686 net/sched/sch_cbq.c cl_tail = cl_prev = q->active[prio];
q 696 net/sched/sch_cbq.c if (cl->q->q.qlen &&
q 709 net/sched/sch_cbq.c skb = cl->q->dequeue(cl->q);
q 719 net/sched/sch_cbq.c q->tx_class = cl;
q 720 net/sched/sch_cbq.c q->tx_borrowed = borrow;
q 730 net/sched/sch_cbq.c q->tx_len = qdisc_pkt_len(skb);
q 733 net/sched/sch_cbq.c q->active[prio] = cl;
q 740 net/sched/sch_cbq.c if (cl->q->q.qlen == 0 || prio != cl->cpriority) {
q 755 net/sched/sch_cbq.c q->active[prio] = NULL;
q 756 net/sched/sch_cbq.c q->activemask &= ~(1<<prio);
q 757 net/sched/sch_cbq.c if (cl->q->q.qlen)
q 762 net/sched/sch_cbq.c q->active[prio] = cl_tail;
q 764 net/sched/sch_cbq.c if (cl->q->q.qlen)
q 776 net/sched/sch_cbq.c q->active[prio] = cl_prev;
q 784 net/sched/sch_cbq.c struct cbq_sched_data *q = qdisc_priv(sch);
q 788 net/sched/sch_cbq.c activemask = q->activemask & 0xFF;
q 803 net/sched/sch_cbq.c struct cbq_sched_data *q = qdisc_priv(sch);
q 808 net/sched/sch_cbq.c if (q->tx_class)
q 809 net/sched/sch_cbq.c cbq_update(q);
q 811 net/sched/sch_cbq.c q->now = now;
q 814 net/sched/sch_cbq.c q->wd_expires = 0;
q 819 net/sched/sch_cbq.c sch->q.qlen--;
q 841 net/sched/sch_cbq.c if (q->toplevel == TC_CBQ_MAXLEVEL &&
q 842 net/sched/sch_cbq.c q->link.undertime == PSCHED_PASTPERFECT)
q 845 net/sched/sch_cbq.c q->toplevel = TC_CBQ_MAXLEVEL;
q 846 net/sched/sch_cbq.c q->link.undertime = PSCHED_PASTPERFECT;
q 853 net/sched/sch_cbq.c if (sch->q.qlen) {
q 855 net/sched/sch_cbq.c if (q->wd_expires)
q 856 net/sched/sch_cbq.c qdisc_watchdog_schedule(&q->watchdog,
q 857 net/sched/sch_cbq.c now + q->wd_expires);
q 884 net/sched/sch_cbq.c static void cbq_normalize_quanta(struct cbq_sched_data *q, int prio)
q 889 net/sched/sch_cbq.c if (q->quanta[prio] == 0)
q 892 net/sched/sch_cbq.c for (h = 0; h < q->clhash.hashsize; h++) {
q 893 net/sched/sch_cbq.c hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
q 898 net/sched/sch_cbq.c cl->quantum = (cl->weight*cl->allot*q->nclasses[prio])/
q 899 net/sched/sch_cbq.c q->quanta[prio];
q 913 net/sched/sch_cbq.c struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
q 932 net/sched/sch_cbq.c for (h = 0; h < q->clhash.hashsize; h++) {
q 935 net/sched/sch_cbq.c hlist_for_each_entry(c, &q->clhash.hash[h],
q 981 net/sched/sch_cbq.c struct cbq_sched_data *q = qdisc_priv(this->qdisc);
q 983 net/sched/sch_cbq.c qdisc_class_hash_remove(&q->clhash, &this->common);
q 1008 net/sched/sch_cbq.c struct cbq_sched_data *q = qdisc_priv(this->qdisc);
q 1012 net/sched/sch_cbq.c qdisc_class_hash_insert(&q->clhash, &this->common);
q 1028 net/sched/sch_cbq.c struct cbq_sched_data *q = qdisc_priv(sch);
q 1033 net/sched/sch_cbq.c q->activemask = 0;
q 1034 net/sched/sch_cbq.c q->pmask = 0;
q 1035 net/sched/sch_cbq.c q->tx_class = NULL;
q 1036 net/sched/sch_cbq.c q->tx_borrowed = NULL;
q 1037 net/sched/sch_cbq.c qdisc_watchdog_cancel(&q->watchdog);
q 1038 net/sched/sch_cbq.c hrtimer_cancel(&q->delay_timer);
q 1039 net/sched/sch_cbq.c q->toplevel = TC_CBQ_MAXLEVEL;
q 1040 net/sched/sch_cbq.c q->now = psched_get_time();
q 1043 net/sched/sch_cbq.c q->active[prio] = NULL;
q 1045 net/sched/sch_cbq.c for (h = 0; h < q->clhash.hashsize; h++) {
q 1046 net/sched/sch_cbq.c hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
q 1047 net/sched/sch_cbq.c qdisc_reset(cl->q);
q 1056 net/sched/sch_cbq.c sch->q.qlen = 0;
q 1081 net/sched/sch_cbq.c static void cbq_rmprio(struct cbq_sched_data *q, struct cbq_class *cl)
q 1083 net/sched/sch_cbq.c q->nclasses[cl->priority]--;
q 1084 net/sched/sch_cbq.c q->quanta[cl->priority] -= cl->weight;
q 1085 net/sched/sch_cbq.c cbq_normalize_quanta(q, cl->priority);
q 1088 net/sched/sch_cbq.c static void cbq_addprio(struct cbq_sched_data *q, struct cbq_class *cl)
q 1090 net/sched/sch_cbq.c q->nclasses[cl->priority]++;
q 1091 net/sched/sch_cbq.c q->quanta[cl->priority] += cl->weight;
q 1092 net/sched/sch_cbq.c cbq_normalize_quanta(q, cl->priority);
q 1097 net/sched/sch_cbq.c struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
q 1110 net/sched/sch_cbq.c cbq_addprio(q, cl);
q 1160 net/sched/sch_cbq.c struct cbq_sched_data *q = qdisc_priv(sch);
q 1165 net/sched/sch_cbq.c qdisc_watchdog_init(&q->watchdog, sch);
q 1166 net/sched/sch_cbq.c hrtimer_init(&q->delay_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
q 1167 net/sched/sch_cbq.c q->delay_timer.function = cbq_undelay;
q 1180 net/sched/sch_cbq.c q->link.R_tab = qdisc_get_rtab(r, tb[TCA_CBQ_RTAB], extack);
q 1181 net/sched/sch_cbq.c if (!q->link.R_tab)
q 1184 net/sched/sch_cbq.c err = tcf_block_get(&q->link.block, &q->link.filter_list, sch, extack);
q 1188 net/sched/sch_cbq.c err = qdisc_class_hash_init(&q->clhash);
q 1192 net/sched/sch_cbq.c q->link.sibling = &q->link;
q 1193 net/sched/sch_cbq.c q->link.common.classid = sch->handle;
q 1194 net/sched/sch_cbq.c q->link.qdisc = sch;
q 1195 net/sched/sch_cbq.c q->link.q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
q 1197 net/sched/sch_cbq.c if (!q->link.q)
q 1198 net/sched/sch_cbq.c q->link.q = &noop_qdisc;
q 1200 net/sched/sch_cbq.c qdisc_hash_add(q->link.q, true);
q 1202 net/sched/sch_cbq.c q->link.priority = TC_CBQ_MAXPRIO - 1;
q 1203 net/sched/sch_cbq.c q->link.priority2 = TC_CBQ_MAXPRIO - 1;
q 1204 net/sched/sch_cbq.c q->link.cpriority = TC_CBQ_MAXPRIO - 1;
q 1205 net/sched/sch_cbq.c q->link.allot = psched_mtu(qdisc_dev(sch));
q 1206 net/sched/sch_cbq.c q->link.quantum = q->link.allot;
q 1207 net/sched/sch_cbq.c q->link.weight = q->link.R_tab->rate.rate;
q 1209 net/sched/sch_cbq.c q->link.ewma_log = TC_CBQ_DEF_EWMA;
q 1210 net/sched/sch_cbq.c q->link.avpkt = q->link.allot/2;
q 1211 net/sched/sch_cbq.c q->link.minidle = -0x7FFFFFFF;
q 1213 net/sched/sch_cbq.c q->toplevel = TC_CBQ_MAXLEVEL;
q 1214 net/sched/sch_cbq.c q->now = psched_get_time();
q 1216 net/sched/sch_cbq.c cbq_link_class(&q->link);
q 1219 net/sched/sch_cbq.c cbq_set_lss(&q->link, nla_data(tb[TCA_CBQ_LSSOPT]));
q 1221 net/sched/sch_cbq.c cbq_addprio(q, &q->link);
q 1225 net/sched/sch_cbq.c tcf_block_put(q->link.block);
q 1228 net/sched/sch_cbq.c qdisc_put_rtab(q->link.R_tab);
q 1322 net/sched/sch_cbq.c struct cbq_sched_data *q = qdisc_priv(sch);
q 1328 net/sched/sch_cbq.c if (cbq_dump_attr(skb, &q->link) < 0)
q 1340 net/sched/sch_cbq.c struct cbq_sched_data *q = qdisc_priv(sch);
q 1342 net/sched/sch_cbq.c q->link.xstats.avgidle = q->link.avgidle;
q 1343 net/sched/sch_cbq.c return gnet_stats_copy_app(d, &q->link.xstats, sizeof(q->link.xstats));
q 1358 net/sched/sch_cbq.c tcm->tcm_info = cl->q->handle;
q 1376 net/sched/sch_cbq.c struct cbq_sched_data *q = qdisc_priv(sch);
q 1382 net/sched/sch_cbq.c qdisc_qstats_qlen_backlog(cl->q, &qlen, &cl->qstats.backlog);
q 1385 net/sched/sch_cbq.c cl->xstats.undertime = cl->undertime - q->now;
q 1408 net/sched/sch_cbq.c *old = qdisc_replace(sch, new, &cl->q);
q 1416 net/sched/sch_cbq.c return cl->q;
q 1428 net/sched/sch_cbq.c struct cbq_sched_data *q = qdisc_priv(sch);
q 1430 net/sched/sch_cbq.c return (unsigned long)cbq_class_lookup(q, classid);
q 1435 net/sched/sch_cbq.c struct cbq_sched_data *q = qdisc_priv(sch);
q 1440 net/sched/sch_cbq.c qdisc_put(cl->q);
q 1443 net/sched/sch_cbq.c if (cl != &q->link)
q 1449 net/sched/sch_cbq.c struct cbq_sched_data *q = qdisc_priv(sch);
q 1455 net/sched/sch_cbq.c q->rx_class = NULL;
q 1462 net/sched/sch_cbq.c for (h = 0; h < q->clhash.hashsize; h++) {
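Annotation: the entries at sch_cbq.c lines 898-899 and 1081-1092 show the weighted round-robin bookkeeping: q->quanta[prio] tracks the sum of class weights at a priority, and each class quantum is renormalized as weight*allot*nclasses/quanta. A standalone worked example with illustrative numbers (not kernel code):

    #include <stdio.h>

    int main(void)
    {
        /* Three classes at one priority; values are illustrative. */
        unsigned int weight[] = { 10, 20, 30 };
        unsigned int allot = 1514;            /* per-class allot (bytes) */
        unsigned int nclasses = 3;            /* q->nclasses[prio] */
        unsigned int quanta = 10 + 20 + 30;   /* q->quanta[prio] = sum of weights */

        /* cl->quantum = (cl->weight * cl->allot * nclasses) / quanta */
        for (int i = 0; i < 3; i++)
            printf("class %d quantum = %u bytes\n", i,
                   weight[i] * allot * nclasses / quanta);
        return 0;
    }

The renormalization keeps the average quantum near allot while giving each class service proportional to its weight.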
q 1463 net/sched/sch_cbq.c hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
q 1468 net/sched/sch_cbq.c for (h = 0; h < q->clhash.hashsize; h++) {
q 1469 net/sched/sch_cbq.c hlist_for_each_entry_safe(cl, next, &q->clhash.hash[h],
q 1473 net/sched/sch_cbq.c qdisc_class_hash_destroy(&q->clhash);
q 1481 net/sched/sch_cbq.c struct cbq_sched_data *q = qdisc_priv(sch);
q 1546 net/sched/sch_cbq.c cbq_rmprio(q, cl);
q 1553 net/sched/sch_cbq.c if (cl->q->q.qlen)
q 1577 net/sched/sch_cbq.c cbq_class_lookup(q, classid)) {
q 1586 net/sched/sch_cbq.c if (++q->hgenerator >= 0x8000)
q 1587 net/sched/sch_cbq.c q->hgenerator = 1;
q 1588 net/sched/sch_cbq.c if (cbq_class_lookup(q, classid|q->hgenerator) == NULL)
q 1596 net/sched/sch_cbq.c classid = classid|q->hgenerator;
q 1599 net/sched/sch_cbq.c parent = &q->link;
q 1601 net/sched/sch_cbq.c parent = cbq_class_lookup(q, parentid);
q 1635 net/sched/sch_cbq.c cl->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, classid,
q 1637 net/sched/sch_cbq.c if (!cl->q)
q 1638 net/sched/sch_cbq.c cl->q = &noop_qdisc;
q 1640 net/sched/sch_cbq.c qdisc_hash_add(cl->q, true);
q 1652 net/sched/sch_cbq.c if (cl->tparent != &q->link)
q 1659 net/sched/sch_cbq.c cl->ewma_log = q->link.ewma_log;
q 1661 net/sched/sch_cbq.c cl->maxidle = q->link.maxidle;
q 1663 net/sched/sch_cbq.c cl->avpkt = q->link.avpkt;
q 1668 net/sched/sch_cbq.c qdisc_class_hash_grow(sch, &q->clhash);
q 1680 net/sched/sch_cbq.c struct cbq_sched_data *q = qdisc_priv(sch);
q 1683 net/sched/sch_cbq.c if (cl->filters || cl->children || cl == &q->link)
q 1688 net/sched/sch_cbq.c qdisc_purge_queue(cl->q);
q 1693 net/sched/sch_cbq.c if (q->tx_borrowed == cl)
q 1694 net/sched/sch_cbq.c q->tx_borrowed = q->tx_class;
q 1695 net/sched/sch_cbq.c if (q->tx_class == cl) {
q 1696 net/sched/sch_cbq.c q->tx_class = NULL;
q 1697 net/sched/sch_cbq.c q->tx_borrowed = NULL;
q 1700 net/sched/sch_cbq.c if (q->rx_class == cl)
q 1701 net/sched/sch_cbq.c q->rx_class = NULL;
q 1709 net/sched/sch_cbq.c cbq_rmprio(q, cl);
q 1719 net/sched/sch_cbq.c struct cbq_sched_data *q = qdisc_priv(sch);
q 1723 net/sched/sch_cbq.c cl = &q->link;
q 1731 net/sched/sch_cbq.c struct cbq_sched_data *q = qdisc_priv(sch);
q 1733 net/sched/sch_cbq.c struct cbq_class *cl = cbq_class_lookup(q, classid);
q 1753 net/sched/sch_cbq.c struct cbq_sched_data *q = qdisc_priv(sch);
q 1760 net/sched/sch_cbq.c for (h = 0; h < q->clhash.hashsize; h++) {
q 1761 net/sched/sch_cbq.c hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
q 99 net/sched/sch_cbs.c sch->q.qlen++;
q 107 net/sched/sch_cbs.c struct cbs_sched_data *q = qdisc_priv(sch);
q 108 net/sched/sch_cbs.c struct Qdisc *qdisc = q->qdisc;
q 116 net/sched/sch_cbs.c struct cbs_sched_data *q = qdisc_priv(sch);
q 117 net/sched/sch_cbs.c struct Qdisc *qdisc = q->qdisc;
q 119 net/sched/sch_cbs.c if (sch->q.qlen == 0 && q->credits > 0) {
q 123 net/sched/sch_cbs.c q->credits = 0;
q 124 net/sched/sch_cbs.c q->last = ktime_get_ns();
q 133 net/sched/sch_cbs.c struct cbs_sched_data *q = qdisc_priv(sch);
q 135 net/sched/sch_cbs.c return q->enqueue(skb, sch, to_free);
q 170 net/sched/sch_cbs.c sch->q.qlen--;
q 177 net/sched/sch_cbs.c struct cbs_sched_data *q = qdisc_priv(sch);
q 178 net/sched/sch_cbs.c struct Qdisc *qdisc = q->qdisc;
q 185 net/sched/sch_cbs.c if (now < q->last) {
q 186 net/sched/sch_cbs.c qdisc_watchdog_schedule_ns(&q->watchdog, q->last);
q 189 net/sched/sch_cbs.c if (q->credits < 0) {
q 190 net/sched/sch_cbs.c credits = timediff_to_credits(now - q->last, q->idleslope);
q 192 net/sched/sch_cbs.c credits = q->credits + credits;
q 193 net/sched/sch_cbs.c q->credits = min_t(s64, credits, q->hicredit);
q 195 net/sched/sch_cbs.c if (q->credits < 0) {
q 198 net/sched/sch_cbs.c delay = delay_from_credits(q->credits, q->idleslope);
q 199 net/sched/sch_cbs.c qdisc_watchdog_schedule_ns(&q->watchdog, now + delay);
q 201 net/sched/sch_cbs.c q->last = now;
q 215 net/sched/sch_cbs.c credits = credits_from_len(len, q->sendslope,
q 216 net/sched/sch_cbs.c atomic64_read(&q->port_rate));
q 217 net/sched/sch_cbs.c credits += q->credits;
q 219 net/sched/sch_cbs.c q->credits = max_t(s64, credits, q->locredit);
q 221 net/sched/sch_cbs.c if (unlikely(atomic64_read(&q->port_rate) == 0))
q 222 net/sched/sch_cbs.c q->last = now;
q 224 net/sched/sch_cbs.c q->last = now + div64_s64(len * NSEC_PER_SEC,
q 225 net/sched/sch_cbs.c atomic64_read(&q->port_rate));
q 232 net/sched/sch_cbs.c struct cbs_sched_data *q = qdisc_priv(sch);
q 233 net/sched/sch_cbs.c struct Qdisc *qdisc = q->qdisc;
q 240 net/sched/sch_cbs.c struct cbs_sched_data *q = qdisc_priv(sch);
q 242 net/sched/sch_cbs.c return q->dequeue(sch);
q 250 net/sched/sch_cbs.c struct cbs_sched_data *q)
q 256 net/sched/sch_cbs.c if (!q->offload)
q 259 net/sched/sch_cbs.c q->enqueue = cbs_enqueue_soft;
q 260 net/sched/sch_cbs.c q->dequeue = cbs_dequeue_soft;
q 266 net/sched/sch_cbs.c cbs.queue = q->queue;
q 275 net/sched/sch_cbs.c static int cbs_enable_offload(struct net_device *dev, struct cbs_sched_data *q,
q 288 net/sched/sch_cbs.c cbs.queue = q->queue;
q 302 net/sched/sch_cbs.c q->enqueue = cbs_enqueue_offload;
q 303 net/sched/sch_cbs.c q->dequeue = cbs_dequeue_offload;
q 308 net/sched/sch_cbs.c static void cbs_set_port_rate(struct net_device *dev, struct cbs_sched_data *q)
q 325 net/sched/sch_cbs.c atomic64_set(&q->port_rate, port_rate);
q 327 net/sched/sch_cbs.c dev->name, (long long)atomic64_read(&q->port_rate),
q 335 net/sched/sch_cbs.c struct cbs_sched_data *q;
q 345 net/sched/sch_cbs.c list_for_each_entry(q, &cbs_list, cbs_list) {
q 346 net/sched/sch_cbs.c qdev = qdisc_dev(q->qdisc);
q 355 net/sched/sch_cbs.c cbs_set_port_rate(dev, q);
q 363 net/sched/sch_cbs.c struct cbs_sched_data *q = qdisc_priv(sch);
q 382 net/sched/sch_cbs.c cbs_set_port_rate(dev, q);
q 383 net/sched/sch_cbs.c cbs_disable_offload(dev, q);
q 385 net/sched/sch_cbs.c err = cbs_enable_offload(dev, q, qopt, extack);
q 391 net/sched/sch_cbs.c q->hicredit = qopt->hicredit;
q 392 net/sched/sch_cbs.c q->locredit = qopt->locredit;
q 393 net/sched/sch_cbs.c q->idleslope = qopt->idleslope * BYTES_PER_KBIT;
q 394 net/sched/sch_cbs.c q->sendslope = qopt->sendslope * BYTES_PER_KBIT;
q 395 net/sched/sch_cbs.c q->offload = qopt->offload;
q 403 net/sched/sch_cbs.c struct cbs_sched_data *q = qdisc_priv(sch);
q 411 net/sched/sch_cbs.c q->qdisc = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
q 413 net/sched/sch_cbs.c if (!q->qdisc)
q 417 net/sched/sch_cbs.c list_add(&q->cbs_list, &cbs_list);
q 420 net/sched/sch_cbs.c qdisc_hash_add(q->qdisc, false);
q 422 net/sched/sch_cbs.c q->queue = sch->dev_queue - netdev_get_tx_queue(dev, 0);
q 424 net/sched/sch_cbs.c q->enqueue = cbs_enqueue_soft;
q 425 net/sched/sch_cbs.c q->dequeue = cbs_dequeue_soft;
q 427 net/sched/sch_cbs.c qdisc_watchdog_init(&q->watchdog, sch);
q 434 net/sched/sch_cbs.c struct cbs_sched_data *q = qdisc_priv(sch);
q 438 net/sched/sch_cbs.c if (!q->qdisc)
q 441 net/sched/sch_cbs.c qdisc_watchdog_cancel(&q->watchdog);
q 442 net/sched/sch_cbs.c cbs_disable_offload(dev, q);
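Annotation: the sch_cbs.c entries around lines 185-225 trace the credit-based shaper's arithmetic: credits accrue at idleslope while the queue waits, drain at (negative) sendslope while a frame is on the wire, and are clamped to [locredit, hicredit]. A minimal userspace sketch of that accounting; the two helpers are plausible reconstructions of the functions named above (the kernel uses div64_s64(), and the constants here are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    #define NSEC_PER_SEC 1000000000LL

    /* credits gained while idle: timediff * idleslope / 1e9 */
    static int64_t timediff_to_credits(int64_t timediff_ns, int64_t idleslope)
    {
        return timediff_ns * idleslope / NSEC_PER_SEC;
    }

    /* credits burned by a frame: len * sendslope / port_rate
     * (sendslope is negative, so this is a debit) */
    static int64_t credits_from_len(int64_t len, int64_t sendslope,
                                    int64_t port_rate)
    {
        return len * sendslope / port_rate;
    }

    int main(void)
    {
        int64_t idleslope = 10000000;   /* bytes/s, illustrative */
        int64_t sendslope = -115000000; /* bytes/s, illustrative */
        int64_t port_rate = 125000000;  /* 1 Gb/s in bytes/s */
        int64_t credits = 0;

        credits += timediff_to_credits(2000000, idleslope);      /* idle 2 ms */
        credits += credits_from_len(1500, sendslope, port_rate); /* one frame */
        printf("credits: %lld\n", (long long)credits);
        return 0;
    }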
q 445 net/sched/sch_cbs.c list_del(&q->cbs_list);
q 448 net/sched/sch_cbs.c qdisc_put(q->qdisc);
q 453 net/sched/sch_cbs.c struct cbs_sched_data *q = qdisc_priv(sch);
q 461 net/sched/sch_cbs.c opt.hicredit = q->hicredit;
q 462 net/sched/sch_cbs.c opt.locredit = q->locredit;
q 463 net/sched/sch_cbs.c opt.sendslope = div64_s64(q->sendslope, BYTES_PER_KBIT);
q 464 net/sched/sch_cbs.c opt.idleslope = div64_s64(q->idleslope, BYTES_PER_KBIT);
q 465 net/sched/sch_cbs.c opt.offload = q->offload;
q 480 net/sched/sch_cbs.c struct cbs_sched_data *q = qdisc_priv(sch);
q 482 net/sched/sch_cbs.c if (cl != 1 || !q->qdisc) /* only one class */
q 486 net/sched/sch_cbs.c tcm->tcm_info = q->qdisc->handle;
q 494 net/sched/sch_cbs.c struct cbs_sched_data *q = qdisc_priv(sch);
q 503 net/sched/sch_cbs.c *old = qdisc_replace(sch, new, &q->qdisc);
q 509 net/sched/sch_cbs.c struct cbs_sched_data *q = qdisc_priv(sch);
q 511 net/sched/sch_cbs.c return q->qdisc;
q 76 net/sched/sch_choke.c static unsigned int choke_len(const struct choke_sched_data *q)
q 78 net/sched/sch_choke.c return (q->tail - q->head) & q->tab_mask;
q 82 net/sched/sch_choke.c static int use_ecn(const struct choke_sched_data *q)
q 84 net/sched/sch_choke.c return q->flags & TC_RED_ECN;
q 88 net/sched/sch_choke.c static int use_harddrop(const struct choke_sched_data *q)
q 90 net/sched/sch_choke.c return q->flags & TC_RED_HARDDROP;
q 94 net/sched/sch_choke.c static void choke_zap_head_holes(struct choke_sched_data *q)
q 97 net/sched/sch_choke.c q->head = (q->head + 1) & q->tab_mask;
q 98 net/sched/sch_choke.c if (q->head == q->tail)
q 100 net/sched/sch_choke.c } while (q->tab[q->head] == NULL);
q 104 net/sched/sch_choke.c static void choke_zap_tail_holes(struct choke_sched_data *q)
q 107 net/sched/sch_choke.c q->tail = (q->tail - 1) & q->tab_mask;
q 108 net/sched/sch_choke.c if (q->head == q->tail)
q 110 net/sched/sch_choke.c } while (q->tab[q->tail] == NULL);
q 117 net/sched/sch_choke.c struct choke_sched_data *q = qdisc_priv(sch);
q 118 net/sched/sch_choke.c struct sk_buff *skb = q->tab[idx];
q 120 net/sched/sch_choke.c q->tab[idx] = NULL;
q 122 net/sched/sch_choke.c if (idx == q->head)
q 123 net/sched/sch_choke.c choke_zap_head_holes(q);
q 124 net/sched/sch_choke.c if (idx == q->tail)
q 125 net/sched/sch_choke.c choke_zap_tail_holes(q);
q 130 net/sched/sch_choke.c --sch->q.qlen;
q 186 net/sched/sch_choke.c static struct sk_buff *choke_peek_random(const struct choke_sched_data *q,
q 193 net/sched/sch_choke.c *pidx = (q->head + prandom_u32_max(choke_len(q))) & q->tab_mask;
q 194 net/sched/sch_choke.c skb = q->tab[*pidx];
q 199 net/sched/sch_choke.c return q->tab[*pidx = q->head];
q 206 net/sched/sch_choke.c static bool choke_match_random(const struct choke_sched_data *q,
q 212 net/sched/sch_choke.c if (q->head == q->tail)
q 215 net/sched/sch_choke.c oskb = choke_peek_random(q, pidx);
q 222 net/sched/sch_choke.c struct choke_sched_data *q = qdisc_priv(sch);
q 223 net/sched/sch_choke.c const struct red_parms *p = &q->parms;
q 227 net/sched/sch_choke.c q->vars.qavg = red_calc_qavg(p, &q->vars, sch->q.qlen);
q 228 net/sched/sch_choke.c if (red_is_idling(&q->vars))
q 229 net/sched/sch_choke.c red_end_of_idle_period(&q->vars);
q 232 net/sched/sch_choke.c if (q->vars.qavg <= p->qth_min)
q 233 net/sched/sch_choke.c q->vars.qcount = -1;
q 238 net/sched/sch_choke.c if (choke_match_random(q, skb, &idx)) {
q 239 net/sched/sch_choke.c q->stats.matched++;
q 245 net/sched/sch_choke.c if (q->vars.qavg > p->qth_max) {
q 246 net/sched/sch_choke.c q->vars.qcount = -1;
q 249 net/sched/sch_choke.c if (use_harddrop(q) || !use_ecn(q) ||
q 251 net/sched/sch_choke.c q->stats.forced_drop++;
q 255 net/sched/sch_choke.c q->stats.forced_mark++;
q 256 net/sched/sch_choke.c } else if (++q->vars.qcount) {
q 257 net/sched/sch_choke.c if (red_mark_probability(p, &q->vars, q->vars.qavg)) {
q 258 net/sched/sch_choke.c q->vars.qcount = 0;
q 259 net/sched/sch_choke.c q->vars.qR = red_random(p);
q 262 net/sched/sch_choke.c if (!use_ecn(q) || !INET_ECN_set_ce(skb)) {
q 263 net/sched/sch_choke.c q->stats.prob_drop++;
q 267 net/sched/sch_choke.c q->stats.prob_mark++;
q 270 net/sched/sch_choke.c q->vars.qR = red_random(p);
q 274 net/sched/sch_choke.c if (sch->q.qlen < q->limit) {
q 275 net/sched/sch_choke.c q->tab[q->tail] = skb;
q 276 net/sched/sch_choke.c q->tail = (q->tail + 1) & q->tab_mask;
q 277 net/sched/sch_choke.c ++sch->q.qlen;
q 282 net/sched/sch_choke.c q->stats.pdrop++;
q 292 net/sched/sch_choke.c struct choke_sched_data *q = qdisc_priv(sch);
q 295 net/sched/sch_choke.c if (q->head == q->tail) {
q 296 net/sched/sch_choke.c if (!red_is_idling(&q->vars))
q 297 net/sched/sch_choke.c red_start_of_idle_period(&q->vars);
q 301 net/sched/sch_choke.c skb = q->tab[q->head];
q 302 net/sched/sch_choke.c q->tab[q->head] = NULL;
q 303 net/sched/sch_choke.c choke_zap_head_holes(q);
q 304 net/sched/sch_choke.c --sch->q.qlen;
q 313 net/sched/sch_choke.c struct choke_sched_data *q = qdisc_priv(sch);
q 315 net/sched/sch_choke.c while (q->head != q->tail) {
q 316 net/sched/sch_choke.c struct sk_buff *skb = q->tab[q->head];
q 318 net/sched/sch_choke.c q->head = (q->head + 1) & q->tab_mask;
q 324 net/sched/sch_choke.c sch->q.qlen = 0;
q 326 net/sched/sch_choke.c if (q->tab)
q 327 net/sched/sch_choke.c memset(q->tab, 0, (q->tab_mask + 1) * sizeof(struct sk_buff *));
q 328 net/sched/sch_choke.c q->head = q->tail = 0;
q 329 net/sched/sch_choke.c red_restart(&q->vars);
q 347 net/sched/sch_choke.c struct choke_sched_data *q = qdisc_priv(sch);
q 378 net/sched/sch_choke.c if (mask != q->tab_mask) {
q 386 net/sched/sch_choke.c old = q->tab;
q 388 net/sched/sch_choke.c unsigned int oqlen = sch->q.qlen, tail = 0;
q 391 net/sched/sch_choke.c while (q->head != q->tail) {
q 392 net/sched/sch_choke.c struct sk_buff *skb = q->tab[q->head];
q 394 net/sched/sch_choke.c q->head = (q->head + 1) & q->tab_mask;
q 403 net/sched/sch_choke.c --sch->q.qlen;
q 406 net/sched/sch_choke.c qdisc_tree_reduce_backlog(sch, oqlen - sch->q.qlen, dropped);
q 407 net/sched/sch_choke.c q->head = 0;
q 408 net/sched/sch_choke.c q->tail = tail;
q 411 net/sched/sch_choke.c q->tab_mask = mask;
q 412 net/sched/sch_choke.c q->tab = ntab;
q 416 net/sched/sch_choke.c q->flags = ctl->flags;
q 417 net/sched/sch_choke.c q->limit = ctl->limit;
q 419 net/sched/sch_choke.c red_set_parms(&q->parms, ctl->qth_min, ctl->qth_max, ctl->Wlog,
q 423 net/sched/sch_choke.c red_set_vars(&q->vars);
q 425 net/sched/sch_choke.c if (q->head == q->tail)
q 426 net/sched/sch_choke.c red_end_of_idle_period(&q->vars);
q 441 net/sched/sch_choke.c struct choke_sched_data *q = qdisc_priv(sch);
q 444 net/sched/sch_choke.c .limit = q->limit,
q 445 net/sched/sch_choke.c .flags = q->flags,
q 446 net/sched/sch_choke.c .qth_min = q->parms.qth_min >> q->parms.Wlog,
q 447 net/sched/sch_choke.c .qth_max = q->parms.qth_max >> q->parms.Wlog,
q 448 net/sched/sch_choke.c .Wlog = q->parms.Wlog,
q 449 net/sched/sch_choke.c .Plog = q->parms.Plog,
q 450 net/sched/sch_choke.c .Scell_log = q->parms.Scell_log,
q 458 net/sched/sch_choke.c nla_put_u32(skb, TCA_CHOKE_MAX_P, q->parms.max_P))
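Annotation: the sch_choke.c entries above (lines 76-110, 274-328) all rely on a power-of-two ring buffer where head/tail indices wrap with a mask, so occupancy is (tail - head) & mask even across wraparound. A self-contained sketch of that index arithmetic with illustrative sizes:

    #include <stdio.h>

    /* Power-of-two ring as in sch_choke.c: TAB_MASK = size - 1. */
    #define TAB_SIZE 8
    #define TAB_MASK (TAB_SIZE - 1)

    int main(void)
    {
        unsigned int head = 6, tail = 2; /* tail has wrapped past the end */

        /* choke_len(): (tail - head) & mask -> 4 occupied slots,
         * correct despite tail < head thanks to unsigned wraparound. */
        printf("queued slots = %u\n", (tail - head) & TAB_MASK);

        head = (head + 1) & TAB_MASK;    /* advance 6 -> 7 */
        head = (head + 1) & TAB_MASK;    /* advance 7 -> wraps to 0 */
        printf("head after two advances = %u\n", head);
        return 0;
    }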
q 469 net/sched/sch_choke.c struct choke_sched_data *q = qdisc_priv(sch);
q 471 net/sched/sch_choke.c .early = q->stats.prob_drop + q->stats.forced_drop,
q 472 net/sched/sch_choke.c .marked = q->stats.prob_mark + q->stats.forced_mark,
q 473 net/sched/sch_choke.c .pdrop = q->stats.pdrop,
q 474 net/sched/sch_choke.c .other = q->stats.other,
q 475 net/sched/sch_choke.c .matched = q->stats.matched,
q 483 net/sched/sch_choke.c struct choke_sched_data *q = qdisc_priv(sch);
q 485 net/sched/sch_choke.c choke_free(q->tab);
q 490 net/sched/sch_choke.c struct choke_sched_data *q = qdisc_priv(sch);
q 492 net/sched/sch_choke.c return (q->head != q->tail) ? q->tab[q->head] : NULL;
q 72 net/sched/sch_codel.c struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);
q 91 net/sched/sch_codel.c struct codel_sched_data *q = qdisc_priv(sch);
q 94 net/sched/sch_codel.c skb = codel_dequeue(sch, &sch->qstats.backlog, &q->params, &q->vars,
q 95 net/sched/sch_codel.c &q->stats, qdisc_pkt_len, codel_get_enqueue_time,
q 101 net/sched/sch_codel.c if (q->stats.drop_count && sch->q.qlen) {
q 102 net/sched/sch_codel.c qdisc_tree_reduce_backlog(sch, q->stats.drop_count, q->stats.drop_len);
q 103 net/sched/sch_codel.c q->stats.drop_count = 0;
q 104 net/sched/sch_codel.c q->stats.drop_len = 0;
q 114 net/sched/sch_codel.c struct codel_sched_data *q;
q 120 net/sched/sch_codel.c q = qdisc_priv(sch);
q 121 net/sched/sch_codel.c q->drop_overlimit++;
q 136 net/sched/sch_codel.c struct codel_sched_data *q = qdisc_priv(sch);
q 154 net/sched/sch_codel.c q->params.target = ((u64)target * NSEC_PER_USEC) >> CODEL_SHIFT;
q 160 net/sched/sch_codel.c q->params.ce_threshold = (val * NSEC_PER_USEC) >> CODEL_SHIFT;
q 166 net/sched/sch_codel.c q->params.interval = ((u64)interval * NSEC_PER_USEC) >> CODEL_SHIFT;
q 173 net/sched/sch_codel.c q->params.ecn = !!nla_get_u32(tb[TCA_CODEL_ECN]);
q 175 net/sched/sch_codel.c qlen = sch->q.qlen;
q 176 net/sched/sch_codel.c while (sch->q.qlen > sch->limit) {
q 177 net/sched/sch_codel.c struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);
q 183 net/sched/sch_codel.c qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped);
q 192 net/sched/sch_codel.c struct codel_sched_data *q = qdisc_priv(sch);
q 196 net/sched/sch_codel.c codel_params_init(&q->params);
q 197 net/sched/sch_codel.c codel_vars_init(&q->vars);
q 198 net/sched/sch_codel.c codel_stats_init(&q->stats);
q 199 net/sched/sch_codel.c q->params.mtu = psched_mtu(qdisc_dev(sch));
q 218 net/sched/sch_codel.c struct codel_sched_data *q = qdisc_priv(sch);
q 226 net/sched/sch_codel.c codel_time_to_us(q->params.target)) ||
q 230 net/sched/sch_codel.c codel_time_to_us(q->params.interval)) ||
q 232 net/sched/sch_codel.c q->params.ecn))
q 234 net/sched/sch_codel.c if (q->params.ce_threshold != CODEL_DISABLED_THRESHOLD &&
q 236 net/sched/sch_codel.c codel_time_to_us(q->params.ce_threshold)))
q 247 net/sched/sch_codel.c const struct codel_sched_data *q = qdisc_priv(sch);
q 249 net/sched/sch_codel.c .maxpacket = q->stats.maxpacket,
q 250 net/sched/sch_codel.c .count = q->vars.count,
q 251 net/sched/sch_codel.c .lastcount = q->vars.lastcount,
q 252 net/sched/sch_codel.c .drop_overlimit = q->drop_overlimit,
q 253 net/sched/sch_codel.c .ldelay = codel_time_to_us(q->vars.ldelay),
q 254 net/sched/sch_codel.c .dropping = q->vars.dropping,
q 255 net/sched/sch_codel.c .ecn_mark = q->stats.ecn_mark,
q 256 net/sched/sch_codel.c .ce_mark = q->stats.ce_mark,
q 259 net/sched/sch_codel.c if (q->vars.dropping) {
q 260 net/sched/sch_codel.c codel_tdiff_t delta = q->vars.drop_next - codel_get_time();
q 273 net/sched/sch_codel.c struct codel_sched_data *q = qdisc_priv(sch);
q 276 net/sched/sch_codel.c codel_vars_init(&q->vars);
q 41 net/sched/sch_drr.c struct drr_sched *q = qdisc_priv(sch);
q 44 net/sched/sch_drr.c clc = qdisc_class_find(&q->clhash, classid);
q 58 net/sched/sch_drr.c struct drr_sched *q = qdisc_priv(sch);
q 133 net/sched/sch_drr.c qdisc_class_hash_insert(&q->clhash, &cl->common);
q 136 net/sched/sch_drr.c qdisc_class_hash_grow(sch, &q->clhash);
q 151 net/sched/sch_drr.c struct drr_sched *q = qdisc_priv(sch);
q 160 net/sched/sch_drr.c qdisc_class_hash_remove(&q->clhash, &cl->common);
q 176 net/sched/sch_drr.c struct drr_sched *q = qdisc_priv(sch);
q 183 net/sched/sch_drr.c return q->block;
q 280 net/sched/sch_drr.c struct drr_sched *q = qdisc_priv(sch);
q 287 net/sched/sch_drr.c for (i = 0; i < q->clhash.hashsize; i++) {
q 288 net/sched/sch_drr.c hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
q 305 net/sched/sch_drr.c struct drr_sched *q = qdisc_priv(sch);
q 318 net/sched/sch_drr.c fl = rcu_dereference_bh(q->filter_list);
q 344 net/sched/sch_drr.c struct drr_sched *q = qdisc_priv(sch);
q 357 net/sched/sch_drr.c first = !cl->qdisc->q.qlen;
q 368 net/sched/sch_drr.c list_add_tail(&cl->alist, &q->active);
q 373 net/sched/sch_drr.c sch->q.qlen++;
q 379 net/sched/sch_drr.c struct drr_sched *q = qdisc_priv(sch);
q 384 net/sched/sch_drr.c if (list_empty(&q->active))
q 387 net/sched/sch_drr.c cl = list_first_entry(&q->active, struct drr_class, alist);
q 400 net/sched/sch_drr.c if (cl->qdisc->q.qlen == 0)
q 406 net/sched/sch_drr.c sch->q.qlen--;
q 411 net/sched/sch_drr.c list_move_tail(&cl->alist, &q->active);
q 420 net/sched/sch_drr.c struct drr_sched *q = qdisc_priv(sch);
q 423 net/sched/sch_drr.c err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
q 426 net/sched/sch_drr.c err = qdisc_class_hash_init(&q->clhash);
q 429 net/sched/sch_drr.c INIT_LIST_HEAD(&q->active);
q 435 net/sched/sch_drr.c struct drr_sched *q = qdisc_priv(sch);
q 439 net/sched/sch_drr.c for (i = 0; i < q->clhash.hashsize; i++) {
q 440 net/sched/sch_drr.c hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
q 441 net/sched/sch_drr.c if (cl->qdisc->q.qlen)
q 447 net/sched/sch_drr.c sch->q.qlen = 0;
q 452 net/sched/sch_drr.c struct drr_sched *q = qdisc_priv(sch);
q 457 net/sched/sch_drr.c tcf_block_put(q->block);
q 459 net/sched/sch_drr.c for (i = 0; i < q->clhash.hashsize; i++) {
q 460 net/sched/sch_drr.c hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
q 464 net/sched/sch_drr.c qdisc_class_hash_destroy(&q->clhash);
q 46 net/sched/sch_dsmark.c struct Qdisc *q;
q 80 net/sched/sch_dsmark.c *old = qdisc_replace(sch, new, &p->q);
q 87 net/sched/sch_dsmark.c return p->q;
q 270 net/sched/sch_dsmark.c err = qdisc_enqueue(skb, p->q, to_free);
q 278 net/sched/sch_dsmark.c sch->q.qlen++;
q 295 net/sched/sch_dsmark.c skb = qdisc_dequeue_peeked(p->q);
q 301 net/sched/sch_dsmark.c sch->q.qlen--;
q 336 net/sched/sch_dsmark.c return p->q->ops->peek(p->q);
q 390 net/sched/sch_dsmark.c p->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, sch->handle,
q 392 net/sched/sch_dsmark.c if (p->q == NULL)
q 393 net/sched/sch_dsmark.c p->q = &noop_qdisc;
q 395 net/sched/sch_dsmark.c qdisc_hash_add(p->q, true);
q 397 net/sched/sch_dsmark.c pr_debug("%s: qdisc %p\n", __func__, p->q);
q 409 net/sched/sch_dsmark.c qdisc_reset(p->q);
q 411 net/sched/sch_dsmark.c sch->q.qlen = 0;
q 421 net/sched/sch_dsmark.c qdisc_put(p->q);
q 438 net/sched/sch_dsmark.c tcm->tcm_info = p->q->handle;
q 77 net/sched/sch_etf.c struct etf_sched_data *q = qdisc_priv(sch);
q 82 net/sched/sch_etf.c if (q->skip_sock_check)
q 94 net/sched/sch_etf.c if (sk->sk_clockid != q->clockid)
q 97 net/sched/sch_etf.c if (sk->sk_txtime_deadline_mode != q->deadline_mode)
q 101 net/sched/sch_etf.c now = q->get_time();
q 102 net/sched/sch_etf.c if (ktime_before(txtime, now) || ktime_before(txtime, q->last))
q 110 net/sched/sch_etf.c struct etf_sched_data *q = qdisc_priv(sch);
q 113 net/sched/sch_etf.c p = rb_first_cached(&q->head);
q 122 net/sched/sch_etf.c struct etf_sched_data *q = qdisc_priv(sch);
q 127 net/sched/sch_etf.c qdisc_watchdog_cancel(&q->watchdog);
q 131 net/sched/sch_etf.c next = ktime_sub_ns(skb->tstamp, q->delta);
q 132 net/sched/sch_etf.c qdisc_watchdog_schedule_ns(&q->watchdog, ktime_to_ns(next));
q 165 net/sched/sch_etf.c struct etf_sched_data *q = qdisc_priv(sch);
q 166 net/sched/sch_etf.c struct rb_node **p = &q->head.rb_root.rb_node, *parent = NULL;
q 189 net/sched/sch_etf.c rb_insert_color_cached(&nskb->rbnode, &q->head, leftmost);
q 192 net/sched/sch_etf.c sch->q.qlen++;
q 203 net/sched/sch_etf.c struct etf_sched_data *q = qdisc_priv(sch);
q 211 net/sched/sch_etf.c rb_erase_cached(&skb->rbnode, &q->head);
q 225 net/sched/sch_etf.c sch->q.qlen--;
q 233 net/sched/sch_etf.c struct etf_sched_data *q = qdisc_priv(sch);
q 235 net/sched/sch_etf.c rb_erase_cached(&skb->rbnode, &q->head);
q 248 net/sched/sch_etf.c q->last = skb->tstamp;
q 250 net/sched/sch_etf.c sch->q.qlen--;
q 255 net/sched/sch_etf.c struct etf_sched_data *q = qdisc_priv(sch);
q 263 net/sched/sch_etf.c now = q->get_time();
q 275 net/sched/sch_etf.c if (q->deadline_mode) {
q 281 net/sched/sch_etf.c next = ktime_sub_ns(skb->tstamp, q->delta);
q 297 net/sched/sch_etf.c struct etf_sched_data *q)
q 303 net/sched/sch_etf.c if (!q->offload)
q 310 net/sched/sch_etf.c etf.queue = q->queue;
q 319 net/sched/sch_etf.c static int etf_enable_offload(struct net_device *dev, struct etf_sched_data *q,
q 326 net/sched/sch_etf.c if (q->offload)
q 334 net/sched/sch_etf.c etf.queue = q->queue;
q 349 net/sched/sch_etf.c struct etf_sched_data *q = qdisc_priv(sch);
q 382 net/sched/sch_etf.c q->queue = sch->dev_queue - netdev_get_tx_queue(dev, 0);
q 385 net/sched/sch_etf.c err = etf_enable_offload(dev, q, extack);
q 391 net/sched/sch_etf.c q->delta = qopt->delta;
q 392 net/sched/sch_etf.c q->clockid = qopt->clockid;
q 393 net/sched/sch_etf.c q->offload = OFFLOAD_IS_ON(qopt);
q 394 net/sched/sch_etf.c q->deadline_mode = DEADLINE_MODE_IS_ON(qopt);
q 395 net/sched/sch_etf.c q->skip_sock_check = SKIP_SOCK_CHECK_IS_SET(qopt);
q 397 net/sched/sch_etf.c switch (q->clockid) {
q 399 net/sched/sch_etf.c q->get_time = ktime_get_real;
q 402 net/sched/sch_etf.c q->get_time = ktime_get;
q 405 net/sched/sch_etf.c q->get_time = ktime_get_boottime;
q 408 net/sched/sch_etf.c q->get_time = ktime_get_clocktai;
q 415 net/sched/sch_etf.c qdisc_watchdog_init_clockid(&q->watchdog, sch, q->clockid);
q 422 net/sched/sch_etf.c struct etf_sched_data *q = qdisc_priv(sch);
q 423 net/sched/sch_etf.c struct rb_node *p = rb_first_cached(&q->head);
q 430 net/sched/sch_etf.c rb_erase_cached(&skb->rbnode, &q->head);
q 432 net/sched/sch_etf.c sch->q.qlen--;
q 438 net/sched/sch_etf.c struct etf_sched_data *q = qdisc_priv(sch);
q 441 net/sched/sch_etf.c if (q->watchdog.qdisc == sch)
q 442 net/sched/sch_etf.c qdisc_watchdog_cancel(&q->watchdog);
q 446 net/sched/sch_etf.c __qdisc_reset_queue(&sch->q);
q 449 net/sched/sch_etf.c sch->q.qlen = 0;
q 451 net/sched/sch_etf.c q->last = 0;
q 456 net/sched/sch_etf.c struct etf_sched_data *q = qdisc_priv(sch);
q 460 net/sched/sch_etf.c if (q->watchdog.qdisc == sch)
q 461 net/sched/sch_etf.c qdisc_watchdog_cancel(&q->watchdog);
q 463 net/sched/sch_etf.c etf_disable_offload(dev, q);
q 468 net/sched/sch_etf.c struct etf_sched_data *q = qdisc_priv(sch);
q 476 net/sched/sch_etf.c opt.delta = q->delta;
q 477 net/sched/sch_etf.c opt.clockid = q->clockid;
q 478 net/sched/sch_etf.c if (q->offload)
q 481 net/sched/sch_etf.c if (q->deadline_mode)
q 484 net/sched/sch_etf.c if (q->skip_sock_check)
q 30 net/sched/sch_fifo.c if (likely(sch->q.qlen < sch->limit))
q 41 net/sched/sch_fifo.c if (likely(sch->q.qlen < sch->limit))
q 46 net/sched/sch_fifo.c __qdisc_queue_drop_head(sch, &sch->q, to_free);
q 142 net/sched/sch_fifo.c int fifo_set_limit(struct Qdisc *q, unsigned int limit)
q 148 net/sched/sch_fifo.c if (strncmp(q->ops->id + 1, "fifo", 4) != 0)
q 157 net/sched/sch_fifo.c ret = q->ops->change(q, nla, NULL);
q 168 net/sched/sch_fifo.c struct Qdisc *q;
q 171 net/sched/sch_fifo.c q = qdisc_create_dflt(sch->dev_queue, ops, TC_H_MAKE(sch->handle, 1),
q 173 net/sched/sch_fifo.c if (q) {
q 174 net/sched/sch_fifo.c err = fifo_set_limit(q, limit);
q 176 net/sched/sch_fifo.c qdisc_put(q);
q 177 net/sched/sch_fifo.c q = NULL;
q 181 net/sched/sch_fifo.c return q ? : ERR_PTR(err);
q 156 net/sched/sch_fq.c static void fq_flow_unset_throttled(struct fq_sched_data *q, struct fq_flow *f)
q 158 net/sched/sch_fq.c rb_erase(&f->rate_node, &q->delayed);
q 159 net/sched/sch_fq.c q->throttled_flows--;
q 160 net/sched/sch_fq.c fq_flow_add_tail(&q->old_flows, f);
q 163 net/sched/sch_fq.c static void fq_flow_set_throttled(struct fq_sched_data *q, struct fq_flow *f)
q 165 net/sched/sch_fq.c struct rb_node **p = &q->delayed.rb_node, *parent = NULL;
q 178 net/sched/sch_fq.c rb_insert_color(&f->rate_node, &q->delayed);
q 179 net/sched/sch_fq.c q->throttled_flows++;
q 180 net/sched/sch_fq.c q->stat_throttled++;
q 183 net/sched/sch_fq.c if (q->time_next_delayed_flow > f->time_next_packet)
q 184 net/sched/sch_fq.c q->time_next_delayed_flow = f->time_next_packet;
q 201 net/sched/sch_fq.c static void fq_gc(struct fq_sched_data *q,
q 230 net/sched/sch_fq.c q->flows -= fcnt;
q 231 net/sched/sch_fq.c q->inactive_flows -= fcnt;
q 232 net/sched/sch_fq.c q->stat_gc_flows += fcnt;
q 241 net/sched/sch_fq.c static struct fq_flow *fq_classify(struct sk_buff *skb, struct fq_sched_data *q)
q 250 net/sched/sch_fq.c return &q->internal;
q 262 net/sched/sch_fq.c unsigned long hash = skb_get_hash(skb) & q->orphan_mask;
q 270 net/sched/sch_fq.c unsigned long hash = skb_get_hash(skb) & q->orphan_mask;
q 282 net/sched/sch_fq.c root = &q->fq_root[hash_ptr(sk, q->fq_trees_log)];
q 284 net/sched/sch_fq.c if (q->flows >= (2U << q->fq_trees_log) &&
q 285 net/sched/sch_fq.c q->inactive_flows > q->flows/2)
q 286 net/sched/sch_fq.c fq_gc(q, root, sk);
q 302 net/sched/sch_fq.c f->credit = q->initial_quantum;
q 304 net/sched/sch_fq.c if (q->rate_enable)
q 308 net/sched/sch_fq.c fq_flow_unset_throttled(q, f);
q 321 net/sched/sch_fq.c q->stat_allocation_errors++;
q 322 net/sched/sch_fq.c return &q->internal;
q 330 net/sched/sch_fq.c if (q->rate_enable)
q 334 net/sched/sch_fq.c f->credit = q->initial_quantum;
q 339 net/sched/sch_fq.c q->flows++;
q 340 net/sched/sch_fq.c q->inactive_flows++;
q 381 net/sched/sch_fq.c sch->q.qlen--;
q 423 net/sched/sch_fq.c struct fq_sched_data *q = qdisc_priv(sch);
q 426 net/sched/sch_fq.c if (unlikely(sch->q.qlen >= sch->limit))
q 429 net/sched/sch_fq.c f = fq_classify(skb, q);
q 430 net/sched/sch_fq.c if (unlikely(f->qlen >= q->flow_plimit && f != &q->internal)) {
q 431 net/sched/sch_fq.c q->stat_flows_plimit++;
q 438 net/sched/sch_fq.c fq_flow_add_tail(&q->new_flows, f);
q 439 net/sched/sch_fq.c if (time_after(jiffies, f->age + q->flow_refill_delay))
q 440 net/sched/sch_fq.c f->credit = max_t(u32, f->credit, q->quantum);
q 441 net/sched/sch_fq.c q->inactive_flows--;
q 447 net/sched/sch_fq.c if (unlikely(f == &q->internal)) {
q 448 net/sched/sch_fq.c q->stat_internal_packets++;
q 450 net/sched/sch_fq.c sch->q.qlen++;
q 455 net/sched/sch_fq.c static void fq_check_throttled(struct fq_sched_data *q, u64 now)
q 460 net/sched/sch_fq.c if (q->time_next_delayed_flow > now)
q 466 net/sched/sch_fq.c sample = (unsigned long)(now - q->time_next_delayed_flow);
q 467 net/sched/sch_fq.c q->unthrottle_latency_ns -= q->unthrottle_latency_ns >> 3;
q 468 net/sched/sch_fq.c q->unthrottle_latency_ns += sample >> 3;
q 470 net/sched/sch_fq.c q->time_next_delayed_flow = ~0ULL;
q 471 net/sched/sch_fq.c while ((p = rb_first(&q->delayed)) != NULL) {
q 475 net/sched/sch_fq.c q->time_next_delayed_flow = f->time_next_packet;
q 478 net/sched/sch_fq.c fq_flow_unset_throttled(q, f);
q 484 net/sched/sch_fq.c struct fq_sched_data *q = qdisc_priv(sch);
q 492 net/sched/sch_fq.c if (!sch->q.qlen)
q 495 net/sched/sch_fq.c skb = fq_dequeue_head(sch, &q->internal);
q 500 net/sched/sch_fq.c fq_check_throttled(q, now);
q 502 net/sched/sch_fq.c head = &q->new_flows;
q 504 net/sched/sch_fq.c head = &q->old_flows;
q 506 net/sched/sch_fq.c if (q->time_next_delayed_flow != ~0ULL)
q 507 net/sched/sch_fq.c qdisc_watchdog_schedule_ns(&q->watchdog,
q 508 net/sched/sch_fq.c q->time_next_delayed_flow);
q 515 net/sched/sch_fq.c f->credit += q->quantum;
q 517 net/sched/sch_fq.c fq_flow_add_tail(&q->old_flows, f);
q 529 net/sched/sch_fq.c fq_flow_set_throttled(q, f);
q 533 net/sched/sch_fq.c (s64)(now - time_next_packet - q->ce_threshold) > 0) {
q 535 net/sched/sch_fq.c q->stat_ce_mark++;
q 543 net/sched/sch_fq.c if ((head == &q->new_flows) && q->old_flows.first) {
q 544 net/sched/sch_fq.c fq_flow_add_tail(&q->old_flows, f);
q 547 net/sched/sch_fq.c q->inactive_flows++;
q 555 net/sched/sch_fq.c if (!q->rate_enable)
q 558 net/sched/sch_fq.c rate = q->flow_max_rate;
q 568 net/sched/sch_fq.c if (rate <= q->low_rate_threshold) {
q 571 net/sched/sch_fq.c plen = max(plen, q->quantum);
q 587 net/sched/sch_fq.c q->stat_pkts_too_long++;
q 620 net/sched/sch_fq.c struct fq_sched_data *q = qdisc_priv(sch);
q 626 net/sched/sch_fq.c sch->q.qlen = 0;
q 629 net/sched/sch_fq.c fq_flow_purge(&q->internal);
q 631 net/sched/sch_fq.c if (!q->fq_root)
q 634 net/sched/sch_fq.c for (idx = 0; idx < (1U << q->fq_trees_log); idx++) {
q 635 net/sched/sch_fq.c root = &q->fq_root[idx];
q 645 net/sched/sch_fq.c q->new_flows.first = NULL;
q 646 net/sched/sch_fq.c q->old_flows.first = NULL;
q 647 net/sched/sch_fq.c q->delayed = RB_ROOT;
q 648 net/sched/sch_fq.c q->flows = 0;
q 649 net/sched/sch_fq.c q->inactive_flows = 0;
q 650 net/sched/sch_fq.c q->throttled_flows = 0;
q 653 net/sched/sch_fq.c static void fq_rehash(struct fq_sched_data *q,
q 693 net/sched/sch_fq.c q->flows -= fcnt;
q 694 net/sched/sch_fq.c q->inactive_flows -= fcnt;
q 695 net/sched/sch_fq.c q->stat_gc_flows += fcnt;
q 705 net/sched/sch_fq.c struct fq_sched_data *q = qdisc_priv(sch);
q 710 net/sched/sch_fq.c if (q->fq_root && log == q->fq_trees_log)
q 724 net/sched/sch_fq.c old_fq_root = q->fq_root;
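Annotation: the sch_fq.c entries at lines 282-286 show when flow garbage collection fires: only once the flow count exceeds two per hash bucket on average and more than half the flows are idle. A tiny standalone check of that trigger condition, with illustrative counts:

    #include <stdio.h>

    int main(void)
    {
        unsigned int fq_trees_log = 10;     /* 1024 buckets, the fq default */
        unsigned int flows = 2200;          /* total tracked flows */
        unsigned int inactive_flows = 1400; /* flows with no queued packets */

        /* Condition copied from the fq_classify() entries above. */
        if (flows >= (2U << fq_trees_log) && inactive_flows > flows / 2)
            printf("fq_gc() would run on this bucket\n");
        else
            printf("no GC yet\n");
        return 0;
    }

Gating GC this way keeps the common enqueue path cheap: the rbtree walk only happens when the table is demonstrably crowded with dead flows.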
q 726 net/sched/sch_fq.c fq_rehash(q, old_fq_root, q->fq_trees_log, array, log);
q 728 net/sched/sch_fq.c q->fq_root = array;
q 729 net/sched/sch_fq.c q->fq_trees_log = log;
q 756 net/sched/sch_fq.c struct fq_sched_data *q = qdisc_priv(sch);
q 772 net/sched/sch_fq.c fq_log = q->fq_trees_log;
q 786 net/sched/sch_fq.c q->flow_plimit = nla_get_u32(tb[TCA_FQ_FLOW_PLIMIT]);
q 792 net/sched/sch_fq.c q->quantum = quantum;
q 800 net/sched/sch_fq.c q->initial_quantum = nla_get_u32(tb[TCA_FQ_INITIAL_QUANTUM]);
q 809 net/sched/sch_fq.c q->flow_max_rate = (rate == ~0U) ? ~0UL : rate;
q 812 net/sched/sch_fq.c q->low_rate_threshold =
q 819 net/sched/sch_fq.c q->rate_enable = enable;
q 827 net/sched/sch_fq.c q->flow_refill_delay = usecs_to_jiffies(usecs_delay);
q 831 net/sched/sch_fq.c q->orphan_mask = nla_get_u32(tb[TCA_FQ_ORPHAN_MASK]);
q 834 net/sched/sch_fq.c q->ce_threshold = (u64)NSEC_PER_USEC *
q 842 net/sched/sch_fq.c while (sch->q.qlen > sch->limit) {
q 859 net/sched/sch_fq.c struct fq_sched_data *q = qdisc_priv(sch);
q 862 net/sched/sch_fq.c fq_free(q->fq_root);
q 863 net/sched/sch_fq.c qdisc_watchdog_cancel(&q->watchdog);
q 869 net/sched/sch_fq.c struct fq_sched_data *q = qdisc_priv(sch);
q 873 net/sched/sch_fq.c q->flow_plimit = 100;
q 874 net/sched/sch_fq.c q->quantum = 2 * psched_mtu(qdisc_dev(sch));
q 875 net/sched/sch_fq.c q->initial_quantum = 10 * psched_mtu(qdisc_dev(sch));
q 876 net/sched/sch_fq.c q->flow_refill_delay = msecs_to_jiffies(40);
q 877 net/sched/sch_fq.c q->flow_max_rate = ~0UL;
q 878 net/sched/sch_fq.c q->time_next_delayed_flow = ~0ULL;
q 879 net/sched/sch_fq.c q->rate_enable = 1;
q 880 net/sched/sch_fq.c q->new_flows.first = NULL;
q 881 net/sched/sch_fq.c q->old_flows.first = NULL;
q 882 net/sched/sch_fq.c q->delayed = RB_ROOT;
q 883 net/sched/sch_fq.c q->fq_root = NULL;
q 884 net/sched/sch_fq.c q->fq_trees_log = ilog2(1024);
q 885 net/sched/sch_fq.c q->orphan_mask = 1024 - 1;
q 886 net/sched/sch_fq.c q->low_rate_threshold = 550000 / 8;
q 889 net/sched/sch_fq.c q->ce_threshold = (u64)NSEC_PER_USEC * ~0U;
q 891 net/sched/sch_fq.c qdisc_watchdog_init_clockid(&q->watchdog, sch, CLOCK_MONOTONIC);
q 896 net/sched/sch_fq.c err = fq_resize(sch, q->fq_trees_log);
q 903 net/sched/sch_fq.c struct fq_sched_data *q = qdisc_priv(sch);
q 904 net/sched/sch_fq.c u64 ce_threshold = q->ce_threshold;
q 916 net/sched/sch_fq.c nla_put_u32(skb, TCA_FQ_FLOW_PLIMIT, q->flow_plimit) ||
q 917 net/sched/sch_fq.c nla_put_u32(skb, TCA_FQ_QUANTUM, q->quantum) ||
q 918 net/sched/sch_fq.c nla_put_u32(skb, TCA_FQ_INITIAL_QUANTUM, q->initial_quantum) ||
q 919 net/sched/sch_fq.c nla_put_u32(skb, TCA_FQ_RATE_ENABLE, q->rate_enable) ||
q 921 net/sched/sch_fq.c min_t(unsigned long, q->flow_max_rate, ~0U)) ||
q 923 net/sched/sch_fq.c jiffies_to_usecs(q->flow_refill_delay)) ||
q 924 net/sched/sch_fq.c nla_put_u32(skb, TCA_FQ_ORPHAN_MASK, q->orphan_mask) ||
q 926 net/sched/sch_fq.c q->low_rate_threshold) ||
q 928 net/sched/sch_fq.c nla_put_u32(skb, TCA_FQ_BUCKETS_LOG, q->fq_trees_log))
q 939 net/sched/sch_fq.c struct fq_sched_data *q = qdisc_priv(sch);
q 944 net/sched/sch_fq.c st.gc_flows = q->stat_gc_flows;
q 945 net/sched/sch_fq.c st.highprio_packets = q->stat_internal_packets;
q 947 net/sched/sch_fq.c st.throttled = q->stat_throttled;
q 948 net/sched/sch_fq.c st.flows_plimit = q->stat_flows_plimit;
q 949 net/sched/sch_fq.c st.pkts_too_long = q->stat_pkts_too_long;
q 950 net/sched/sch_fq.c st.allocation_errors = q->stat_allocation_errors;
q 951 net/sched/sch_fq.c st.time_next_delayed_flow = q->time_next_delayed_flow - ktime_get_ns();
q 952 net/sched/sch_fq.c st.flows = q->flows;
q 953 net/sched/sch_fq.c st.inactive_flows = q->inactive_flows;
q 954 net/sched/sch_fq.c st.throttled_flows = q->throttled_flows;
q 956 net/sched/sch_fq.c q->unthrottle_latency_ns, ~0U);
q 957 net/sched/sch_fq.c st.ce_mark = q->stat_ce_mark;
q 71 net/sched/sch_fq_codel.c static unsigned int fq_codel_hash(const struct fq_codel_sched_data *q,
q 74 net/sched/sch_fq_codel.c return reciprocal_scale(skb_get_hash(skb), q->flows_cnt);
q 80 net/sched/sch_fq_codel.c struct fq_codel_sched_data *q = qdisc_priv(sch);
q 87 net/sched/sch_fq_codel.c TC_H_MIN(skb->priority) <= q->flows_cnt)
q 90 net/sched/sch_fq_codel.c filter = rcu_dereference_bh(q->filter_list);
q 92 net/sched/sch_fq_codel.c return fq_codel_hash(q, skb) + 1;
q 108 net/sched/sch_fq_codel.c if (TC_H_MIN(res.classid) <= q->flows_cnt)
q 141 net/sched/sch_fq_codel.c struct fq_codel_sched_data *q = qdisc_priv(sch);
q 155 net/sched/sch_fq_codel.c for (i = 0; i < q->flows_cnt; i++) {
q 156 net/sched/sch_fq_codel.c if (q->backlogs[i] > maxbacklog) {
q 157 net/sched/sch_fq_codel.c maxbacklog = q->backlogs[i];
q 165 net/sched/sch_fq_codel.c flow = &q->flows[idx];
q 177 net/sched/sch_fq_codel.c q->backlogs[idx] -= len;
q 178 net/sched/sch_fq_codel.c q->memory_usage -= mem;
q 181 net/sched/sch_fq_codel.c sch->q.qlen -= i;
q 188 net/sched/sch_fq_codel.c struct fq_codel_sched_data *q = qdisc_priv(sch);
q 205 net/sched/sch_fq_codel.c flow = &q->flows[idx];
q 207 net/sched/sch_fq_codel.c q->backlogs[idx] += qdisc_pkt_len(skb);
q 211 net/sched/sch_fq_codel.c list_add_tail(&flow->flowchain, &q->new_flows);
q 212 net/sched/sch_fq_codel.c q->new_flow_count++;
q 213 net/sched/sch_fq_codel.c flow->deficit = q->quantum;
q 216 net/sched/sch_fq_codel.c q->memory_usage += get_codel_cb(skb)->mem_usage;
q 217 net/sched/sch_fq_codel.c memory_limited = q->memory_usage > q->memory_limit;
q 218 net/sched/sch_fq_codel.c if (++sch->q.qlen <= sch->limit && !memory_limited)
q 222 net/sched/sch_fq_codel.c prev_qlen = sch->q.qlen;
q 231 net/sched/sch_fq_codel.c ret = fq_codel_drop(sch, q->drop_batch_size, to_free);
q 233 net/sched/sch_fq_codel.c prev_qlen -= sch->q.qlen;
q 235 net/sched/sch_fq_codel.c q->drop_overlimit += prev_qlen;
q 237 net/sched/sch_fq_codel.c q->drop_overmemory += prev_qlen;
q 259 net/sched/sch_fq_codel.c struct fq_codel_sched_data *q = qdisc_priv(sch);
q 266 net/sched/sch_fq_codel.c q->backlogs[flow - q->flows] -= qdisc_pkt_len(skb);
q 267 net/sched/sch_fq_codel.c q->memory_usage -= get_codel_cb(skb)->mem_usage;
q 268 net/sched/sch_fq_codel.c sch->q.qlen--;
q 284 net/sched/sch_fq_codel.c struct fq_codel_sched_data *q = qdisc_priv(sch);
q 290 net/sched/sch_fq_codel.c head = &q->new_flows;
q 292 net/sched/sch_fq_codel.c head = &q->old_flows;
q 299 net/sched/sch_fq_codel.c flow->deficit += q->quantum;
q 300 net/sched/sch_fq_codel.c list_move_tail(&flow->flowchain, &q->old_flows);
q 304 net/sched/sch_fq_codel.c skb = codel_dequeue(sch, &sch->qstats.backlog, &q->cparams,
q 305 net/sched/sch_fq_codel.c &flow->cvars, &q->cstats, qdisc_pkt_len,
q 310 net/sched/sch_fq_codel.c if ((head == &q->new_flows) && !list_empty(&q->old_flows))
q 311 net/sched/sch_fq_codel.c list_move_tail(&flow->flowchain, &q->old_flows);
q 321 net/sched/sch_fq_codel.c if (q->cstats.drop_count && sch->q.qlen) {
q 322 net/sched/sch_fq_codel.c qdisc_tree_reduce_backlog(sch, q->cstats.drop_count,
q 323 net/sched/sch_fq_codel.c q->cstats.drop_len);
q 324 net/sched/sch_fq_codel.c q->cstats.drop_count = 0;
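Annotation: the sch_fq_codel.c entry at line 74 maps a 32-bit flow hash onto q->flows_cnt buckets with reciprocal_scale() rather than a modulo. A self-contained sketch of that multiply-and-shift trick; the helper below mirrors the standard kernel definition, and the hash value is a stand-in for skb_get_hash():

    #include <stdint.h>
    #include <stdio.h>

    /* Map val in [0, 2^32) onto [0, ep_ro) without a division:
     * (val * ep_ro) >> 32 preserves the uniform distribution. */
    static uint32_t reciprocal_scale(uint32_t val, uint32_t ep_ro)
    {
        return (uint32_t)(((uint64_t)val * ep_ro) >> 32);
    }

    int main(void)
    {
        uint32_t flows_cnt = 1024;      /* fq_codel_init() default above */
        uint32_t hash = 0xdeadbeefu;    /* stand-in for skb_get_hash(skb) */

        printf("flow index = %u\n", reciprocal_scale(hash, flows_cnt));
        return 0;
    }

A multiply and a shift is cheaper than integer division on most CPUs, which matters on the per-packet enqueue path.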
q 325 net/sched/sch_fq_codel.c q->cstats.drop_len = 0;
q 338 net/sched/sch_fq_codel.c struct fq_codel_sched_data *q = qdisc_priv(sch);
q 341 net/sched/sch_fq_codel.c INIT_LIST_HEAD(&q->new_flows);
q 342 net/sched/sch_fq_codel.c INIT_LIST_HEAD(&q->old_flows);
q 343 net/sched/sch_fq_codel.c for (i = 0; i < q->flows_cnt; i++) {
q 344 net/sched/sch_fq_codel.c struct fq_codel_flow *flow = q->flows + i;
q 350 net/sched/sch_fq_codel.c memset(q->backlogs, 0, q->flows_cnt * sizeof(u32));
q 351 net/sched/sch_fq_codel.c sch->q.qlen = 0;
q 353 net/sched/sch_fq_codel.c q->memory_usage = 0;
q 371 net/sched/sch_fq_codel.c struct fq_codel_sched_data *q = qdisc_priv(sch);
q 383 net/sched/sch_fq_codel.c if (q->flows)
q 385 net/sched/sch_fq_codel.c q->flows_cnt = nla_get_u32(tb[TCA_FQ_CODEL_FLOWS]);
q 386 net/sched/sch_fq_codel.c if (!q->flows_cnt ||
q 387 net/sched/sch_fq_codel.c q->flows_cnt > 65536)
q 395 net/sched/sch_fq_codel.c q->cparams.target = (target * NSEC_PER_USEC) >> CODEL_SHIFT;
q 401 net/sched/sch_fq_codel.c q->cparams.ce_threshold = (val * NSEC_PER_USEC) >> CODEL_SHIFT;
q 407 net/sched/sch_fq_codel.c q->cparams.interval = (interval * NSEC_PER_USEC) >> CODEL_SHIFT;
q 414 net/sched/sch_fq_codel.c q->cparams.ecn = !!nla_get_u32(tb[TCA_FQ_CODEL_ECN]);
q 417 net/sched/sch_fq_codel.c q->quantum = max(256U, nla_get_u32(tb[TCA_FQ_CODEL_QUANTUM]));
q 420 net/sched/sch_fq_codel.c q->drop_batch_size = max(1U, nla_get_u32(tb[TCA_FQ_CODEL_DROP_BATCH_SIZE]));
q 423 net/sched/sch_fq_codel.c q->memory_limit = min(1U << 31, nla_get_u32(tb[TCA_FQ_CODEL_MEMORY_LIMIT]));
q 425 net/sched/sch_fq_codel.c while (sch->q.qlen > sch->limit ||
q 426 net/sched/sch_fq_codel.c q->memory_usage > q->memory_limit) {
q 429 net/sched/sch_fq_codel.c q->cstats.drop_len += qdisc_pkt_len(skb);
q 431 net/sched/sch_fq_codel.c q->cstats.drop_count++;
q 433 net/sched/sch_fq_codel.c qdisc_tree_reduce_backlog(sch, q->cstats.drop_count, q->cstats.drop_len);
q 434 net/sched/sch_fq_codel.c q->cstats.drop_count = 0;
q 435 net/sched/sch_fq_codel.c q->cstats.drop_len = 0;
q 443 net/sched/sch_fq_codel.c struct fq_codel_sched_data *q = qdisc_priv(sch);
q 445 net/sched/sch_fq_codel.c tcf_block_put(q->block);
q 446 net/sched/sch_fq_codel.c kvfree(q->backlogs);
q 447 net/sched/sch_fq_codel.c kvfree(q->flows);
q 453 net/sched/sch_fq_codel.c struct fq_codel_sched_data *q = qdisc_priv(sch);
q 458 net/sched/sch_fq_codel.c q->flows_cnt = 1024;
q 459 net/sched/sch_fq_codel.c q->memory_limit = 32 << 20; /* 32 MBytes */
q 460 net/sched/sch_fq_codel.c q->drop_batch_size = 64;
q 461 net/sched/sch_fq_codel.c q->quantum = psched_mtu(qdisc_dev(sch));
q 462 net/sched/sch_fq_codel.c INIT_LIST_HEAD(&q->new_flows);
q 463 net/sched/sch_fq_codel.c INIT_LIST_HEAD(&q->old_flows);
q 464 net/sched/sch_fq_codel.c codel_params_init(&q->cparams);
q 465 net/sched/sch_fq_codel.c codel_stats_init(&q->cstats);
q 466 net/sched/sch_fq_codel.c q->cparams.ecn = true;
q 467 net/sched/sch_fq_codel.c q->cparams.mtu = psched_mtu(qdisc_dev(sch));
q 475 net/sched/sch_fq_codel.c err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
q 479 net/sched/sch_fq_codel.c if (!q->flows) {
q 480 net/sched/sch_fq_codel.c q->flows = kvcalloc(q->flows_cnt,
q 483 net/sched/sch_fq_codel.c if (!q->flows) {
q 487 net/sched/sch_fq_codel.c q->backlogs = kvcalloc(q->flows_cnt, sizeof(u32), GFP_KERNEL);
q 488 net/sched/sch_fq_codel.c if (!q->backlogs) {
q 492 net/sched/sch_fq_codel.c for (i = 0; i < q->flows_cnt; i++) {
q 493 net/sched/sch_fq_codel.c struct fq_codel_flow *flow = q->flows + i;
q 506 net/sched/sch_fq_codel.c kvfree(q->flows);
q 507 net/sched/sch_fq_codel.c q->flows = NULL;
q 509 net/sched/sch_fq_codel.c q->flows_cnt = 0;
q 515 net/sched/sch_fq_codel.c struct fq_codel_sched_data *q = qdisc_priv(sch);
q 523 net/sched/sch_fq_codel.c codel_time_to_us(q->cparams.target)) ||
q 527 net/sched/sch_fq_codel.c codel_time_to_us(q->cparams.interval)) ||
q 529 net/sched/sch_fq_codel.c q->cparams.ecn) ||
q 531 net/sched/sch_fq_codel.c q->quantum) ||
q 533 net/sched/sch_fq_codel.c q->drop_batch_size) ||
q 535 net/sched/sch_fq_codel.c q->memory_limit) ||
q 537 net/sched/sch_fq_codel.c q->flows_cnt))
q 540 net/sched/sch_fq_codel.c if (q->cparams.ce_threshold != CODEL_DISABLED_THRESHOLD &&
q 542 net/sched/sch_fq_codel.c codel_time_to_us(q->cparams.ce_threshold)))
q 553 net/sched/sch_fq_codel.c struct fq_codel_sched_data *q = qdisc_priv(sch);
q 559 net/sched/sch_fq_codel.c st.qdisc_stats.maxpacket = q->cstats.maxpacket;
q 560 net/sched/sch_fq_codel.c st.qdisc_stats.drop_overlimit = q->drop_overlimit;
q 561 net/sched/sch_fq_codel.c st.qdisc_stats.ecn_mark = q->cstats.ecn_mark;
q 562 net/sched/sch_fq_codel.c st.qdisc_stats.new_flow_count = q->new_flow_count;
q 563 net/sched/sch_fq_codel.c st.qdisc_stats.ce_mark = q->cstats.ce_mark;
q 564 net/sched/sch_fq_codel.c st.qdisc_stats.memory_usage = q->memory_usage;
q 565 net/sched/sch_fq_codel.c st.qdisc_stats.drop_overmemory = q->drop_overmemory;
q 568 net/sched/sch_fq_codel.c list_for_each(pos, &q->new_flows)
q 571 net/sched/sch_fq_codel.c list_for_each(pos, &q->old_flows)
q 594 net/sched/sch_fq_codel.c static void fq_codel_unbind(struct Qdisc *q, unsigned long cl)
q 601 net/sched/sch_fq_codel.c struct fq_codel_sched_data *q = qdisc_priv(sch);
q 605 net/sched/sch_fq_codel.c return q->block;
q 618 net/sched/sch_fq_codel.c struct fq_codel_sched_data *q = qdisc_priv(sch);
q 623 net/sched/sch_fq_codel.c if (idx < q->flows_cnt) {
q 624 net/sched/sch_fq_codel.c const struct fq_codel_flow *flow = &q->flows[idx];
q 652 net/sched/sch_fq_codel.c qs.backlog = q->backlogs[idx];
q 657 net/sched/sch_fq_codel.c if (idx < q->flows_cnt)
q 664 net/sched/sch_fq_codel.c struct fq_codel_sched_data *q = qdisc_priv(sch);
q 670 net/sched/sch_fq_codel.c for (i = 0; i < q->flows_cnt; i++) {
q 671 net/sched/sch_fq_codel.c if (list_empty(&q->flows[i].flowchain) ||
q 51 net/sched/sch_generic.c static inline struct sk_buff *__skb_dequeue_bad_txq(struct Qdisc *q)
q 53 net/sched/sch_generic.c const struct netdev_queue *txq = q->dev_queue;
q 57 net/sched/sch_generic.c if (q->flags & TCQ_F_NOLOCK) {
q 58 net/sched/sch_generic.c lock = qdisc_lock(q);
q 62 net/sched/sch_generic.c skb = skb_peek(&q->skb_bad_txq);
q 67 net/sched/sch_generic.c skb = __skb_dequeue(&q->skb_bad_txq);
q 68 net/sched/sch_generic.c if (qdisc_is_percpu_stats(q)) {
q 69 net/sched/sch_generic.c qdisc_qstats_cpu_backlog_dec(q, skb);
q 70 net/sched/sch_generic.c qdisc_qstats_cpu_qlen_dec(q);
q 72 net/sched/sch_generic.c qdisc_qstats_backlog_dec(q, skb);
q 73 net/sched/sch_generic.c q->q.qlen--;
q 86 net/sched/sch_generic.c static inline struct sk_buff *qdisc_dequeue_skb_bad_txq(struct Qdisc *q)
q 88 net/sched/sch_generic.c struct sk_buff *skb = skb_peek(&q->skb_bad_txq);
q 91 net/sched/sch_generic.c skb = __skb_dequeue_bad_txq(q);
q 96 net/sched/sch_generic.c static inline void qdisc_enqueue_skb_bad_txq(struct Qdisc *q,
q 101 net/sched/sch_generic.c if (q->flags & TCQ_F_NOLOCK) {
q 102 net/sched/sch_generic.c lock = qdisc_lock(q);
q 106 net/sched/sch_generic.c __skb_queue_tail(&q->skb_bad_txq, skb);
q 108 net/sched/sch_generic.c if (qdisc_is_percpu_stats(q)) {
q 109 net/sched/sch_generic.c qdisc_qstats_cpu_backlog_inc(q, skb);
q 110 net/sched/sch_generic.c qdisc_qstats_cpu_qlen_inc(q);
q 112 net/sched/sch_generic.c qdisc_qstats_backlog_inc(q, skb);
q 113 net/sched/sch_generic.c q->q.qlen++;
q 120 net/sched/sch_generic.c static inline void dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q)
q 124 net/sched/sch_generic.c if (q->flags & TCQ_F_NOLOCK) {
q 125 net/sched/sch_generic.c lock = qdisc_lock(q);
q 132 net/sched/sch_generic.c __skb_queue_tail(&q->gso_skb, skb);
q 135 net/sched/sch_generic.c if (qdisc_is_percpu_stats(q)) {
q 136 net/sched/sch_generic.c qdisc_qstats_cpu_requeues_inc(q);
q 137 net/sched/sch_generic.c qdisc_qstats_cpu_backlog_inc(q, skb);
q 138 net/sched/sch_generic.c qdisc_qstats_cpu_qlen_inc(q);
q 140 net/sched/sch_generic.c q->qstats.requeues++;
q 141 net/sched/sch_generic.c qdisc_qstats_backlog_inc(q, skb);
q 142 net/sched/sch_generic.c q->q.qlen++;
q 149 net/sched/sch_generic.c __netif_schedule(q);
q 152 net/sched/sch_generic.c static void try_bulk_dequeue_skb(struct Qdisc *q,
q 160 net/sched/sch_generic.c struct sk_buff *nskb = q->dequeue(q);
q 176 net/sched/sch_generic.c static void try_bulk_dequeue_skb_slow(struct Qdisc *q,
q 185 net/sched/sch_generic.c nskb = q->dequeue(q);
q 189 net/sched/sch_generic.c qdisc_enqueue_skb_bad_txq(q, nskb);
q 202 net/sched/sch_generic.c static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate,
q 205 net/sched/sch_generic.c const struct netdev_queue *txq = q->dev_queue;
q 209 net/sched/sch_generic.c if (unlikely(!skb_queue_empty(&q->gso_skb))) {
q 212 net/sched/sch_generic.c if (q->flags & TCQ_F_NOLOCK) {
q 213 net/sched/sch_generic.c lock = qdisc_lock(q);
q 217 net/sched/sch_generic.c skb = skb_peek(&q->gso_skb);
q 235 net/sched/sch_generic.c skb = __skb_dequeue(&q->gso_skb);
q 236 net/sched/sch_generic.c if (qdisc_is_percpu_stats(q)) {
q 237 net/sched/sch_generic.c qdisc_qstats_cpu_backlog_dec(q, skb);
q 238 net/sched/sch_generic.c qdisc_qstats_cpu_qlen_dec(q);
q 240 net/sched/sch_generic.c qdisc_qstats_backlog_dec(q, skb);
q 241 net/sched/sch_generic.c q->q.qlen--;
q 253 net/sched/sch_generic.c if ((q->flags & TCQ_F_ONETXQUEUE) &&
q 257 net/sched/sch_generic.c skb = qdisc_dequeue_skb_bad_txq(q);
q 263 net/sched/sch_generic.c skb = q->dequeue(q);
q 266 net/sched/sch_generic.c if (qdisc_may_bulk(q))
q 267 net/sched/sch_generic.c try_bulk_dequeue_skb(q, skb, txq, packets);
q 269 net/sched/sch_generic.c try_bulk_dequeue_skb_slow(q, skb, packets);
q 272 net/sched/sch_generic.c trace_qdisc_dequeue(q, txq, *packets, skb);
q 285 net/sched/sch_generic.c bool sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
q 305 net/sched/sch_generic.c dev_requeue_skb(skb, q);
q 329 net/sched/sch_generic.c dev->name, ret, q->q.qlen);
q 331 net/sched/sch_generic.c dev_requeue_skb(skb, q);
q 357 net/sched/sch_generic.c static inline bool qdisc_restart(struct Qdisc *q, int *packets)
q 366 net/sched/sch_generic.c skb = dequeue_skb(q, &validate, packets);
q 370 net/sched/sch_generic.c if (!(q->flags & TCQ_F_NOLOCK))
q 371 net/sched/sch_generic.c root_lock = qdisc_lock(q);
q 373 net/sched/sch_generic.c dev = qdisc_dev(q);
q 376 net/sched/sch_generic.c return sch_direct_xmit(skb, q, dev, txq, root_lock, validate);
q 379 net/sched/sch_generic.c void __qdisc_run(struct Qdisc *q)
q 384 net/sched/sch_generic.c while (qdisc_restart(q, &packets)) {
q 392 net/sched/sch_generic.c __netif_schedule(q);
q 558 net/sched/sch_generic.c .q.lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock),
q 612 net/sched/sch_generic.c struct skb_array q[PFIFO_FAST_BANDS];
q 618 net/sched/sch_generic.c return &priv->q[band];
q 626 net/sched/sch_generic.c struct skb_array *q = band2list(priv, band);
q 630 net/sched/sch_generic.c err = skb_array_produce(q, skb);
q 650 net/sched/sch_generic.c struct skb_array *q = band2list(priv, band);
q 652 net/sched/sch_generic.c if (__skb_array_empty(q))
q 655 net/sched/sch_generic.c skb = __skb_array_consume(q);
q 673 net/sched/sch_generic.c struct skb_array *q = band2list(priv, band);
q 675 net/sched/sch_generic.c skb = __skb_array_peek(q);
q 687 net/sched/sch_generic.c struct skb_array *q = band2list(priv, band);
q 693 net/sched/sch_generic.c if (!q->ring.queue)
q 696 net/sched/sch_generic.c while ((skb = __skb_array_consume(q)) != NULL)
q 702 net/sched/sch_generic.c struct gnet_stats_queue *q;
q 704 net/sched/sch_generic.c q = per_cpu_ptr(qdisc->cpu_qstats, i);
q 705 net/sched/sch_generic.c q->backlog = 0;
q 706 net/sched/sch_generic.c q->qlen = 0;
q 736 net/sched/sch_generic.c struct skb_array *q = band2list(priv, prio);
q 739 net/sched/sch_generic.c err = skb_array_init(q, qlen, GFP_KERNEL);
q 755 net/sched/sch_generic.c struct skb_array *q = band2list(priv, prio);
q 760 net/sched/sch_generic.c if (!q->ring.queue)
q 765 net/sched/sch_generic.c ptr_ring_cleanup(&q->ring, NULL);
q 777 net/sched/sch_generic.c struct skb_array *q = band2list(priv, prio);
q 779 net/sched/sch_generic.c bands[prio] = q;
q 837 net/sched/sch_generic.c qdisc_skb_head_init(&sch->q);
q 838 net/sched/sch_generic.c spin_lock_init(&sch->q.lock);
q 927 net/sched/sch_generic.c qdisc->q.qlen = 0;
q 944 net/sched/sch_generic.c struct Qdisc *q = container_of(head, struct Qdisc, rcu);
q 946 net/sched/sch_generic.c qdisc_free(q);
q 1158 net/sched/sch_generic.c struct Qdisc *q;
q 1162 net/sched/sch_generic.c q = dev_queue->qdisc_sleeping;
q 1164 net/sched/sch_generic.c root_lock = qdisc_lock(q);
q 1167 net/sched/sch_generic.c val = (qdisc_is_running(q) ||
q 1168 net/sched/sch_generic.c test_bit(__QDISC_STATE_SCHED, &q->state));
q 98 net/sched/sch_gred.c struct gred_sched_data *q = table->tab[i];
q 101 net/sched/sch_gred.c if (q == NULL)
q 105 net/sched/sch_gred.c if (table->tab[n] && table->tab[n]->prio == q->prio)
q 113 net/sched/sch_gred.c struct gred_sched_data *q,
q 119 net/sched/sch_gred.c return q->backlog;
q 128 net/sched/sch_gred.c struct gred_sched_data *q)
q 130 net/sched/sch_gred.c q->vars.qavg = table->wred_set.qavg;
q 131 net/sched/sch_gred.c q->vars.qidlestart = table->wred_set.qidlestart;
q 135 net/sched/sch_gred.c struct gred_sched_data *q)
q 137 net/sched/sch_gred.c table->wred_set.qavg = q->vars.qavg;
q 138 net/sched/sch_gred.c table->wred_set.qidlestart = q->vars.qidlestart;
q 141 net/sched/sch_gred.c static int gred_use_ecn(struct gred_sched_data *q)
q 143 net/sched/sch_gred.c return q->red_flags & TC_RED_ECN;
q 146 net/sched/sch_gred.c static int gred_use_harddrop(struct gred_sched_data *q)
q 148 net/sched/sch_gred.c return q->red_flags & TC_RED_HARDDROP;
q 167 net/sched/sch_gred.c struct gred_sched_data *q = NULL;
q 172 net/sched/sch_gred.c if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
q 175 net/sched/sch_gred.c q = t->tab[dp];
q 176 net/sched/sch_gred.c if (!q) {
q 198 net/sched/sch_gred.c if (t->tab[i] && t->tab[i]->prio < q->prio &&
q 205 net/sched/sch_gred.c q->packetsin++;
q 206 net/sched/sch_gred.c q->bytesin += qdisc_pkt_len(skb);
q 209 net/sched/sch_gred.c gred_load_wred_set(t, q);
q 211 net/sched/sch_gred.c q->vars.qavg = red_calc_qavg(&q->parms,
211 net/sched/sch_gred.c q->vars.qavg = red_calc_qavg(&q->parms, q 212 net/sched/sch_gred.c &q->vars, q 213 net/sched/sch_gred.c gred_backlog(t, q, sch)); q 215 net/sched/sch_gred.c if (red_is_idling(&q->vars)) q 216 net/sched/sch_gred.c red_end_of_idle_period(&q->vars); q 219 net/sched/sch_gred.c gred_store_wred_set(t, q); q 221 net/sched/sch_gred.c switch (red_action(&q->parms, &q->vars, q->vars.qavg + qavg)) { q 227 net/sched/sch_gred.c if (!gred_use_ecn(q) || !INET_ECN_set_ce(skb)) { q 228 net/sched/sch_gred.c q->stats.prob_drop++; q 232 net/sched/sch_gred.c q->stats.prob_mark++; q 237 net/sched/sch_gred.c if (gred_use_harddrop(q) || !gred_use_ecn(q) || q 239 net/sched/sch_gred.c q->stats.forced_drop++; q 242 net/sched/sch_gred.c q->stats.forced_mark++; q 246 net/sched/sch_gred.c if (gred_backlog(t, q, sch) + qdisc_pkt_len(skb) <= q->limit) { q 247 net/sched/sch_gred.c q->backlog += qdisc_pkt_len(skb); q 251 net/sched/sch_gred.c q->stats.pdrop++; q 268 net/sched/sch_gred.c struct gred_sched_data *q; q 271 net/sched/sch_gred.c if (dp >= t->DPs || (q = t->tab[dp]) == NULL) { q 275 net/sched/sch_gred.c q->backlog -= qdisc_pkt_len(skb); q 281 net/sched/sch_gred.c if (!q->backlog) q 282 net/sched/sch_gred.c red_start_of_idle_period(&q->vars); q 300 net/sched/sch_gred.c struct gred_sched_data *q = t->tab[i]; q 302 net/sched/sch_gred.c if (!q) q 305 net/sched/sch_gred.c red_restart(&q->vars); q 306 net/sched/sch_gred.c q->backlog = 0; q 332 net/sched/sch_gred.c struct gred_sched_data *q = table->tab[i]; q 334 net/sched/sch_gred.c if (!q) q 337 net/sched/sch_gred.c opt.set.tab[i].limit = q->limit; q 338 net/sched/sch_gred.c opt.set.tab[i].prio = q->prio; q 339 net/sched/sch_gred.c opt.set.tab[i].min = q->parms.qth_min >> q->parms.Wlog; q 340 net/sched/sch_gred.c opt.set.tab[i].max = q->parms.qth_max >> q->parms.Wlog; q 341 net/sched/sch_gred.c opt.set.tab[i].is_ecn = gred_use_ecn(q); q 342 net/sched/sch_gred.c opt.set.tab[i].is_harddrop = gred_use_harddrop(q); q 343 net/sched/sch_gred.c opt.set.tab[i].probability = q->parms.max_P; q 344 net/sched/sch_gred.c opt.set.tab[i].backlog = &q->backlog; q 396 net/sched/sch_gred.c static inline void gred_destroy_vq(struct gred_sched_data *q) q 398 net/sched/sch_gred.c kfree(q); q 481 net/sched/sch_gred.c struct gred_sched_data *q = table->tab[dp]; q 488 net/sched/sch_gred.c if (!q) { q 489 net/sched/sch_gred.c table->tab[dp] = q = *prealloc; q 491 net/sched/sch_gred.c if (!q) q 493 net/sched/sch_gred.c q->red_flags = table->red_flags & GRED_VQ_RED_FLAGS; q 496 net/sched/sch_gred.c q->DP = dp; q 497 net/sched/sch_gred.c q->prio = prio; q 499 net/sched/sch_gred.c q->limit = sch->limit; q 501 net/sched/sch_gred.c q->limit = ctl->limit; q 503 net/sched/sch_gred.c if (q->backlog == 0) q 504 net/sched/sch_gred.c red_end_of_idle_period(&q->vars); q 506 net/sched/sch_gred.c red_set_parms(&q->parms, q 509 net/sched/sch_gred.c red_set_vars(&q->vars); q 780 net/sched/sch_gred.c struct gred_sched_data *q = table->tab[i]; q 782 net/sched/sch_gred.c max_p[i] = q ? 
q->parms.max_P : 0; q 796 net/sched/sch_gred.c struct gred_sched_data *q = table->tab[i]; q 802 net/sched/sch_gred.c if (!q) { q 811 net/sched/sch_gred.c opt.limit = q->limit; q 812 net/sched/sch_gred.c opt.DP = q->DP; q 813 net/sched/sch_gred.c opt.backlog = gred_backlog(table, q, sch); q 814 net/sched/sch_gred.c opt.prio = q->prio; q 815 net/sched/sch_gred.c opt.qth_min = q->parms.qth_min >> q->parms.Wlog; q 816 net/sched/sch_gred.c opt.qth_max = q->parms.qth_max >> q->parms.Wlog; q 817 net/sched/sch_gred.c opt.Wlog = q->parms.Wlog; q 818 net/sched/sch_gred.c opt.Plog = q->parms.Plog; q 819 net/sched/sch_gred.c opt.Scell_log = q->parms.Scell_log; q 820 net/sched/sch_gred.c opt.other = q->stats.other; q 821 net/sched/sch_gred.c opt.early = q->stats.prob_drop; q 822 net/sched/sch_gred.c opt.forced = q->stats.forced_drop; q 823 net/sched/sch_gred.c opt.pdrop = q->stats.pdrop; q 824 net/sched/sch_gred.c opt.packets = q->packetsin; q 825 net/sched/sch_gred.c opt.bytesin = q->bytesin; q 828 net/sched/sch_gred.c gred_load_wred_set(table, q); q 830 net/sched/sch_gred.c qavg = red_calc_qavg(&q->parms, &q->vars, q 831 net/sched/sch_gred.c q->vars.qavg >> q->parms.Wlog); q 832 net/sched/sch_gred.c opt.qave = qavg >> q->parms.Wlog; q 847 net/sched/sch_gred.c struct gred_sched_data *q = table->tab[i]; q 850 net/sched/sch_gred.c if (!q) q 857 net/sched/sch_gred.c if (nla_put_u32(skb, TCA_GRED_VQ_DP, q->DP)) q 860 net/sched/sch_gred.c if (nla_put_u32(skb, TCA_GRED_VQ_FLAGS, q->red_flags)) q 864 net/sched/sch_gred.c if (nla_put_u64_64bit(skb, TCA_GRED_VQ_STAT_BYTES, q->bytesin, q 867 net/sched/sch_gred.c if (nla_put_u32(skb, TCA_GRED_VQ_STAT_PACKETS, q->packetsin)) q 870 net/sched/sch_gred.c gred_backlog(table, q, sch))) q 873 net/sched/sch_gred.c q->stats.prob_drop)) q 876 net/sched/sch_gred.c q->stats.prob_mark)) q 879 net/sched/sch_gred.c q->stats.forced_drop)) q 882 net/sched/sch_gred.c q->stats.forced_mark)) q 884 net/sched/sch_gred.c if (nla_put_u32(skb, TCA_GRED_VQ_STAT_PDROP, q->stats.pdrop)) q 886 net/sched/sch_gred.c if (nla_put_u32(skb, TCA_GRED_VQ_STAT_OTHER, q->stats.other)) q 219 net/sched/sch_hfsc.c eltree_get_mindl(struct hfsc_sched *q, u64 cur_time) q 224 net/sched/sch_hfsc.c for (n = rb_first(&q->eligible); n != NULL; n = rb_next(n)) { q 236 net/sched/sch_hfsc.c eltree_get_minel(struct hfsc_sched *q) q 240 net/sched/sch_hfsc.c n = rb_first(&q->eligible); q 749 net/sched/sch_hfsc.c if (cl->qdisc->q.qlen == 0 && cl->cl_flags & HFSC_FSC) q 866 net/sched/sch_hfsc.c struct hfsc_sched *q = qdisc_priv(sch); q 869 net/sched/sch_hfsc.c clc = qdisc_class_find(&q->clhash, classid); q 917 net/sched/sch_hfsc.c struct hfsc_sched *q = qdisc_priv(sch); q 984 net/sched/sch_hfsc.c if (cl->qdisc->q.qlen != 0) { q 1009 net/sched/sch_hfsc.c parent = &q->root; q 1054 net/sched/sch_hfsc.c cl->sched = q; q 1067 net/sched/sch_hfsc.c qdisc_class_hash_insert(&q->clhash, &cl->cl_common); q 1074 net/sched/sch_hfsc.c qdisc_class_hash_grow(sch, &q->clhash); q 1083 net/sched/sch_hfsc.c struct hfsc_sched *q = qdisc_priv(sch); q 1088 net/sched/sch_hfsc.c if (cl != &q->root) q 1095 net/sched/sch_hfsc.c struct hfsc_sched *q = qdisc_priv(sch); q 1098 net/sched/sch_hfsc.c if (cl->level > 0 || cl->filter_cnt > 0 || cl == &q->root) q 1107 net/sched/sch_hfsc.c qdisc_class_hash_remove(&q->clhash, &cl->cl_common); q 1118 net/sched/sch_hfsc.c struct hfsc_sched *q = qdisc_priv(sch); q 1130 net/sched/sch_hfsc.c head = &q->root; q 1131 net/sched/sch_hfsc.c tcf = rcu_dereference_bh(q->root.filter_list); q 1162 net/sched/sch_hfsc.c 
cl = hfsc_find_class(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch); q 1244 net/sched/sch_hfsc.c struct hfsc_sched *q = qdisc_priv(sch); q 1248 net/sched/sch_hfsc.c cl = &q->root; q 1343 net/sched/sch_hfsc.c struct hfsc_sched *q = qdisc_priv(sch); q 1350 net/sched/sch_hfsc.c for (i = 0; i < q->clhash.hashsize; i++) { q 1351 net/sched/sch_hfsc.c hlist_for_each_entry(cl, &q->clhash.hash[i], q 1369 net/sched/sch_hfsc.c struct hfsc_sched *q = qdisc_priv(sch); q 1373 net/sched/sch_hfsc.c cl = eltree_get_minel(q); q 1376 net/sched/sch_hfsc.c if (q->root.cl_cfmin != 0) { q 1377 net/sched/sch_hfsc.c if (next_time == 0 || next_time > q->root.cl_cfmin) q 1378 net/sched/sch_hfsc.c next_time = q->root.cl_cfmin; q 1381 net/sched/sch_hfsc.c qdisc_watchdog_schedule(&q->watchdog, next_time); q 1388 net/sched/sch_hfsc.c struct hfsc_sched *q = qdisc_priv(sch); q 1392 net/sched/sch_hfsc.c qdisc_watchdog_init(&q->watchdog, sch); q 1398 net/sched/sch_hfsc.c q->defcls = qopt->defcls; q 1399 net/sched/sch_hfsc.c err = qdisc_class_hash_init(&q->clhash); q 1402 net/sched/sch_hfsc.c q->eligible = RB_ROOT; q 1404 net/sched/sch_hfsc.c err = tcf_block_get(&q->root.block, &q->root.filter_list, sch, extack); q 1408 net/sched/sch_hfsc.c q->root.cl_common.classid = sch->handle; q 1409 net/sched/sch_hfsc.c q->root.sched = q; q 1410 net/sched/sch_hfsc.c q->root.qdisc = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, q 1412 net/sched/sch_hfsc.c if (q->root.qdisc == NULL) q 1413 net/sched/sch_hfsc.c q->root.qdisc = &noop_qdisc; q 1415 net/sched/sch_hfsc.c qdisc_hash_add(q->root.qdisc, true); q 1416 net/sched/sch_hfsc.c INIT_LIST_HEAD(&q->root.children); q 1417 net/sched/sch_hfsc.c q->root.vt_tree = RB_ROOT; q 1418 net/sched/sch_hfsc.c q->root.cf_tree = RB_ROOT; q 1420 net/sched/sch_hfsc.c qdisc_class_hash_insert(&q->clhash, &q->root.cl_common); q 1421 net/sched/sch_hfsc.c qdisc_class_hash_grow(sch, &q->clhash); q 1430 net/sched/sch_hfsc.c struct hfsc_sched *q = qdisc_priv(sch); q 1438 net/sched/sch_hfsc.c q->defcls = qopt->defcls; q 1477 net/sched/sch_hfsc.c struct hfsc_sched *q = qdisc_priv(sch); q 1481 net/sched/sch_hfsc.c for (i = 0; i < q->clhash.hashsize; i++) { q 1482 net/sched/sch_hfsc.c hlist_for_each_entry(cl, &q->clhash.hash[i], cl_common.hnode) q 1485 net/sched/sch_hfsc.c q->eligible = RB_ROOT; q 1486 net/sched/sch_hfsc.c qdisc_watchdog_cancel(&q->watchdog); q 1488 net/sched/sch_hfsc.c sch->q.qlen = 0; q 1494 net/sched/sch_hfsc.c struct hfsc_sched *q = qdisc_priv(sch); q 1499 net/sched/sch_hfsc.c for (i = 0; i < q->clhash.hashsize; i++) { q 1500 net/sched/sch_hfsc.c hlist_for_each_entry(cl, &q->clhash.hash[i], cl_common.hnode) { q 1505 net/sched/sch_hfsc.c for (i = 0; i < q->clhash.hashsize; i++) { q 1506 net/sched/sch_hfsc.c hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i], q 1510 net/sched/sch_hfsc.c qdisc_class_hash_destroy(&q->clhash); q 1511 net/sched/sch_hfsc.c qdisc_watchdog_cancel(&q->watchdog); q 1517 net/sched/sch_hfsc.c struct hfsc_sched *q = qdisc_priv(sch); q 1521 net/sched/sch_hfsc.c qopt.defcls = q->defcls; q 1547 net/sched/sch_hfsc.c first = !cl->qdisc->q.qlen; q 1573 net/sched/sch_hfsc.c sch->q.qlen++; q 1581 net/sched/sch_hfsc.c struct hfsc_sched *q = qdisc_priv(sch); q 1588 net/sched/sch_hfsc.c if (sch->q.qlen == 0) q 1598 net/sched/sch_hfsc.c cl = eltree_get_mindl(q, cur_time); q 1606 net/sched/sch_hfsc.c cl = vttree_get_minvt(&q->root, cur_time); q 1626 net/sched/sch_hfsc.c if (cl->qdisc->q.qlen != 0) { q 1641 net/sched/sch_hfsc.c sch->q.qlen--; q 182 net/sched/sch_hhf.c struct 
hhf_sched_data *q) q 191 net/sched/sch_hhf.c u32 prev = flow->hit_timestamp + q->hhf_evict_timeout; q 201 net/sched/sch_hhf.c q->hh_flows_current_cnt--; q 213 net/sched/sch_hhf.c struct hhf_sched_data *q) q 221 net/sched/sch_hhf.c u32 prev = flow->hit_timestamp + q->hhf_evict_timeout; q 228 net/sched/sch_hhf.c if (q->hh_flows_current_cnt >= q->hh_flows_limit) { q 229 net/sched/sch_hhf.c q->hh_flows_overlimit++; q 237 net/sched/sch_hhf.c q->hh_flows_current_cnt++; q 249 net/sched/sch_hhf.c struct hhf_sched_data *q = qdisc_priv(sch); q 259 net/sched/sch_hhf.c prev = q->hhf_arrays_reset_timestamp + q->hhf_reset_timeout; q 262 net/sched/sch_hhf.c bitmap_zero(q->hhf_valid_bits[i], HHF_ARRAYS_LEN); q 263 net/sched/sch_hhf.c q->hhf_arrays_reset_timestamp = now; q 267 net/sched/sch_hhf.c hash = skb_get_hash_perturb(skb, &q->perturbation); q 271 net/sched/sch_hhf.c flow = seek_list(hash, &q->hh_flows[flow_pos], q); q 294 net/sched/sch_hhf.c if (!test_bit(filter_pos[i], q->hhf_valid_bits[i])) { q 295 net/sched/sch_hhf.c q->hhf_arrays[i][filter_pos[i]] = 0; q 296 net/sched/sch_hhf.c __set_bit(filter_pos[i], q->hhf_valid_bits[i]); q 299 net/sched/sch_hhf.c val = q->hhf_arrays[i][filter_pos[i]] + pkt_len; q 305 net/sched/sch_hhf.c if (min_hhf_val > q->hhf_admit_bytes) { q 307 net/sched/sch_hhf.c flow = alloc_new_hh(&q->hh_flows[flow_pos], q); q 312 net/sched/sch_hhf.c q->hh_flows_total_cnt++; q 322 net/sched/sch_hhf.c if (q->hhf_arrays[i][filter_pos[i]] < min_hhf_val) q 323 net/sched/sch_hhf.c q->hhf_arrays[i][filter_pos[i]] = min_hhf_val; q 351 net/sched/sch_hhf.c struct hhf_sched_data *q = qdisc_priv(sch); q 355 net/sched/sch_hhf.c bucket = &q->buckets[WDRR_BUCKET_FOR_HH]; q 357 net/sched/sch_hhf.c bucket = &q->buckets[WDRR_BUCKET_FOR_NON_HH]; q 362 net/sched/sch_hhf.c sch->q.qlen--; q 368 net/sched/sch_hhf.c return bucket - q->buckets; q 374 net/sched/sch_hhf.c struct hhf_sched_data *q = qdisc_priv(sch); q 381 net/sched/sch_hhf.c bucket = &q->buckets[idx]; q 395 net/sched/sch_hhf.c list_add_tail(&bucket->bucketchain, &q->old_buckets); q 397 net/sched/sch_hhf.c weight = q->hhf_non_hh_weight; q 398 net/sched/sch_hhf.c list_add_tail(&bucket->bucketchain, &q->new_buckets); q 400 net/sched/sch_hhf.c bucket->deficit = weight * q->quantum; q 402 net/sched/sch_hhf.c if (++sch->q.qlen <= sch->limit) q 406 net/sched/sch_hhf.c q->drop_overlimit++; q 420 net/sched/sch_hhf.c struct hhf_sched_data *q = qdisc_priv(sch); q 426 net/sched/sch_hhf.c head = &q->new_buckets; q 428 net/sched/sch_hhf.c head = &q->old_buckets; q 435 net/sched/sch_hhf.c int weight = (bucket - q->buckets == WDRR_BUCKET_FOR_HH) ? 
q 436 net/sched/sch_hhf.c 1 : q->hhf_non_hh_weight; q 438 net/sched/sch_hhf.c bucket->deficit += weight * q->quantum; q 439 net/sched/sch_hhf.c list_move_tail(&bucket->bucketchain, &q->old_buckets); q 445 net/sched/sch_hhf.c sch->q.qlen--; q 451 net/sched/sch_hhf.c if ((head == &q->new_buckets) && !list_empty(&q->old_buckets)) q 452 net/sched/sch_hhf.c list_move_tail(&bucket->bucketchain, &q->old_buckets); q 474 net/sched/sch_hhf.c struct hhf_sched_data *q = qdisc_priv(sch); q 477 net/sched/sch_hhf.c kvfree(q->hhf_arrays[i]); q 478 net/sched/sch_hhf.c kvfree(q->hhf_valid_bits[i]); q 481 net/sched/sch_hhf.c if (!q->hh_flows) q 486 net/sched/sch_hhf.c struct list_head *head = &q->hh_flows[i]; q 495 net/sched/sch_hhf.c kvfree(q->hh_flows); q 511 net/sched/sch_hhf.c struct hhf_sched_data *q = qdisc_priv(sch); q 516 net/sched/sch_hhf.c u32 new_quantum = q->quantum; q 517 net/sched/sch_hhf.c u32 new_hhf_non_hh_weight = q->hhf_non_hh_weight; q 542 net/sched/sch_hhf.c q->quantum = new_quantum; q 543 net/sched/sch_hhf.c q->hhf_non_hh_weight = new_hhf_non_hh_weight; q 546 net/sched/sch_hhf.c q->hh_flows_limit = nla_get_u32(tb[TCA_HHF_HH_FLOWS_LIMIT]); q 551 net/sched/sch_hhf.c q->hhf_reset_timeout = usecs_to_jiffies(us); q 555 net/sched/sch_hhf.c q->hhf_admit_bytes = nla_get_u32(tb[TCA_HHF_ADMIT_BYTES]); q 560 net/sched/sch_hhf.c q->hhf_evict_timeout = usecs_to_jiffies(us); q 563 net/sched/sch_hhf.c qlen = sch->q.qlen; q 565 net/sched/sch_hhf.c while (sch->q.qlen > sch->limit) { q 570 net/sched/sch_hhf.c qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, q 580 net/sched/sch_hhf.c struct hhf_sched_data *q = qdisc_priv(sch); q 584 net/sched/sch_hhf.c q->quantum = psched_mtu(qdisc_dev(sch)); q 585 net/sched/sch_hhf.c get_random_bytes(&q->perturbation, sizeof(q->perturbation)); q 586 net/sched/sch_hhf.c INIT_LIST_HEAD(&q->new_buckets); q 587 net/sched/sch_hhf.c INIT_LIST_HEAD(&q->old_buckets); q 590 net/sched/sch_hhf.c q->hhf_reset_timeout = HZ / 25; /* 40 ms */ q 591 net/sched/sch_hhf.c q->hhf_admit_bytes = 131072; /* 128 KB */ q 592 net/sched/sch_hhf.c q->hhf_evict_timeout = HZ; /* 1 sec */ q 593 net/sched/sch_hhf.c q->hhf_non_hh_weight = 2; q 602 net/sched/sch_hhf.c if (!q->hh_flows) { q 604 net/sched/sch_hhf.c q->hh_flows = kvcalloc(HH_FLOWS_CNT, sizeof(struct list_head), q 606 net/sched/sch_hhf.c if (!q->hh_flows) q 609 net/sched/sch_hhf.c INIT_LIST_HEAD(&q->hh_flows[i]); q 612 net/sched/sch_hhf.c q->hh_flows_limit = 2 * HH_FLOWS_CNT; q 613 net/sched/sch_hhf.c q->hh_flows_overlimit = 0; q 614 net/sched/sch_hhf.c q->hh_flows_total_cnt = 0; q 615 net/sched/sch_hhf.c q->hh_flows_current_cnt = 0; q 619 net/sched/sch_hhf.c q->hhf_arrays[i] = kvcalloc(HHF_ARRAYS_LEN, q 622 net/sched/sch_hhf.c if (!q->hhf_arrays[i]) { q 629 net/sched/sch_hhf.c q->hhf_arrays_reset_timestamp = hhf_time_stamp(); q 633 net/sched/sch_hhf.c q->hhf_valid_bits[i] = kvzalloc(HHF_ARRAYS_LEN / q 635 net/sched/sch_hhf.c if (!q->hhf_valid_bits[i]) { q 645 net/sched/sch_hhf.c struct wdrr_bucket *bucket = q->buckets + i; q 656 net/sched/sch_hhf.c struct hhf_sched_data *q = qdisc_priv(sch); q 664 net/sched/sch_hhf.c nla_put_u32(skb, TCA_HHF_QUANTUM, q->quantum) || q 665 net/sched/sch_hhf.c nla_put_u32(skb, TCA_HHF_HH_FLOWS_LIMIT, q->hh_flows_limit) || q 667 net/sched/sch_hhf.c jiffies_to_usecs(q->hhf_reset_timeout)) || q 668 net/sched/sch_hhf.c nla_put_u32(skb, TCA_HHF_ADMIT_BYTES, q->hhf_admit_bytes) || q 670 net/sched/sch_hhf.c jiffies_to_usecs(q->hhf_evict_timeout)) || q 671 net/sched/sch_hhf.c nla_put_u32(skb, TCA_HHF_NON_HH_WEIGHT, 
q->hhf_non_hh_weight)) q 682 net/sched/sch_hhf.c struct hhf_sched_data *q = qdisc_priv(sch); q 684 net/sched/sch_hhf.c .drop_overlimit = q->drop_overlimit, q 685 net/sched/sch_hhf.c .hh_overlimit = q->hh_flows_overlimit, q 686 net/sched/sch_hhf.c .hh_tot_count = q->hh_flows_total_cnt, q 687 net/sched/sch_hhf.c .hh_cur_count = q->hh_flows_current_cnt, q 126 net/sched/sch_htb.c struct Qdisc *q; q 182 net/sched/sch_htb.c struct htb_sched *q = qdisc_priv(sch); q 185 net/sched/sch_htb.c clc = qdisc_class_find(&q->clhash, handle); q 212 net/sched/sch_htb.c struct htb_sched *q = qdisc_priv(sch); q 231 net/sched/sch_htb.c tcf = rcu_dereference_bh(q->filter_list); q 262 net/sched/sch_htb.c cl = htb_find(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch); q 300 net/sched/sch_htb.c static void htb_add_to_wait_tree(struct htb_sched *q, q 303 net/sched/sch_htb.c struct rb_node **p = &q->hlevel[cl->level].wait_pq.rb_node, *parent = NULL; q 305 net/sched/sch_htb.c cl->pq_key = q->now + delay; q 306 net/sched/sch_htb.c if (cl->pq_key == q->now) q 310 net/sched/sch_htb.c if (q->near_ev_cache[cl->level] > cl->pq_key) q 311 net/sched/sch_htb.c q->near_ev_cache[cl->level] = cl->pq_key; q 323 net/sched/sch_htb.c rb_insert_color(&cl->pq_node, &q->hlevel[cl->level].wait_pq); q 343 net/sched/sch_htb.c static inline void htb_add_class_to_row(struct htb_sched *q, q 346 net/sched/sch_htb.c q->row_mask[cl->level] |= mask; q 350 net/sched/sch_htb.c htb_add_to_id_tree(&q->hlevel[cl->level].hprio[prio].row, cl, prio); q 372 net/sched/sch_htb.c static inline void htb_remove_class_from_row(struct htb_sched *q, q 376 net/sched/sch_htb.c struct htb_level *hlevel = &q->hlevel[cl->level]; q 390 net/sched/sch_htb.c q->row_mask[cl->level] &= ~m; q 400 net/sched/sch_htb.c static void htb_activate_prios(struct htb_sched *q, struct htb_class *cl) q 425 net/sched/sch_htb.c htb_add_class_to_row(q, cl, mask); q 435 net/sched/sch_htb.c static void htb_deactivate_prios(struct htb_sched *q, struct htb_class *cl) q 469 net/sched/sch_htb.c htb_remove_class_from_row(q, cl, mask); q 526 net/sched/sch_htb.c htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, s64 *diff) q 535 net/sched/sch_htb.c q->overlimits++; q 540 net/sched/sch_htb.c htb_deactivate_prios(q, cl); q 543 net/sched/sch_htb.c htb_activate_prios(q, cl); q 555 net/sched/sch_htb.c static inline void htb_activate(struct htb_sched *q, struct htb_class *cl) q 557 net/sched/sch_htb.c WARN_ON(cl->level || !cl->leaf.q || !cl->leaf.q->q.qlen); q 561 net/sched/sch_htb.c htb_activate_prios(q, cl); q 571 net/sched/sch_htb.c static inline void htb_deactivate(struct htb_sched *q, struct htb_class *cl) q 575 net/sched/sch_htb.c htb_deactivate_prios(q, cl); q 584 net/sched/sch_htb.c struct htb_sched *q = qdisc_priv(sch); q 589 net/sched/sch_htb.c if (q->direct_queue.qlen < q->direct_qlen) { q 590 net/sched/sch_htb.c __qdisc_enqueue_tail(skb, &q->direct_queue); q 591 net/sched/sch_htb.c q->direct_pkts++; q 602 net/sched/sch_htb.c } else if ((ret = qdisc_enqueue(skb, cl->leaf.q, q 610 net/sched/sch_htb.c htb_activate(q, cl); q 614 net/sched/sch_htb.c sch->q.qlen++; q 655 net/sched/sch_htb.c static void htb_charge_class(struct htb_sched *q, struct htb_class *cl, q 663 net/sched/sch_htb.c diff = min_t(s64, q->now - cl->t_c, cl->mbuffer); q 673 net/sched/sch_htb.c cl->t_c = q->now; q 677 net/sched/sch_htb.c htb_change_class_mode(q, cl, &diff); q 680 net/sched/sch_htb.c htb_safe_rb_erase(&cl->pq_node, &q->hlevel[cl->level].wait_pq); q 682 net/sched/sch_htb.c htb_add_to_wait_tree(q, cl, 
diff); q 700 net/sched/sch_htb.c static s64 htb_do_events(struct htb_sched *q, const int level, q 708 net/sched/sch_htb.c struct rb_root *wait_pq = &q->hlevel[level].wait_pq; q 719 net/sched/sch_htb.c if (cl->pq_key > q->now) q 723 net/sched/sch_htb.c diff = min_t(s64, q->now - cl->t_c, cl->mbuffer); q 724 net/sched/sch_htb.c htb_change_class_mode(q, cl, &diff); q 726 net/sched/sch_htb.c htb_add_to_wait_tree(q, cl, diff); q 730 net/sched/sch_htb.c if (!(q->warned & HTB_WARN_TOOMANYEVENTS)) { q 732 net/sched/sch_htb.c q->warned |= HTB_WARN_TOOMANYEVENTS; q 735 net/sched/sch_htb.c return q->now; q 823 net/sched/sch_htb.c static struct sk_buff *htb_dequeue_tree(struct htb_sched *q, const int prio, q 828 net/sched/sch_htb.c struct htb_level *hlevel = &q->hlevel[level]; q 844 net/sched/sch_htb.c if (unlikely(cl->leaf.q->q.qlen == 0)) { q 846 net/sched/sch_htb.c htb_deactivate(q, cl); q 849 net/sched/sch_htb.c if ((q->row_mask[level] & (1 << prio)) == 0) q 860 net/sched/sch_htb.c skb = cl->leaf.q->dequeue(cl->leaf.q); q 864 net/sched/sch_htb.c qdisc_warn_nonwc("htb", cl->leaf.q); q 866 net/sched/sch_htb.c &q->hlevel[0].hprio[prio].ptr); q 877 net/sched/sch_htb.c &q->hlevel[0].hprio[prio].ptr); q 882 net/sched/sch_htb.c if (!cl->leaf.q->q.qlen) q 883 net/sched/sch_htb.c htb_deactivate(q, cl); q 884 net/sched/sch_htb.c htb_charge_class(q, cl, level, skb); q 892 net/sched/sch_htb.c struct htb_sched *q = qdisc_priv(sch); q 898 net/sched/sch_htb.c skb = __qdisc_dequeue_head(&q->direct_queue); q 903 net/sched/sch_htb.c sch->q.qlen--; q 907 net/sched/sch_htb.c if (!sch->q.qlen) q 909 net/sched/sch_htb.c q->now = ktime_get_ns(); q 912 net/sched/sch_htb.c next_event = q->now + 5LLU * NSEC_PER_SEC; q 917 net/sched/sch_htb.c s64 event = q->near_ev_cache[level]; q 919 net/sched/sch_htb.c if (q->now >= event) { q 920 net/sched/sch_htb.c event = htb_do_events(q, level, start_at); q 922 net/sched/sch_htb.c event = q->now + NSEC_PER_SEC; q 923 net/sched/sch_htb.c q->near_ev_cache[level] = event; q 929 net/sched/sch_htb.c m = ~q->row_mask[level]; q 934 net/sched/sch_htb.c skb = htb_dequeue_tree(q, prio, level); q 939 net/sched/sch_htb.c if (likely(next_event > q->now)) q 940 net/sched/sch_htb.c qdisc_watchdog_schedule_ns(&q->watchdog, next_event); q 942 net/sched/sch_htb.c schedule_work(&q->work); q 951 net/sched/sch_htb.c struct htb_sched *q = qdisc_priv(sch); q 955 net/sched/sch_htb.c for (i = 0; i < q->clhash.hashsize; i++) { q 956 net/sched/sch_htb.c hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) { q 960 net/sched/sch_htb.c if (cl->leaf.q) q 961 net/sched/sch_htb.c qdisc_reset(cl->leaf.q); q 967 net/sched/sch_htb.c qdisc_watchdog_cancel(&q->watchdog); q 968 net/sched/sch_htb.c __qdisc_reset_queue(&q->direct_queue); q 969 net/sched/sch_htb.c sch->q.qlen = 0; q 971 net/sched/sch_htb.c memset(q->hlevel, 0, sizeof(q->hlevel)); q 972 net/sched/sch_htb.c memset(q->row_mask, 0, sizeof(q->row_mask)); q 987 net/sched/sch_htb.c struct htb_sched *q = container_of(work, struct htb_sched, work); q 988 net/sched/sch_htb.c struct Qdisc *sch = q->watchdog.qdisc; q 998 net/sched/sch_htb.c struct htb_sched *q = qdisc_priv(sch); q 1003 net/sched/sch_htb.c qdisc_watchdog_init(&q->watchdog, sch); q 1004 net/sched/sch_htb.c INIT_WORK(&q->work, htb_work_func); q 1009 net/sched/sch_htb.c err = tcf_block_get(&q->block, &q->filter_list, sch, extack); q 1025 net/sched/sch_htb.c err = qdisc_class_hash_init(&q->clhash); q 1029 net/sched/sch_htb.c qdisc_skb_head_init(&q->direct_queue); q 1032 net/sched/sch_htb.c 
q->direct_qlen = nla_get_u32(tb[TCA_HTB_DIRECT_QLEN]); q 1034 net/sched/sch_htb.c q->direct_qlen = qdisc_dev(sch)->tx_queue_len; q 1036 net/sched/sch_htb.c if ((q->rate2quantum = gopt->rate2quantum) < 1) q 1037 net/sched/sch_htb.c q->rate2quantum = 1; q 1038 net/sched/sch_htb.c q->defcls = gopt->defcls; q 1045 net/sched/sch_htb.c struct htb_sched *q = qdisc_priv(sch); q 1049 net/sched/sch_htb.c sch->qstats.overlimits = q->overlimits; q 1054 net/sched/sch_htb.c gopt.direct_pkts = q->direct_pkts; q 1056 net/sched/sch_htb.c gopt.rate2quantum = q->rate2quantum; q 1057 net/sched/sch_htb.c gopt.defcls = q->defcls; q 1064 net/sched/sch_htb.c nla_put_u32(skb, TCA_HTB_DIRECT_QLEN, q->direct_qlen)) q 1086 net/sched/sch_htb.c if (!cl->level && cl->leaf.q) q 1087 net/sched/sch_htb.c tcm->tcm_info = cl->leaf.q->handle; q 1130 net/sched/sch_htb.c if (!cl->level && cl->leaf.q) q 1131 net/sched/sch_htb.c qdisc_qstats_qlen_backlog(cl->leaf.q, &qlen, &qs.backlog); q 1159 net/sched/sch_htb.c *old = qdisc_replace(sch, new, &cl->leaf.q); q 1166 net/sched/sch_htb.c return !cl->level ? cl->leaf.q : NULL; q 1187 net/sched/sch_htb.c static void htb_parent_to_leaf(struct htb_sched *q, struct htb_class *cl, q 1192 net/sched/sch_htb.c WARN_ON(cl->level || !cl->leaf.q || cl->prio_activity); q 1196 net/sched/sch_htb.c &q->hlevel[parent->level].wait_pq); q 1200 net/sched/sch_htb.c parent->leaf.q = new_q ? new_q : &noop_qdisc; q 1210 net/sched/sch_htb.c WARN_ON(!cl->leaf.q); q 1211 net/sched/sch_htb.c qdisc_put(cl->leaf.q); q 1220 net/sched/sch_htb.c struct htb_sched *q = qdisc_priv(sch); q 1225 net/sched/sch_htb.c cancel_work_sync(&q->work); q 1226 net/sched/sch_htb.c qdisc_watchdog_cancel(&q->watchdog); q 1232 net/sched/sch_htb.c tcf_block_put(q->block); q 1234 net/sched/sch_htb.c for (i = 0; i < q->clhash.hashsize; i++) { q 1235 net/sched/sch_htb.c hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) { q 1240 net/sched/sch_htb.c for (i = 0; i < q->clhash.hashsize; i++) { q 1241 net/sched/sch_htb.c hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i], q 1245 net/sched/sch_htb.c qdisc_class_hash_destroy(&q->clhash); q 1246 net/sched/sch_htb.c __qdisc_reset_queue(&q->direct_queue); q 1251 net/sched/sch_htb.c struct htb_sched *q = qdisc_priv(sch); q 1273 net/sched/sch_htb.c qdisc_purge_queue(cl->leaf.q); q 1276 net/sched/sch_htb.c qdisc_class_hash_remove(&q->clhash, &cl->common); q 1281 net/sched/sch_htb.c htb_deactivate(q, cl); q 1285 net/sched/sch_htb.c &q->hlevel[cl->level].wait_pq); q 1288 net/sched/sch_htb.c htb_parent_to_leaf(q, cl, new_q); q 1301 net/sched/sch_htb.c struct htb_sched *q = qdisc_priv(sch); q 1404 net/sched/sch_htb.c qdisc_purge_queue(parent->leaf.q); q 1405 net/sched/sch_htb.c parent_qdisc = parent->leaf.q; q 1407 net/sched/sch_htb.c htb_deactivate(q, parent); q 1411 net/sched/sch_htb.c htb_safe_rb_erase(&parent->pq_node, &q->hlevel[0].wait_pq); q 1419 net/sched/sch_htb.c cl->leaf.q = new_q ? new_q : &noop_qdisc; q 1432 net/sched/sch_htb.c qdisc_class_hash_insert(&q->clhash, &cl->common); q 1435 net/sched/sch_htb.c if (cl->leaf.q != &noop_qdisc) q 1436 net/sched/sch_htb.c qdisc_hash_add(cl->leaf.q, true); q 1463 net/sched/sch_htb.c do_div(quantum, q->rate2quantum); q 1490 net/sched/sch_htb.c qdisc_class_hash_grow(sch, &q->clhash); q 1502 net/sched/sch_htb.c struct htb_sched *q = qdisc_priv(sch); q 1505 net/sched/sch_htb.c return cl ? 
cl->block : q->block; q 1537 net/sched/sch_htb.c struct htb_sched *q = qdisc_priv(sch); q 1544 net/sched/sch_htb.c for (i = 0; i < q->clhash.hashsize; i++) { q 1545 net/sched/sch_htb.c hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) { q 50 net/sched/sch_ingress.c struct ingress_sched_data *q = qdisc_priv(sch); q 52 net/sched/sch_ingress.c return q->block; q 64 net/sched/sch_ingress.c struct ingress_sched_data *q = qdisc_priv(sch); q 66 net/sched/sch_ingress.c q->block_info.block_index = block_index; q 71 net/sched/sch_ingress.c struct ingress_sched_data *q = qdisc_priv(sch); q 73 net/sched/sch_ingress.c return q->block_info.block_index; q 79 net/sched/sch_ingress.c struct ingress_sched_data *q = qdisc_priv(sch); q 84 net/sched/sch_ingress.c mini_qdisc_pair_init(&q->miniqp, sch, &dev->miniq_ingress); q 86 net/sched/sch_ingress.c q->block_info.binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS; q 87 net/sched/sch_ingress.c q->block_info.chain_head_change = clsact_chain_head_change; q 88 net/sched/sch_ingress.c q->block_info.chain_head_change_priv = &q->miniqp; q 90 net/sched/sch_ingress.c return tcf_block_get_ext(&q->block, sch, &q->block_info, extack); q 95 net/sched/sch_ingress.c struct ingress_sched_data *q = qdisc_priv(sch); q 97 net/sched/sch_ingress.c tcf_block_put_ext(q->block, sch, &q->block_info); q 168 net/sched/sch_ingress.c struct clsact_sched_data *q = qdisc_priv(sch); q 172 net/sched/sch_ingress.c return q->ingress_block; q 174 net/sched/sch_ingress.c return q->egress_block; q 182 net/sched/sch_ingress.c struct clsact_sched_data *q = qdisc_priv(sch); q 184 net/sched/sch_ingress.c q->ingress_block_info.block_index = block_index; q 189 net/sched/sch_ingress.c struct clsact_sched_data *q = qdisc_priv(sch); q 191 net/sched/sch_ingress.c q->egress_block_info.block_index = block_index; q 196 net/sched/sch_ingress.c struct clsact_sched_data *q = qdisc_priv(sch); q 198 net/sched/sch_ingress.c return q->ingress_block_info.block_index; q 203 net/sched/sch_ingress.c struct clsact_sched_data *q = qdisc_priv(sch); q 205 net/sched/sch_ingress.c return q->egress_block_info.block_index; q 211 net/sched/sch_ingress.c struct clsact_sched_data *q = qdisc_priv(sch); q 218 net/sched/sch_ingress.c mini_qdisc_pair_init(&q->miniqp_ingress, sch, &dev->miniq_ingress); q 220 net/sched/sch_ingress.c q->ingress_block_info.binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS; q 221 net/sched/sch_ingress.c q->ingress_block_info.chain_head_change = clsact_chain_head_change; q 222 net/sched/sch_ingress.c q->ingress_block_info.chain_head_change_priv = &q->miniqp_ingress; q 224 net/sched/sch_ingress.c err = tcf_block_get_ext(&q->ingress_block, sch, &q->ingress_block_info, q 229 net/sched/sch_ingress.c mini_qdisc_pair_init(&q->miniqp_egress, sch, &dev->miniq_egress); q 231 net/sched/sch_ingress.c q->egress_block_info.binder_type = FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS; q 232 net/sched/sch_ingress.c q->egress_block_info.chain_head_change = clsact_chain_head_change; q 233 net/sched/sch_ingress.c q->egress_block_info.chain_head_change_priv = &q->miniqp_egress; q 235 net/sched/sch_ingress.c return tcf_block_get_ext(&q->egress_block, sch, &q->egress_block_info, extack); q 240 net/sched/sch_ingress.c struct clsact_sched_data *q = qdisc_priv(sch); q 242 net/sched/sch_ingress.c tcf_block_put_ext(q->egress_block, sch, &q->egress_block_info); q 243 net/sched/sch_ingress.c tcf_block_put_ext(q->ingress_block, sch, &q->ingress_block_info); q 135 net/sched/sch_mq.c sch->q.qlen = 0; q 156 net/sched/sch_mq.c 
sch->q.qlen += qlen; q 158 net/sched/sch_mq.c sch->q.qlen += qdisc->q.qlen; q 392 net/sched/sch_mqprio.c sch->q.qlen = 0; q 414 net/sched/sch_mqprio.c sch->q.qlen += qlen; q 416 net/sched/sch_mqprio.c sch->q.qlen += qdisc->q.qlen; q 530 net/sched/sch_mqprio.c struct netdev_queue *q = netdev_get_tx_queue(dev, i); q 531 net/sched/sch_mqprio.c struct Qdisc *qdisc = rtnl_dereference(q->qdisc); q 32 net/sched/sch_multiq.c struct multiq_sched_data *q = qdisc_priv(sch); q 35 net/sched/sch_multiq.c struct tcf_proto *fl = rcu_dereference_bh(q->filter_list); q 53 net/sched/sch_multiq.c if (band >= q->bands) q 54 net/sched/sch_multiq.c return q->queues[0]; q 56 net/sched/sch_multiq.c return q->queues[band]; q 79 net/sched/sch_multiq.c sch->q.qlen++; q 89 net/sched/sch_multiq.c struct multiq_sched_data *q = qdisc_priv(sch); q 94 net/sched/sch_multiq.c for (band = 0; band < q->bands; band++) { q 96 net/sched/sch_multiq.c q->curband++; q 97 net/sched/sch_multiq.c if (q->curband >= q->bands) q 98 net/sched/sch_multiq.c q->curband = 0; q 104 net/sched/sch_multiq.c netdev_get_tx_queue(qdisc_dev(sch), q->curband))) { q 105 net/sched/sch_multiq.c qdisc = q->queues[q->curband]; q 109 net/sched/sch_multiq.c sch->q.qlen--; q 120 net/sched/sch_multiq.c struct multiq_sched_data *q = qdisc_priv(sch); q 121 net/sched/sch_multiq.c unsigned int curband = q->curband; q 126 net/sched/sch_multiq.c for (band = 0; band < q->bands; band++) { q 129 net/sched/sch_multiq.c if (curband >= q->bands) q 137 net/sched/sch_multiq.c qdisc = q->queues[curband]; q 151 net/sched/sch_multiq.c struct multiq_sched_data *q = qdisc_priv(sch); q 153 net/sched/sch_multiq.c for (band = 0; band < q->bands; band++) q 154 net/sched/sch_multiq.c qdisc_reset(q->queues[band]); q 155 net/sched/sch_multiq.c sch->q.qlen = 0; q 156 net/sched/sch_multiq.c q->curband = 0; q 163 net/sched/sch_multiq.c struct multiq_sched_data *q = qdisc_priv(sch); q 165 net/sched/sch_multiq.c tcf_block_put(q->block); q 166 net/sched/sch_multiq.c for (band = 0; band < q->bands; band++) q 167 net/sched/sch_multiq.c qdisc_put(q->queues[band]); q 169 net/sched/sch_multiq.c kfree(q->queues); q 175 net/sched/sch_multiq.c struct multiq_sched_data *q = qdisc_priv(sch); q 189 net/sched/sch_multiq.c removed = kmalloc(sizeof(*removed) * (q->max_bands - q->bands), q 195 net/sched/sch_multiq.c q->bands = qopt->bands; q 196 net/sched/sch_multiq.c for (i = q->bands; i < q->max_bands; i++) { q 197 net/sched/sch_multiq.c if (q->queues[i] != &noop_qdisc) { q 198 net/sched/sch_multiq.c struct Qdisc *child = q->queues[i]; q 200 net/sched/sch_multiq.c q->queues[i] = &noop_qdisc; q 212 net/sched/sch_multiq.c for (i = 0; i < q->bands; i++) { q 213 net/sched/sch_multiq.c if (q->queues[i] == &noop_qdisc) { q 221 net/sched/sch_multiq.c old = q->queues[i]; q 222 net/sched/sch_multiq.c q->queues[i] = child; q 239 net/sched/sch_multiq.c struct multiq_sched_data *q = qdisc_priv(sch); q 242 net/sched/sch_multiq.c q->queues = NULL; q 247 net/sched/sch_multiq.c err = tcf_block_get(&q->block, &q->filter_list, sch, extack); q 251 net/sched/sch_multiq.c q->max_bands = qdisc_dev(sch)->num_tx_queues; q 253 net/sched/sch_multiq.c q->queues = kcalloc(q->max_bands, sizeof(struct Qdisc *), GFP_KERNEL); q 254 net/sched/sch_multiq.c if (!q->queues) q 256 net/sched/sch_multiq.c for (i = 0; i < q->max_bands; i++) q 257 net/sched/sch_multiq.c q->queues[i] = &noop_qdisc; q 264 net/sched/sch_multiq.c struct multiq_sched_data *q = qdisc_priv(sch); q 268 net/sched/sch_multiq.c opt.bands = q->bands; q 269 
net/sched/sch_multiq.c opt.max_bands = q->max_bands; q 284 net/sched/sch_multiq.c struct multiq_sched_data *q = qdisc_priv(sch); q 290 net/sched/sch_multiq.c *old = qdisc_replace(sch, new, &q->queues[band]); q 297 net/sched/sch_multiq.c struct multiq_sched_data *q = qdisc_priv(sch); q 300 net/sched/sch_multiq.c return q->queues[band]; q 305 net/sched/sch_multiq.c struct multiq_sched_data *q = qdisc_priv(sch); q 308 net/sched/sch_multiq.c if (band - 1 >= q->bands) q 320 net/sched/sch_multiq.c static void multiq_unbind(struct Qdisc *q, unsigned long cl) q 327 net/sched/sch_multiq.c struct multiq_sched_data *q = qdisc_priv(sch); q 330 net/sched/sch_multiq.c tcm->tcm_info = q->queues[cl - 1]->handle; q 337 net/sched/sch_multiq.c struct multiq_sched_data *q = qdisc_priv(sch); q 340 net/sched/sch_multiq.c cl_q = q->queues[cl - 1]; q 351 net/sched/sch_multiq.c struct multiq_sched_data *q = qdisc_priv(sch); q 357 net/sched/sch_multiq.c for (band = 0; band < q->bands; band++) { q 373 net/sched/sch_multiq.c struct multiq_sched_data *q = qdisc_priv(sch); q 377 net/sched/sch_multiq.c return q->block; q 200 net/sched/sch_netem.c static bool loss_4state(struct netem_sched_data *q) q 202 net/sched/sch_netem.c struct clgstate *clg = &q->clg; q 265 net/sched/sch_netem.c static bool loss_gilb_ell(struct netem_sched_data *q) q 267 net/sched/sch_netem.c struct clgstate *clg = &q->clg; q 286 net/sched/sch_netem.c static bool loss_event(struct netem_sched_data *q) q 288 net/sched/sch_netem.c switch (q->loss_model) { q 291 net/sched/sch_netem.c return q->loss && q->loss >= get_crandom(&q->loss_cor); q 299 net/sched/sch_netem.c return loss_4state(q); q 307 net/sched/sch_netem.c return loss_gilb_ell(q); q 345 net/sched/sch_netem.c static u64 packet_time_ns(u64 len, const struct netem_sched_data *q) q 347 net/sched/sch_netem.c len += q->packet_overhead; q 349 net/sched/sch_netem.c if (q->cell_size) { q 350 net/sched/sch_netem.c u32 cells = reciprocal_divide(len, q->cell_size_reciprocal); q 352 net/sched/sch_netem.c if (len > cells * q->cell_size) /* extra cell needed for remainder */ q 354 net/sched/sch_netem.c len = cells * (q->cell_size + q->cell_overhead); q 357 net/sched/sch_netem.c return div64_u64(len * NSEC_PER_SEC, q->rate); q 362 net/sched/sch_netem.c struct netem_sched_data *q = qdisc_priv(sch); q 363 net/sched/sch_netem.c struct rb_node *p = rb_first(&q->t_root); q 369 net/sched/sch_netem.c rb_erase(&skb->rbnode, &q->t_root); q 373 net/sched/sch_netem.c rtnl_kfree_skbs(q->t_head, q->t_tail); q 374 net/sched/sch_netem.c q->t_head = NULL; q 375 net/sched/sch_netem.c q->t_tail = NULL; q 380 net/sched/sch_netem.c struct netem_sched_data *q = qdisc_priv(sch); q 383 net/sched/sch_netem.c if (!q->t_tail || tnext >= netem_skb_cb(q->t_tail)->time_to_send) { q 384 net/sched/sch_netem.c if (q->t_tail) q 385 net/sched/sch_netem.c q->t_tail->next = nskb; q 387 net/sched/sch_netem.c q->t_head = nskb; q 388 net/sched/sch_netem.c q->t_tail = nskb; q 390 net/sched/sch_netem.c struct rb_node **p = &q->t_root.rb_node, *parent = NULL; q 403 net/sched/sch_netem.c rb_insert_color(&nskb->rbnode, &q->t_root); q 405 net/sched/sch_netem.c sch->q.qlen++; q 437 net/sched/sch_netem.c struct netem_sched_data *q = qdisc_priv(sch); q 451 net/sched/sch_netem.c if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor)) q 455 net/sched/sch_netem.c if (loss_event(q)) { q 456 net/sched/sch_netem.c if (q->ecn && INET_ECN_set_ce(skb)) q 470 net/sched/sch_netem.c if (q->latency || q->jitter || q->rate) q 480 net/sched/sch_netem.c u32 
dupsave = q->duplicate; /* prevent duplicating a dup... */ q 482 net/sched/sch_netem.c q->duplicate = 0; q 484 net/sched/sch_netem.c q->duplicate = dupsave; q 494 net/sched/sch_netem.c if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) { q 520 net/sched/sch_netem.c if (unlikely(sch->q.qlen >= sch->limit)) { q 530 net/sched/sch_netem.c if (q->gap == 0 || /* not doing reordering */ q 531 net/sched/sch_netem.c q->counter < q->gap - 1 || /* inside last reordering gap */ q 532 net/sched/sch_netem.c q->reorder < get_crandom(&q->reorder_cor)) { q 536 net/sched/sch_netem.c delay = tabledist(q->latency, q->jitter, q 537 net/sched/sch_netem.c &q->delay_cor, q->delay_dist); q 541 net/sched/sch_netem.c if (q->rate) { q 544 net/sched/sch_netem.c if (sch->q.tail) q 545 net/sched/sch_netem.c last = netem_skb_cb(sch->q.tail); q 546 net/sched/sch_netem.c if (q->t_root.rb_node) { q 550 net/sched/sch_netem.c t_skb = skb_rb_last(&q->t_root); q 556 net/sched/sch_netem.c if (q->t_tail) { q 558 net/sched/sch_netem.c netem_skb_cb(q->t_tail); q 576 net/sched/sch_netem.c delay += packet_time_ns(qdisc_pkt_len(skb), q); q 580 net/sched/sch_netem.c ++q->counter; q 588 net/sched/sch_netem.c q->counter = 0; q 590 net/sched/sch_netem.c __qdisc_enqueue_head(skb, &sch->q); q 629 net/sched/sch_netem.c static void get_slot_next(struct netem_sched_data *q, u64 now) q 633 net/sched/sch_netem.c if (!q->slot_dist) q 634 net/sched/sch_netem.c next_delay = q->slot_config.min_delay + q 636 net/sched/sch_netem.c (q->slot_config.max_delay - q 637 net/sched/sch_netem.c q->slot_config.min_delay) >> 32); q 639 net/sched/sch_netem.c next_delay = tabledist(q->slot_config.dist_delay, q 640 net/sched/sch_netem.c (s32)(q->slot_config.dist_jitter), q 641 net/sched/sch_netem.c NULL, q->slot_dist); q 643 net/sched/sch_netem.c q->slot.slot_next = now + next_delay; q 644 net/sched/sch_netem.c q->slot.packets_left = q->slot_config.max_packets; q 645 net/sched/sch_netem.c q->slot.bytes_left = q->slot_config.max_bytes; q 648 net/sched/sch_netem.c static struct sk_buff *netem_peek(struct netem_sched_data *q) q 650 net/sched/sch_netem.c struct sk_buff *skb = skb_rb_first(&q->t_root); q 654 net/sched/sch_netem.c return q->t_head; q 655 net/sched/sch_netem.c if (!q->t_head) q 659 net/sched/sch_netem.c t2 = netem_skb_cb(q->t_head)->time_to_send; q 662 net/sched/sch_netem.c return q->t_head; q 665 net/sched/sch_netem.c static void netem_erase_head(struct netem_sched_data *q, struct sk_buff *skb) q 667 net/sched/sch_netem.c if (skb == q->t_head) { q 668 net/sched/sch_netem.c q->t_head = skb->next; q 669 net/sched/sch_netem.c if (!q->t_head) q 670 net/sched/sch_netem.c q->t_tail = NULL; q 672 net/sched/sch_netem.c rb_erase(&skb->rbnode, &q->t_root); q 678 net/sched/sch_netem.c struct netem_sched_data *q = qdisc_priv(sch); q 682 net/sched/sch_netem.c skb = __qdisc_dequeue_head(&sch->q); q 689 net/sched/sch_netem.c skb = netem_peek(q); q 696 net/sched/sch_netem.c if (q->slot.slot_next && q->slot.slot_next < time_to_send) q 697 net/sched/sch_netem.c get_slot_next(q, now); q 699 net/sched/sch_netem.c if (time_to_send <= now && q->slot.slot_next <= now) { q 700 net/sched/sch_netem.c netem_erase_head(q, skb); q 701 net/sched/sch_netem.c sch->q.qlen--; q 710 net/sched/sch_netem.c if (q->slot.slot_next) { q 711 net/sched/sch_netem.c q->slot.packets_left--; q 712 net/sched/sch_netem.c q->slot.bytes_left -= qdisc_pkt_len(skb); q 713 net/sched/sch_netem.c if (q->slot.packets_left <= 0 || q 714 net/sched/sch_netem.c q->slot.bytes_left <= 0) q 715 
net/sched/sch_netem.c get_slot_next(q, now); q 718 net/sched/sch_netem.c if (q->qdisc) { q 723 net/sched/sch_netem.c err = qdisc_enqueue(skb, q->qdisc, &to_free); q 736 net/sched/sch_netem.c if (q->qdisc) { q 737 net/sched/sch_netem.c skb = q->qdisc->ops->dequeue(q->qdisc); q 742 net/sched/sch_netem.c qdisc_watchdog_schedule_ns(&q->watchdog, q 744 net/sched/sch_netem.c q->slot.slot_next)); q 747 net/sched/sch_netem.c if (q->qdisc) { q 748 net/sched/sch_netem.c skb = q->qdisc->ops->dequeue(q->qdisc); q 757 net/sched/sch_netem.c struct netem_sched_data *q = qdisc_priv(sch); q 761 net/sched/sch_netem.c if (q->qdisc) q 762 net/sched/sch_netem.c qdisc_reset(q->qdisc); q 763 net/sched/sch_netem.c qdisc_watchdog_cancel(&q->watchdog); q 806 net/sched/sch_netem.c static void get_slot(struct netem_sched_data *q, const struct nlattr *attr) q 810 net/sched/sch_netem.c q->slot_config = *c; q 811 net/sched/sch_netem.c if (q->slot_config.max_packets == 0) q 812 net/sched/sch_netem.c q->slot_config.max_packets = INT_MAX; q 813 net/sched/sch_netem.c if (q->slot_config.max_bytes == 0) q 814 net/sched/sch_netem.c q->slot_config.max_bytes = INT_MAX; q 815 net/sched/sch_netem.c q->slot.packets_left = q->slot_config.max_packets; q 816 net/sched/sch_netem.c q->slot.bytes_left = q->slot_config.max_bytes; q 817 net/sched/sch_netem.c if (q->slot_config.min_delay | q->slot_config.max_delay | q 818 net/sched/sch_netem.c q->slot_config.dist_jitter) q 819 net/sched/sch_netem.c q->slot.slot_next = ktime_get_ns(); q 821 net/sched/sch_netem.c q->slot.slot_next = 0; q 824 net/sched/sch_netem.c static void get_correlation(struct netem_sched_data *q, const struct nlattr *attr) q 828 net/sched/sch_netem.c init_crandom(&q->delay_cor, c->delay_corr); q 829 net/sched/sch_netem.c init_crandom(&q->loss_cor, c->loss_corr); q 830 net/sched/sch_netem.c init_crandom(&q->dup_cor, c->dup_corr); q 833 net/sched/sch_netem.c static void get_reorder(struct netem_sched_data *q, const struct nlattr *attr) q 837 net/sched/sch_netem.c q->reorder = r->probability; q 838 net/sched/sch_netem.c init_crandom(&q->reorder_cor, r->correlation); q 841 net/sched/sch_netem.c static void get_corrupt(struct netem_sched_data *q, const struct nlattr *attr) q 845 net/sched/sch_netem.c q->corrupt = r->probability; q 846 net/sched/sch_netem.c init_crandom(&q->corrupt_cor, r->correlation); q 849 net/sched/sch_netem.c static void get_rate(struct netem_sched_data *q, const struct nlattr *attr) q 853 net/sched/sch_netem.c q->rate = r->rate; q 854 net/sched/sch_netem.c q->packet_overhead = r->packet_overhead; q 855 net/sched/sch_netem.c q->cell_size = r->cell_size; q 856 net/sched/sch_netem.c q->cell_overhead = r->cell_overhead; q 857 net/sched/sch_netem.c if (q->cell_size) q 858 net/sched/sch_netem.c q->cell_size_reciprocal = reciprocal_value(q->cell_size); q 860 net/sched/sch_netem.c q->cell_size_reciprocal = (struct reciprocal_value) { 0 }; q 863 net/sched/sch_netem.c static int get_loss_clg(struct netem_sched_data *q, const struct nlattr *attr) q 880 net/sched/sch_netem.c q->loss_model = CLG_4_STATES; q 882 net/sched/sch_netem.c q->clg.state = TX_IN_GAP_PERIOD; q 883 net/sched/sch_netem.c q->clg.a1 = gi->p13; q 884 net/sched/sch_netem.c q->clg.a2 = gi->p31; q 885 net/sched/sch_netem.c q->clg.a3 = gi->p32; q 886 net/sched/sch_netem.c q->clg.a4 = gi->p14; q 887 net/sched/sch_netem.c q->clg.a5 = gi->p23; q 899 net/sched/sch_netem.c q->loss_model = CLG_GILB_ELL; q 900 net/sched/sch_netem.c q->clg.state = GOOD_STATE; q 901 net/sched/sch_netem.c q->clg.a1 = ge->p; q 
902 net/sched/sch_netem.c q->clg.a2 = ge->r; q 903 net/sched/sch_netem.c q->clg.a3 = ge->h; q 904 net/sched/sch_netem.c q->clg.a4 = ge->k1; q 953 net/sched/sch_netem.c struct netem_sched_data *q = qdisc_priv(sch); q 969 net/sched/sch_netem.c old_clg = q->clg; q 970 net/sched/sch_netem.c old_loss_model = q->loss_model; q 973 net/sched/sch_netem.c ret = get_loss_clg(q, tb[TCA_NETEM_LOSS]); q 975 net/sched/sch_netem.c q->loss_model = old_loss_model; q 979 net/sched/sch_netem.c q->loss_model = CLG_RANDOM; q 983 net/sched/sch_netem.c ret = get_dist_table(sch, &q->delay_dist, q 990 net/sched/sch_netem.c ret = get_dist_table(sch, &q->slot_dist, q 998 net/sched/sch_netem.c q->latency = PSCHED_TICKS2NS(qopt->latency); q 999 net/sched/sch_netem.c q->jitter = PSCHED_TICKS2NS(qopt->jitter); q 1000 net/sched/sch_netem.c q->limit = qopt->limit; q 1001 net/sched/sch_netem.c q->gap = qopt->gap; q 1002 net/sched/sch_netem.c q->counter = 0; q 1003 net/sched/sch_netem.c q->loss = qopt->loss; q 1004 net/sched/sch_netem.c q->duplicate = qopt->duplicate; q 1009 net/sched/sch_netem.c if (q->gap) q 1010 net/sched/sch_netem.c q->reorder = ~0; q 1013 net/sched/sch_netem.c get_correlation(q, tb[TCA_NETEM_CORR]); q 1016 net/sched/sch_netem.c get_reorder(q, tb[TCA_NETEM_REORDER]); q 1019 net/sched/sch_netem.c get_corrupt(q, tb[TCA_NETEM_CORRUPT]); q 1022 net/sched/sch_netem.c get_rate(q, tb[TCA_NETEM_RATE]); q 1025 net/sched/sch_netem.c q->rate = max_t(u64, q->rate, q 1029 net/sched/sch_netem.c q->latency = nla_get_s64(tb[TCA_NETEM_LATENCY64]); q 1032 net/sched/sch_netem.c q->jitter = nla_get_s64(tb[TCA_NETEM_JITTER64]); q 1035 net/sched/sch_netem.c q->ecn = nla_get_u32(tb[TCA_NETEM_ECN]); q 1038 net/sched/sch_netem.c get_slot(q, tb[TCA_NETEM_SLOT]); q 1047 net/sched/sch_netem.c q->clg = old_clg; q 1048 net/sched/sch_netem.c q->loss_model = old_loss_model; q 1055 net/sched/sch_netem.c struct netem_sched_data *q = qdisc_priv(sch); q 1058 net/sched/sch_netem.c qdisc_watchdog_init(&q->watchdog, sch); q 1063 net/sched/sch_netem.c q->loss_model = CLG_RANDOM; q 1072 net/sched/sch_netem.c struct netem_sched_data *q = qdisc_priv(sch); q 1074 net/sched/sch_netem.c qdisc_watchdog_cancel(&q->watchdog); q 1075 net/sched/sch_netem.c if (q->qdisc) q 1076 net/sched/sch_netem.c qdisc_put(q->qdisc); q 1077 net/sched/sch_netem.c dist_free(q->delay_dist); q 1078 net/sched/sch_netem.c dist_free(q->slot_dist); q 1081 net/sched/sch_netem.c static int dump_loss_model(const struct netem_sched_data *q, q 1090 net/sched/sch_netem.c switch (q->loss_model) { q 1098 net/sched/sch_netem.c .p13 = q->clg.a1, q 1099 net/sched/sch_netem.c .p31 = q->clg.a2, q 1100 net/sched/sch_netem.c .p32 = q->clg.a3, q 1101 net/sched/sch_netem.c .p14 = q->clg.a4, q 1102 net/sched/sch_netem.c .p23 = q->clg.a5, q 1111 net/sched/sch_netem.c .p = q->clg.a1, q 1112 net/sched/sch_netem.c .r = q->clg.a2, q 1113 net/sched/sch_netem.c .h = q->clg.a3, q 1114 net/sched/sch_netem.c .k1 = q->clg.a4, q 1133 net/sched/sch_netem.c const struct netem_sched_data *q = qdisc_priv(sch); q 1142 net/sched/sch_netem.c qopt.latency = min_t(psched_tdiff_t, PSCHED_NS2TICKS(q->latency), q 1144 net/sched/sch_netem.c qopt.jitter = min_t(psched_tdiff_t, PSCHED_NS2TICKS(q->jitter), q 1146 net/sched/sch_netem.c qopt.limit = q->limit; q 1147 net/sched/sch_netem.c qopt.loss = q->loss; q 1148 net/sched/sch_netem.c qopt.gap = q->gap; q 1149 net/sched/sch_netem.c qopt.duplicate = q->duplicate; q 1153 net/sched/sch_netem.c if (nla_put(skb, TCA_NETEM_LATENCY64, sizeof(q->latency), &q->latency)) q 1156 
net/sched/sch_netem.c if (nla_put(skb, TCA_NETEM_JITTER64, sizeof(q->jitter), &q->jitter)) q 1159 net/sched/sch_netem.c cor.delay_corr = q->delay_cor.rho; q 1160 net/sched/sch_netem.c cor.loss_corr = q->loss_cor.rho; q 1161 net/sched/sch_netem.c cor.dup_corr = q->dup_cor.rho; q 1165 net/sched/sch_netem.c reorder.probability = q->reorder; q 1166 net/sched/sch_netem.c reorder.correlation = q->reorder_cor.rho; q 1170 net/sched/sch_netem.c corrupt.probability = q->corrupt; q 1171 net/sched/sch_netem.c corrupt.correlation = q->corrupt_cor.rho; q 1175 net/sched/sch_netem.c if (q->rate >= (1ULL << 32)) { q 1176 net/sched/sch_netem.c if (nla_put_u64_64bit(skb, TCA_NETEM_RATE64, q->rate, q 1181 net/sched/sch_netem.c rate.rate = q->rate; q 1183 net/sched/sch_netem.c rate.packet_overhead = q->packet_overhead; q 1184 net/sched/sch_netem.c rate.cell_size = q->cell_size; q 1185 net/sched/sch_netem.c rate.cell_overhead = q->cell_overhead; q 1189 net/sched/sch_netem.c if (q->ecn && nla_put_u32(skb, TCA_NETEM_ECN, q->ecn)) q 1192 net/sched/sch_netem.c if (dump_loss_model(q, skb) != 0) q 1195 net/sched/sch_netem.c if (q->slot_config.min_delay | q->slot_config.max_delay | q 1196 net/sched/sch_netem.c q->slot_config.dist_jitter) { q 1197 net/sched/sch_netem.c slot = q->slot_config; q 1216 net/sched/sch_netem.c struct netem_sched_data *q = qdisc_priv(sch); q 1218 net/sched/sch_netem.c if (cl != 1 || !q->qdisc) /* only one class */ q 1222 net/sched/sch_netem.c tcm->tcm_info = q->qdisc->handle; q 1230 net/sched/sch_netem.c struct netem_sched_data *q = qdisc_priv(sch); q 1232 net/sched/sch_netem.c *old = qdisc_replace(sch, new, &q->qdisc); q 1238 net/sched/sch_netem.c struct netem_sched_data *q = qdisc_priv(sch); q 1239 net/sched/sch_netem.c return q->qdisc; q 94 net/sched/sch_pie.c struct pie_sched_data *q = qdisc_priv(sch); q 96 net/sched/sch_pie.c u64 local_prob = q->vars.prob; q 100 net/sched/sch_pie.c if (q->vars.burst_time > 0) q 106 net/sched/sch_pie.c if ((q->vars.qdelay < q->params.target / 2) && q 107 net/sched/sch_pie.c (q->vars.prob < MAX_PROB / 5)) q 119 net/sched/sch_pie.c if (q->params.bytemode && packet_size <= mtu) q 122 net/sched/sch_pie.c local_prob = q->vars.prob; q 125 net/sched/sch_pie.c q->vars.accu_prob = 0; q 126 net/sched/sch_pie.c q->vars.accu_prob_overflows = 0; q 129 net/sched/sch_pie.c if (local_prob > MAX_PROB - q->vars.accu_prob) q 130 net/sched/sch_pie.c q->vars.accu_prob_overflows++; q 132 net/sched/sch_pie.c q->vars.accu_prob += local_prob; q 134 net/sched/sch_pie.c if (q->vars.accu_prob_overflows == 0 && q 135 net/sched/sch_pie.c q->vars.accu_prob < (MAX_PROB / 100) * 85) q 137 net/sched/sch_pie.c if (q->vars.accu_prob_overflows == 8 && q 138 net/sched/sch_pie.c q->vars.accu_prob >= MAX_PROB / 2) q 143 net/sched/sch_pie.c q->vars.accu_prob = 0; q 144 net/sched/sch_pie.c q->vars.accu_prob_overflows = 0; q 154 net/sched/sch_pie.c struct pie_sched_data *q = qdisc_priv(sch); q 158 net/sched/sch_pie.c q->stats.overlimit++; q 164 net/sched/sch_pie.c } else if (q->params.ecn && (q->vars.prob <= MAX_PROB / 10) && q 169 net/sched/sch_pie.c q->stats.ecn_mark++; q 175 net/sched/sch_pie.c q->stats.packets_in++; q 176 net/sched/sch_pie.c if (qdisc_qlen(sch) > q->stats.maxq) q 177 net/sched/sch_pie.c q->stats.maxq = qdisc_qlen(sch); q 183 net/sched/sch_pie.c q->stats.dropped++; q 184 net/sched/sch_pie.c q->vars.accu_prob = 0; q 185 net/sched/sch_pie.c q->vars.accu_prob_overflows = 0; q 202 net/sched/sch_pie.c struct pie_sched_data *q = qdisc_priv(sch); q 223 net/sched/sch_pie.c 
q->params.target = PSCHED_NS2TICKS((u64)target * NSEC_PER_USEC); q 228 net/sched/sch_pie.c q->params.tupdate = q 234 net/sched/sch_pie.c q->params.limit = limit; q 239 net/sched/sch_pie.c q->params.alpha = nla_get_u32(tb[TCA_PIE_ALPHA]); q 242 net/sched/sch_pie.c q->params.beta = nla_get_u32(tb[TCA_PIE_BETA]); q 245 net/sched/sch_pie.c q->params.ecn = nla_get_u32(tb[TCA_PIE_ECN]); q 248 net/sched/sch_pie.c q->params.bytemode = nla_get_u32(tb[TCA_PIE_BYTEMODE]); q 251 net/sched/sch_pie.c qlen = sch->q.qlen; q 252 net/sched/sch_pie.c while (sch->q.qlen > sch->limit) { q 253 net/sched/sch_pie.c struct sk_buff *skb = __qdisc_dequeue_head(&sch->q); q 259 net/sched/sch_pie.c qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped); q 267 net/sched/sch_pie.c struct pie_sched_data *q = qdisc_priv(sch); q 274 net/sched/sch_pie.c if (qlen >= QUEUE_THRESHOLD && q->vars.dq_count == DQCOUNT_INVALID) { q 275 net/sched/sch_pie.c q->vars.dq_tstamp = psched_get_time(); q 276 net/sched/sch_pie.c q->vars.dq_count = 0; q 288 net/sched/sch_pie.c if (q->vars.dq_count != DQCOUNT_INVALID) { q 289 net/sched/sch_pie.c q->vars.dq_count += skb->len; q 291 net/sched/sch_pie.c if (q->vars.dq_count >= QUEUE_THRESHOLD) { q 293 net/sched/sch_pie.c u32 dtime = now - q->vars.dq_tstamp; q 294 net/sched/sch_pie.c u32 count = q->vars.dq_count << PIE_SCALE; q 301 net/sched/sch_pie.c if (q->vars.avg_dq_rate == 0) q 302 net/sched/sch_pie.c q->vars.avg_dq_rate = count; q 304 net/sched/sch_pie.c q->vars.avg_dq_rate = q 305 net/sched/sch_pie.c (q->vars.avg_dq_rate - q 306 net/sched/sch_pie.c (q->vars.avg_dq_rate >> 3)) + (count >> 3); q 314 net/sched/sch_pie.c q->vars.dq_count = DQCOUNT_INVALID; q 316 net/sched/sch_pie.c q->vars.dq_count = 0; q 317 net/sched/sch_pie.c q->vars.dq_tstamp = psched_get_time(); q 320 net/sched/sch_pie.c if (q->vars.burst_time > 0) { q 321 net/sched/sch_pie.c if (q->vars.burst_time > dtime) q 322 net/sched/sch_pie.c q->vars.burst_time -= dtime; q 324 net/sched/sch_pie.c q->vars.burst_time = 0; q 332 net/sched/sch_pie.c struct pie_sched_data *q = qdisc_priv(sch); q 335 net/sched/sch_pie.c psched_time_t qdelay_old = q->vars.qdelay; /* in pschedtime */ q 342 net/sched/sch_pie.c q->vars.qdelay_old = q->vars.qdelay; q 344 net/sched/sch_pie.c if (q->vars.avg_dq_rate > 0) q 345 net/sched/sch_pie.c qdelay = (qlen << PIE_SCALE) / q->vars.avg_dq_rate; q 362 net/sched/sch_pie.c alpha = ((u64)q->params.alpha * (MAX_PROB / PSCHED_TICKS_PER_SEC)) >> 4; q 363 net/sched/sch_pie.c beta = ((u64)q->params.beta * (MAX_PROB / PSCHED_TICKS_PER_SEC)) >> 4; q 368 net/sched/sch_pie.c if (q->vars.prob < MAX_PROB / 10) { q 373 net/sched/sch_pie.c while (q->vars.prob < div_u64(MAX_PROB, power) && q 382 net/sched/sch_pie.c delta += alpha * (u64)(qdelay - q->params.target); q 385 net/sched/sch_pie.c oldprob = q->vars.prob; q 389 net/sched/sch_pie.c q->vars.prob >= MAX_PROB / 10) q 400 net/sched/sch_pie.c q->vars.prob += delta; q 404 net/sched/sch_pie.c if (q->vars.prob < oldprob) { q 405 net/sched/sch_pie.c q->vars.prob = MAX_PROB; q 415 net/sched/sch_pie.c if (q->vars.prob > oldprob) q 416 net/sched/sch_pie.c q->vars.prob = 0; q 425 net/sched/sch_pie.c q->vars.prob -= q->vars.prob / 64u; q 427 net/sched/sch_pie.c q->vars.qdelay = qdelay; q 428 net/sched/sch_pie.c q->vars.qlen_old = qlen; q 436 net/sched/sch_pie.c if ((q->vars.qdelay < q->params.target / 2) && q 437 net/sched/sch_pie.c (q->vars.qdelay_old < q->params.target / 2) && q 438 net/sched/sch_pie.c q->vars.prob == 0 && q 439 net/sched/sch_pie.c q->vars.avg_dq_rate > 0) q 
440 net/sched/sch_pie.c pie_vars_init(&q->vars); q 445 net/sched/sch_pie.c struct pie_sched_data *q = from_timer(q, t, adapt_timer); q 446 net/sched/sch_pie.c struct Qdisc *sch = q->sch; q 453 net/sched/sch_pie.c if (q->params.tupdate) q 454 net/sched/sch_pie.c mod_timer(&q->adapt_timer, jiffies + q->params.tupdate); q 461 net/sched/sch_pie.c struct pie_sched_data *q = qdisc_priv(sch); q 463 net/sched/sch_pie.c pie_params_init(&q->params); q 464 net/sched/sch_pie.c pie_vars_init(&q->vars); q 465 net/sched/sch_pie.c sch->limit = q->params.limit; q 467 net/sched/sch_pie.c q->sch = sch; q 468 net/sched/sch_pie.c timer_setup(&q->adapt_timer, pie_timer, 0); q 477 net/sched/sch_pie.c mod_timer(&q->adapt_timer, jiffies + HZ / 2); q 483 net/sched/sch_pie.c struct pie_sched_data *q = qdisc_priv(sch); q 492 net/sched/sch_pie.c ((u32)PSCHED_TICKS2NS(q->params.target)) / q 496 net/sched/sch_pie.c jiffies_to_usecs(q->params.tupdate)) || q 497 net/sched/sch_pie.c nla_put_u32(skb, TCA_PIE_ALPHA, q->params.alpha) || q 498 net/sched/sch_pie.c nla_put_u32(skb, TCA_PIE_BETA, q->params.beta) || q 499 net/sched/sch_pie.c nla_put_u32(skb, TCA_PIE_ECN, q->params.ecn) || q 500 net/sched/sch_pie.c nla_put_u32(skb, TCA_PIE_BYTEMODE, q->params.bytemode)) q 512 net/sched/sch_pie.c struct pie_sched_data *q = qdisc_priv(sch); q 514 net/sched/sch_pie.c .prob = q->vars.prob, q 515 net/sched/sch_pie.c .delay = ((u32)PSCHED_TICKS2NS(q->vars.qdelay)) / q 518 net/sched/sch_pie.c .avg_dq_rate = q->vars.avg_dq_rate * q 520 net/sched/sch_pie.c .packets_in = q->stats.packets_in, q 521 net/sched/sch_pie.c .overlimit = q->stats.overlimit, q 522 net/sched/sch_pie.c .maxq = q->stats.maxq, q 523 net/sched/sch_pie.c .dropped = q->stats.dropped, q 524 net/sched/sch_pie.c .ecn_mark = q->stats.ecn_mark, q 543 net/sched/sch_pie.c struct pie_sched_data *q = qdisc_priv(sch); q 546 net/sched/sch_pie.c pie_vars_init(&q->vars); q 551 net/sched/sch_pie.c struct pie_sched_data *q = qdisc_priv(sch); q 553 net/sched/sch_pie.c q->params.tupdate = 0; q 554 net/sched/sch_pie.c del_timer_sync(&q->adapt_timer); q 90 net/sched/sch_plug.c struct plug_sched_data *q = qdisc_priv(sch); q 92 net/sched/sch_plug.c if (likely(sch->qstats.backlog + skb->len <= q->limit)) { q 93 net/sched/sch_plug.c if (!q->unplug_indefinite) q 94 net/sched/sch_plug.c q->pkts_current_epoch++; q 103 net/sched/sch_plug.c struct plug_sched_data *q = qdisc_priv(sch); q 105 net/sched/sch_plug.c if (q->throttled) q 108 net/sched/sch_plug.c if (!q->unplug_indefinite) { q 109 net/sched/sch_plug.c if (!q->pkts_to_release) { q 113 net/sched/sch_plug.c q->throttled = true; q 116 net/sched/sch_plug.c q->pkts_to_release--; q 125 net/sched/sch_plug.c struct plug_sched_data *q = qdisc_priv(sch); q 127 net/sched/sch_plug.c q->pkts_current_epoch = 0; q 128 net/sched/sch_plug.c q->pkts_last_epoch = 0; q 129 net/sched/sch_plug.c q->pkts_to_release = 0; q 130 net/sched/sch_plug.c q->unplug_indefinite = false; q 133 net/sched/sch_plug.c q->limit = qdisc_dev(sch)->tx_queue_len q 141 net/sched/sch_plug.c q->limit = ctl->limit; q 144 net/sched/sch_plug.c q->throttled = true; q 161 net/sched/sch_plug.c struct plug_sched_data *q = qdisc_priv(sch); q 174 net/sched/sch_plug.c q->pkts_last_epoch = q->pkts_current_epoch; q 175 net/sched/sch_plug.c q->pkts_current_epoch = 0; q 176 net/sched/sch_plug.c if (q->unplug_indefinite) q 177 net/sched/sch_plug.c q->throttled = true; q 178 net/sched/sch_plug.c q->unplug_indefinite = false; q 184 net/sched/sch_plug.c q->pkts_to_release += q->pkts_last_epoch; q 185 
net/sched/sch_plug.c q->pkts_last_epoch = 0; q 186 net/sched/sch_plug.c q->throttled = false; q 190 net/sched/sch_plug.c q->unplug_indefinite = true; q 191 net/sched/sch_plug.c q->pkts_to_release = 0; q 192 net/sched/sch_plug.c q->pkts_last_epoch = 0; q 193 net/sched/sch_plug.c q->pkts_current_epoch = 0; q 194 net/sched/sch_plug.c q->throttled = false; q 199 net/sched/sch_plug.c q->limit = msg->limit; q 33 net/sched/sch_prio.c struct prio_sched_data *q = qdisc_priv(sch); q 41 net/sched/sch_prio.c fl = rcu_dereference_bh(q->filter_list); q 57 net/sched/sch_prio.c return q->queues[q->prio2band[band & TC_PRIO_MAX]]; q 62 net/sched/sch_prio.c if (band >= q->bands) q 63 net/sched/sch_prio.c return q->queues[q->prio2band[0]]; q 65 net/sched/sch_prio.c return q->queues[band]; q 89 net/sched/sch_prio.c sch->q.qlen++; q 99 net/sched/sch_prio.c struct prio_sched_data *q = qdisc_priv(sch); q 102 net/sched/sch_prio.c for (prio = 0; prio < q->bands; prio++) { q 103 net/sched/sch_prio.c struct Qdisc *qdisc = q->queues[prio]; q 113 net/sched/sch_prio.c struct prio_sched_data *q = qdisc_priv(sch); q 116 net/sched/sch_prio.c for (prio = 0; prio < q->bands; prio++) { q 117 net/sched/sch_prio.c struct Qdisc *qdisc = q->queues[prio]; q 122 net/sched/sch_prio.c sch->q.qlen--; q 134 net/sched/sch_prio.c struct prio_sched_data *q = qdisc_priv(sch); q 136 net/sched/sch_prio.c for (prio = 0; prio < q->bands; prio++) q 137 net/sched/sch_prio.c qdisc_reset(q->queues[prio]); q 139 net/sched/sch_prio.c sch->q.qlen = 0; q 170 net/sched/sch_prio.c struct prio_sched_data *q = qdisc_priv(sch); q 172 net/sched/sch_prio.c tcf_block_put(q->block); q 174 net/sched/sch_prio.c for (prio = 0; prio < q->bands; prio++) q 175 net/sched/sch_prio.c qdisc_put(q->queues[prio]); q 181 net/sched/sch_prio.c struct prio_sched_data *q = qdisc_priv(sch); q 183 net/sched/sch_prio.c int oldbands = q->bands, i; q 212 net/sched/sch_prio.c q->bands = qopt->bands; q 213 net/sched/sch_prio.c memcpy(q->prio2band, qopt->priomap, TC_PRIO_MAX+1); q 215 net/sched/sch_prio.c for (i = q->bands; i < oldbands; i++) q 216 net/sched/sch_prio.c qdisc_tree_flush_backlog(q->queues[i]); q 218 net/sched/sch_prio.c for (i = oldbands; i < q->bands; i++) { q 219 net/sched/sch_prio.c q->queues[i] = queues[i]; q 220 net/sched/sch_prio.c if (q->queues[i] != &noop_qdisc) q 221 net/sched/sch_prio.c qdisc_hash_add(q->queues[i], true); q 226 net/sched/sch_prio.c for (i = q->bands; i < oldbands; i++) q 227 net/sched/sch_prio.c qdisc_put(q->queues[i]); q 234 net/sched/sch_prio.c struct prio_sched_data *q = qdisc_priv(sch); q 240 net/sched/sch_prio.c err = tcf_block_get(&q->block, &q->filter_list, sch, extack); q 266 net/sched/sch_prio.c struct prio_sched_data *q = qdisc_priv(sch); q 271 net/sched/sch_prio.c opt.bands = q->bands; q 272 net/sched/sch_prio.c memcpy(&opt.priomap, q->prio2band, TC_PRIO_MAX + 1); q 291 net/sched/sch_prio.c struct prio_sched_data *q = qdisc_priv(sch); q 304 net/sched/sch_prio.c *old = qdisc_replace(sch, new, &q->queues[band]); q 321 net/sched/sch_prio.c struct prio_sched_data *q = qdisc_priv(sch); q 324 net/sched/sch_prio.c return q->queues[band]; q 329 net/sched/sch_prio.c struct prio_sched_data *q = qdisc_priv(sch); q 332 net/sched/sch_prio.c if (band - 1 >= q->bands) q 343 net/sched/sch_prio.c static void prio_unbind(struct Qdisc *q, unsigned long cl) q 350 net/sched/sch_prio.c struct prio_sched_data *q = qdisc_priv(sch); q 353 net/sched/sch_prio.c tcm->tcm_info = q->queues[cl-1]->handle; q 360 net/sched/sch_prio.c struct prio_sched_data *q = 
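The sch_prio.c fragments above classify by mapping skb->priority through the priomap, with band 0's mapping as the fallback for out-of-range filter verdicts. A standalone sketch of that lookup (struct layout here is illustrative):

    #include <stdint.h>

    #define TC_PRIO_MAX 15

    struct Qdisc;                          /* opaque for this sketch */

    struct prio_priv {
        int bands;
        uint8_t prio2band[TC_PRIO_MAX + 1];
        struct Qdisc *queues[TC_PRIO_MAX + 1];
    };

    static struct Qdisc *prio_band_lookup(const struct prio_priv *q,
                                          uint32_t band)
    {
        /* a filter verdict can name a band directly; anything at or
         * past q->bands falls back to the priomap's band for prio 0 */
        if (band >= (uint32_t)q->bands)
            return q->queues[q->prio2band[0]];
        return q->queues[band];
    }

    /* without a matching filter, the fragments use:
     *     band = prio2band[skb_priority & TC_PRIO_MAX] */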
qdisc_priv(sch); q 363 net/sched/sch_prio.c cl_q = q->queues[cl - 1]; q 374 net/sched/sch_prio.c struct prio_sched_data *q = qdisc_priv(sch); q 380 net/sched/sch_prio.c for (prio = 0; prio < q->bands; prio++) { q 396 net/sched/sch_prio.c struct prio_sched_data *q = qdisc_priv(sch); q 400 net/sched/sch_prio.c return q->block; q 208 net/sched/sch_qfq.c struct qfq_sched *q = qdisc_priv(sch); q 211 net/sched/sch_qfq.c clc = qdisc_class_find(&q->clhash, classid); q 253 net/sched/sch_qfq.c static void qfq_init_agg(struct qfq_sched *q, struct qfq_aggregate *agg, q 257 net/sched/sch_qfq.c hlist_add_head(&agg->nonfull_next, &q->nonfull_aggs); q 263 net/sched/sch_qfq.c static struct qfq_aggregate *qfq_find_agg(struct qfq_sched *q, q 268 net/sched/sch_qfq.c hlist_for_each_entry(agg, &q->nonfull_aggs, nonfull_next) q 277 net/sched/sch_qfq.c static void qfq_update_agg(struct qfq_sched *q, struct qfq_aggregate *agg, q 282 net/sched/sch_qfq.c if (new_num_classes == q->max_agg_classes) q 286 net/sched/sch_qfq.c new_num_classes == q->max_agg_classes - 1) /* agg no more full */ q 287 net/sched/sch_qfq.c hlist_add_head(&agg->nonfull_next, &q->nonfull_aggs); q 299 net/sched/sch_qfq.c q->min_slot_shift); q 300 net/sched/sch_qfq.c agg->grp = &q->groups[i]; q 303 net/sched/sch_qfq.c q->wsum += q 305 net/sched/sch_qfq.c q->iwsum = ONE_FP / q->wsum; q 311 net/sched/sch_qfq.c static void qfq_add_to_agg(struct qfq_sched *q, q 317 net/sched/sch_qfq.c qfq_update_agg(q, agg, agg->num_classes+1); q 318 net/sched/sch_qfq.c if (cl->qdisc->q.qlen > 0) { /* adding an active class */ q 321 net/sched/sch_qfq.c cl && q->in_serv_agg != agg) /* agg was inactive */ q 322 net/sched/sch_qfq.c qfq_activate_agg(q, agg, enqueue); /* schedule agg */ q 328 net/sched/sch_qfq.c static void qfq_destroy_agg(struct qfq_sched *q, struct qfq_aggregate *agg) q 331 net/sched/sch_qfq.c q->wsum -= agg->class_weight; q 332 net/sched/sch_qfq.c if (q->wsum != 0) q 333 net/sched/sch_qfq.c q->iwsum = ONE_FP / q->wsum; q 335 net/sched/sch_qfq.c if (q->in_serv_agg == agg) q 336 net/sched/sch_qfq.c q->in_serv_agg = qfq_choose_next_agg(q); q 341 net/sched/sch_qfq.c static void qfq_deactivate_class(struct qfq_sched *q, struct qfq_class *cl) q 348 net/sched/sch_qfq.c qfq_deactivate_agg(q, agg); q 352 net/sched/sch_qfq.c static void qfq_rm_from_agg(struct qfq_sched *q, struct qfq_class *cl) q 358 net/sched/sch_qfq.c qfq_destroy_agg(q, agg); q 361 net/sched/sch_qfq.c qfq_update_agg(q, agg, agg->num_classes-1); q 365 net/sched/sch_qfq.c static void qfq_deact_rm_from_agg(struct qfq_sched *q, struct qfq_class *cl) q 367 net/sched/sch_qfq.c if (cl->qdisc->q.qlen > 0) /* class is active */ q 368 net/sched/sch_qfq.c qfq_deactivate_class(q, cl); q 370 net/sched/sch_qfq.c qfq_rm_from_agg(q, cl); q 377 net/sched/sch_qfq.c struct qfq_sched *q = qdisc_priv(sch); q 378 net/sched/sch_qfq.c struct qfq_aggregate *new_agg = qfq_find_agg(q, lmax, weight); q 384 net/sched/sch_qfq.c qfq_init_agg(q, new_agg, lmax, weight); q 386 net/sched/sch_qfq.c qfq_deact_rm_from_agg(q, cl); q 387 net/sched/sch_qfq.c qfq_add_to_agg(q, new_agg, cl); q 396 net/sched/sch_qfq.c struct qfq_sched *q = qdisc_priv(sch); q 443 net/sched/sch_qfq.c if (q->wsum + delta_w > QFQ_MAX_WSUM) { q 445 net/sched/sch_qfq.c delta_w, q->wsum); q 489 net/sched/sch_qfq.c qdisc_class_hash_insert(&q->clhash, &cl->common); q 492 net/sched/sch_qfq.c qdisc_class_hash_grow(sch, &q->clhash); q 496 net/sched/sch_qfq.c new_agg = qfq_find_agg(q, lmax, weight); q 506 net/sched/sch_qfq.c qfq_init_agg(q, new_agg, lmax, weight); q 
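The sch_qfq.c fragments above keep the aggregate weight sum and its fixed-point inverse up to date so the virtual clock can advance with one multiplication per dequeue instead of a division. A sketch of that bookkeeping, with an assumed FRAC_BITS precision:

    #include <stdint.h>

    #define FRAC_BITS 30                  /* assumed fixed-point precision */
    #define ONE_FP    (1UL << FRAC_BITS)

    struct qfq_clock {
        uint32_t wsum;   /* sum of weights of active aggregates */
        uint32_t iwsum;  /* ONE_FP / wsum, cached inverse       */
        uint64_t V;      /* system virtual time                 */
    };

    static void qfq_weight_add(struct qfq_clock *q, uint32_t w)
    {
        q->wsum += w;
        q->iwsum = ONE_FP / q->wsum;      /* recomputed only on changes */
    }

    static void qfq_charge_dequeue(struct qfq_clock *q, unsigned int len)
    {
        q->V += (uint64_t)len * q->iwsum; /* V grows by len / wsum */
    }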
509 net/sched/sch_qfq.c qfq_deact_rm_from_agg(q, cl); q 510 net/sched/sch_qfq.c qfq_add_to_agg(q, new_agg, cl); q 524 net/sched/sch_qfq.c struct qfq_sched *q = qdisc_priv(sch); q 526 net/sched/sch_qfq.c qfq_rm_from_agg(q, cl); q 534 net/sched/sch_qfq.c struct qfq_sched *q = qdisc_priv(sch); q 543 net/sched/sch_qfq.c qdisc_class_hash_remove(&q->clhash, &cl->common); q 559 net/sched/sch_qfq.c struct qfq_sched *q = qdisc_priv(sch); q 564 net/sched/sch_qfq.c return q->block; q 654 net/sched/sch_qfq.c struct qfq_sched *q = qdisc_priv(sch); q 661 net/sched/sch_qfq.c for (i = 0; i < q->clhash.hashsize; i++) { q 662 net/sched/sch_qfq.c hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) { q 679 net/sched/sch_qfq.c struct qfq_sched *q = qdisc_priv(sch); q 693 net/sched/sch_qfq.c fl = rcu_dereference_bh(q->filter_list); q 729 net/sched/sch_qfq.c static inline struct qfq_group *qfq_ffs(struct qfq_sched *q, q 733 net/sched/sch_qfq.c return &q->groups[index]; q 746 net/sched/sch_qfq.c static int qfq_calc_state(struct qfq_sched *q, const struct qfq_group *grp) q 749 net/sched/sch_qfq.c unsigned int state = qfq_gt(grp->S, q->V); q 750 net/sched/sch_qfq.c unsigned long mask = mask_from(q->bitmaps[ER], grp->index); q 754 net/sched/sch_qfq.c next = qfq_ffs(q, mask); q 769 net/sched/sch_qfq.c static inline void qfq_move_groups(struct qfq_sched *q, unsigned long mask, q 772 net/sched/sch_qfq.c q->bitmaps[dst] |= q->bitmaps[src] & mask; q 773 net/sched/sch_qfq.c q->bitmaps[src] &= ~mask; q 776 net/sched/sch_qfq.c static void qfq_unblock_groups(struct qfq_sched *q, int index, u64 old_F) q 778 net/sched/sch_qfq.c unsigned long mask = mask_from(q->bitmaps[ER], index + 1); q 782 net/sched/sch_qfq.c next = qfq_ffs(q, mask); q 788 net/sched/sch_qfq.c qfq_move_groups(q, mask, EB, ER); q 789 net/sched/sch_qfq.c qfq_move_groups(q, mask, IB, IR); q 802 net/sched/sch_qfq.c static void qfq_make_eligible(struct qfq_sched *q) q 804 net/sched/sch_qfq.c unsigned long vslot = q->V >> q->min_slot_shift; q 805 net/sched/sch_qfq.c unsigned long old_vslot = q->oldV >> q->min_slot_shift; q 816 net/sched/sch_qfq.c qfq_move_groups(q, mask, IR, ER); q 817 net/sched/sch_qfq.c qfq_move_groups(q, mask, IB, EB); q 957 net/sched/sch_qfq.c static void qfq_update_eligible(struct qfq_sched *q) q 962 net/sched/sch_qfq.c ineligible = q->bitmaps[IR] | q->bitmaps[IB]; q 964 net/sched/sch_qfq.c if (!q->bitmaps[ER]) { q 965 net/sched/sch_qfq.c grp = qfq_ffs(q, ineligible); q 966 net/sched/sch_qfq.c if (qfq_gt(grp->S, q->V)) q 967 net/sched/sch_qfq.c q->V = grp->S; q 969 net/sched/sch_qfq.c qfq_make_eligible(q); q 981 net/sched/sch_qfq.c if (cl->qdisc->q.qlen == 0) /* no more packets, remove from list */ q 1031 net/sched/sch_qfq.c static void qfq_update_start(struct qfq_sched *q, struct qfq_aggregate *agg) q 1038 net/sched/sch_qfq.c limit = qfq_round_down(q->V, slot_shift) + (1ULL << slot_shift); q 1040 net/sched/sch_qfq.c if (!qfq_gt(agg->F, q->V) || qfq_gt(roundedF, limit)) { q 1042 net/sched/sch_qfq.c mask = mask_from(q->bitmaps[ER], agg->grp->index); q 1044 net/sched/sch_qfq.c struct qfq_group *next = qfq_ffs(q, mask); q 1053 net/sched/sch_qfq.c agg->S = q->V; q 1064 net/sched/sch_qfq.c qfq_update_agg_ts(struct qfq_sched *q, q 1068 net/sched/sch_qfq.c qfq_update_start(q, agg); q 1075 net/sched/sch_qfq.c static void qfq_schedule_agg(struct qfq_sched *q, struct qfq_aggregate *agg); q 1079 net/sched/sch_qfq.c struct qfq_sched *q = qdisc_priv(sch); q 1080 net/sched/sch_qfq.c struct qfq_aggregate *in_serv_agg = q->in_serv_agg; q 1115 
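The qfq_move_groups/qfq_make_eligible fragments above are the heart of QFQ's O(1) state machine: each group lives in exactly one of four states (eligible or ineligible, ready or blocked), each state is one bitmap, and a whole set of groups changes state with two bit operations. A sketch under those assumptions:

    enum { ER, EB, IR, IB, QFQ_MAX_STATE }; /* eligible/ineligible x ready/blocked */

    struct qfq_states {
        unsigned long bitmaps[QFQ_MAX_STATE]; /* bit i set = group i in state */
    };

    static void move_groups(struct qfq_states *q, unsigned long mask,
                            int src, int dst)
    {
        q->bitmaps[dst] |= q->bitmaps[src] & mask;  /* adopt the masked groups */
        q->bitmaps[src] &= ~mask;                   /* drop them at the source */
    }

    /* when the virtual clock crosses a slot boundary, ineligible groups
     * become eligible in bulk, as in qfq_make_eligible above:
     *     move_groups(q, mask, IR, ER);
     *     move_groups(q, mask, IB, EB);
     */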
net/sched/sch_qfq.c qfq_update_agg_ts(q, in_serv_agg, requeue); q 1116 net/sched/sch_qfq.c qfq_schedule_agg(q, in_serv_agg); q 1117 net/sched/sch_qfq.c } else if (sch->q.qlen == 0) { /* no aggregate to serve */ q 1118 net/sched/sch_qfq.c q->in_serv_agg = NULL; q 1126 net/sched/sch_qfq.c in_serv_agg = q->in_serv_agg = qfq_choose_next_agg(q); q 1133 net/sched/sch_qfq.c sch->q.qlen--; q 1146 net/sched/sch_qfq.c q->V += (u64)len * q->iwsum; q 1149 net/sched/sch_qfq.c (unsigned long long) q->V); q 1154 net/sched/sch_qfq.c static struct qfq_aggregate *qfq_choose_next_agg(struct qfq_sched *q) q 1160 net/sched/sch_qfq.c qfq_update_eligible(q); q 1161 net/sched/sch_qfq.c q->oldV = q->V; q 1163 net/sched/sch_qfq.c if (!q->bitmaps[ER]) q 1166 net/sched/sch_qfq.c grp = qfq_ffs(q, q->bitmaps[ER]); q 1177 net/sched/sch_qfq.c __clear_bit(grp->index, &q->bitmaps[ER]); q 1187 net/sched/sch_qfq.c __clear_bit(grp->index, &q->bitmaps[ER]); q 1188 net/sched/sch_qfq.c s = qfq_calc_state(q, grp); q 1189 net/sched/sch_qfq.c __set_bit(grp->index, &q->bitmaps[s]); q 1192 net/sched/sch_qfq.c qfq_unblock_groups(q, grp->index, old_F); q 1201 net/sched/sch_qfq.c struct qfq_sched *q = qdisc_priv(sch); q 1227 net/sched/sch_qfq.c first = !cl->qdisc->q.qlen; q 1241 net/sched/sch_qfq.c ++sch->q.qlen; q 1259 net/sched/sch_qfq.c q->in_serv_agg == agg) q 1262 net/sched/sch_qfq.c qfq_activate_agg(q, agg, enqueue); q 1270 net/sched/sch_qfq.c static void qfq_schedule_agg(struct qfq_sched *q, struct qfq_aggregate *agg) q 1294 net/sched/sch_qfq.c __clear_bit(grp->index, &q->bitmaps[IR]); q 1295 net/sched/sch_qfq.c __clear_bit(grp->index, &q->bitmaps[IB]); q 1296 net/sched/sch_qfq.c } else if (!q->bitmaps[ER] && qfq_gt(roundedS, q->V) && q 1297 net/sched/sch_qfq.c q->in_serv_agg == NULL) q 1298 net/sched/sch_qfq.c q->V = roundedS; q 1302 net/sched/sch_qfq.c s = qfq_calc_state(q, grp); q 1303 net/sched/sch_qfq.c __set_bit(grp->index, &q->bitmaps[s]); q 1306 net/sched/sch_qfq.c s, q->bitmaps[s], q 1309 net/sched/sch_qfq.c (unsigned long long) q->V); q 1317 net/sched/sch_qfq.c static void qfq_activate_agg(struct qfq_sched *q, struct qfq_aggregate *agg, q 1322 net/sched/sch_qfq.c qfq_update_agg_ts(q, agg, reason); q 1323 net/sched/sch_qfq.c if (q->in_serv_agg == NULL) { /* no aggr. 
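The roundedS/qfq_round_down uses above rely on each group i working on a virtual-time grid of 2^slot_shift units. The rounding helper itself is not quoted in these fragments, so this is an assumed but standard formulation:

    #include <stdint.h>

    /* round a timestamp down to the group's slot granularity */
    static inline uint64_t qfq_round_down(uint64_t ts, unsigned int slot_shift)
    {
        return ts & ~((1ULL << slot_shift) - 1);
    }

    /* e.g. the eligibility window used in qfq_update_start above:
     *     limit = qfq_round_down(V, slot_shift) + (1ULL << slot_shift);
     */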
in service or scheduled */ q 1324 net/sched/sch_qfq.c q->in_serv_agg = agg; /* start serving this aggregate */ q 1326 net/sched/sch_qfq.c q->oldV = q->V = agg->S; q 1327 net/sched/sch_qfq.c } else if (agg != q->in_serv_agg) q 1328 net/sched/sch_qfq.c qfq_schedule_agg(q, agg); q 1331 net/sched/sch_qfq.c static void qfq_slot_remove(struct qfq_sched *q, struct qfq_group *grp, q 1354 net/sched/sch_qfq.c static void qfq_deactivate_agg(struct qfq_sched *q, struct qfq_aggregate *agg) q 1361 net/sched/sch_qfq.c if (agg == q->in_serv_agg) { q 1363 net/sched/sch_qfq.c q->in_serv_agg = qfq_choose_next_agg(q); q 1368 net/sched/sch_qfq.c qfq_slot_remove(q, grp, agg); q 1371 net/sched/sch_qfq.c __clear_bit(grp->index, &q->bitmaps[IR]); q 1372 net/sched/sch_qfq.c __clear_bit(grp->index, &q->bitmaps[EB]); q 1373 net/sched/sch_qfq.c __clear_bit(grp->index, &q->bitmaps[IB]); q 1375 net/sched/sch_qfq.c if (test_bit(grp->index, &q->bitmaps[ER]) && q 1376 net/sched/sch_qfq.c !(q->bitmaps[ER] & ~((1UL << grp->index) - 1))) { q 1377 net/sched/sch_qfq.c mask = q->bitmaps[ER] & ((1UL << grp->index) - 1); q 1382 net/sched/sch_qfq.c qfq_move_groups(q, mask, EB, ER); q 1383 net/sched/sch_qfq.c qfq_move_groups(q, mask, IB, IR); q 1385 net/sched/sch_qfq.c __clear_bit(grp->index, &q->bitmaps[ER]); q 1390 net/sched/sch_qfq.c __clear_bit(grp->index, &q->bitmaps[ER]); q 1391 net/sched/sch_qfq.c __clear_bit(grp->index, &q->bitmaps[IR]); q 1392 net/sched/sch_qfq.c __clear_bit(grp->index, &q->bitmaps[EB]); q 1393 net/sched/sch_qfq.c __clear_bit(grp->index, &q->bitmaps[IB]); q 1396 net/sched/sch_qfq.c s = qfq_calc_state(q, grp); q 1397 net/sched/sch_qfq.c __set_bit(grp->index, &q->bitmaps[s]); q 1404 net/sched/sch_qfq.c struct qfq_sched *q = qdisc_priv(sch); q 1407 net/sched/sch_qfq.c qfq_deactivate_class(q, cl); q 1413 net/sched/sch_qfq.c struct qfq_sched *q = qdisc_priv(sch); q 1418 net/sched/sch_qfq.c err = tcf_block_get(&q->block, &q->filter_list, sch, extack); q 1422 net/sched/sch_qfq.c err = qdisc_class_hash_init(&q->clhash); q 1432 net/sched/sch_qfq.c q->max_agg_classes = 1<<max_cl_shift; q 1436 net/sched/sch_qfq.c q->min_slot_shift = FRAC_BITS + maxbudg_shift - QFQ_MAX_INDEX; q 1439 net/sched/sch_qfq.c grp = &q->groups[i]; q 1441 net/sched/sch_qfq.c grp->slot_shift = q->min_slot_shift + i; q 1446 net/sched/sch_qfq.c INIT_HLIST_HEAD(&q->nonfull_aggs); q 1453 net/sched/sch_qfq.c struct qfq_sched *q = qdisc_priv(sch); q 1457 net/sched/sch_qfq.c for (i = 0; i < q->clhash.hashsize; i++) { q 1458 net/sched/sch_qfq.c hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) { q 1459 net/sched/sch_qfq.c if (cl->qdisc->q.qlen > 0) q 1460 net/sched/sch_qfq.c qfq_deactivate_class(q, cl); q 1466 net/sched/sch_qfq.c sch->q.qlen = 0; q 1471 net/sched/sch_qfq.c struct qfq_sched *q = qdisc_priv(sch); q 1476 net/sched/sch_qfq.c tcf_block_put(q->block); q 1478 net/sched/sch_qfq.c for (i = 0; i < q->clhash.hashsize; i++) { q 1479 net/sched/sch_qfq.c hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i], q 1484 net/sched/sch_qfq.c qdisc_class_hash_destroy(&q->clhash); q 47 net/sched/sch_red.c static inline int red_use_ecn(struct red_sched_data *q) q 49 net/sched/sch_red.c return q->flags & TC_RED_ECN; q 52 net/sched/sch_red.c static inline int red_use_harddrop(struct red_sched_data *q) q 54 net/sched/sch_red.c return q->flags & TC_RED_HARDDROP; q 60 net/sched/sch_red.c struct red_sched_data *q = qdisc_priv(sch); q 61 net/sched/sch_red.c struct Qdisc *child = q->qdisc; q 64 net/sched/sch_red.c q->vars.qavg = red_calc_qavg(&q->parms, q 65 
net/sched/sch_red.c &q->vars, q 68 net/sched/sch_red.c if (red_is_idling(&q->vars)) q 69 net/sched/sch_red.c red_end_of_idle_period(&q->vars); q 71 net/sched/sch_red.c switch (red_action(&q->parms, &q->vars, q->vars.qavg)) { q 77 net/sched/sch_red.c if (!red_use_ecn(q) || !INET_ECN_set_ce(skb)) { q 78 net/sched/sch_red.c q->stats.prob_drop++; q 82 net/sched/sch_red.c q->stats.prob_mark++; q 87 net/sched/sch_red.c if (red_use_harddrop(q) || !red_use_ecn(q) || q 89 net/sched/sch_red.c q->stats.forced_drop++; q 93 net/sched/sch_red.c q->stats.forced_mark++; q 100 net/sched/sch_red.c sch->q.qlen++; q 102 net/sched/sch_red.c q->stats.pdrop++; q 115 net/sched/sch_red.c struct red_sched_data *q = qdisc_priv(sch); q 116 net/sched/sch_red.c struct Qdisc *child = q->qdisc; q 122 net/sched/sch_red.c sch->q.qlen--; q 124 net/sched/sch_red.c if (!red_is_idling(&q->vars)) q 125 net/sched/sch_red.c red_start_of_idle_period(&q->vars); q 132 net/sched/sch_red.c struct red_sched_data *q = qdisc_priv(sch); q 133 net/sched/sch_red.c struct Qdisc *child = q->qdisc; q 140 net/sched/sch_red.c struct red_sched_data *q = qdisc_priv(sch); q 142 net/sched/sch_red.c qdisc_reset(q->qdisc); q 144 net/sched/sch_red.c sch->q.qlen = 0; q 145 net/sched/sch_red.c red_restart(&q->vars); q 150 net/sched/sch_red.c struct red_sched_data *q = qdisc_priv(sch); q 162 net/sched/sch_red.c opt.set.min = q->parms.qth_min >> q->parms.Wlog; q 163 net/sched/sch_red.c opt.set.max = q->parms.qth_max >> q->parms.Wlog; q 164 net/sched/sch_red.c opt.set.probability = q->parms.max_P; q 165 net/sched/sch_red.c opt.set.limit = q->limit; q 166 net/sched/sch_red.c opt.set.is_ecn = red_use_ecn(q); q 167 net/sched/sch_red.c opt.set.is_harddrop = red_use_harddrop(q); q 178 net/sched/sch_red.c struct red_sched_data *q = qdisc_priv(sch); q 180 net/sched/sch_red.c del_timer_sync(&q->adapt_timer); q 182 net/sched/sch_red.c qdisc_put(q->qdisc); q 195 net/sched/sch_red.c struct red_sched_data *q = qdisc_priv(sch); q 230 net/sched/sch_red.c q->flags = ctl->flags; q 231 net/sched/sch_red.c q->limit = ctl->limit; q 233 net/sched/sch_red.c qdisc_tree_flush_backlog(q->qdisc); q 234 net/sched/sch_red.c old_child = q->qdisc; q 235 net/sched/sch_red.c q->qdisc = child; q 238 net/sched/sch_red.c red_set_parms(&q->parms, q 243 net/sched/sch_red.c red_set_vars(&q->vars); q 245 net/sched/sch_red.c del_timer(&q->adapt_timer); q 247 net/sched/sch_red.c mod_timer(&q->adapt_timer, jiffies + HZ/2); q 249 net/sched/sch_red.c if (!q->qdisc->q.qlen) q 250 net/sched/sch_red.c red_start_of_idle_period(&q->vars); q 263 net/sched/sch_red.c struct red_sched_data *q = from_timer(q, t, adapt_timer); q 264 net/sched/sch_red.c struct Qdisc *sch = q->sch; q 268 net/sched/sch_red.c red_adaptative_algo(&q->parms, &q->vars); q 269 net/sched/sch_red.c mod_timer(&q->adapt_timer, jiffies + HZ/2); q 276 net/sched/sch_red.c struct red_sched_data *q = qdisc_priv(sch); q 278 net/sched/sch_red.c q->qdisc = &noop_qdisc; q 279 net/sched/sch_red.c q->sch = sch; q 280 net/sched/sch_red.c timer_setup(&q->adapt_timer, red_adaptative_timer, 0); q 301 net/sched/sch_red.c struct red_sched_data *q = qdisc_priv(sch); q 304 net/sched/sch_red.c .limit = q->limit, q 305 net/sched/sch_red.c .flags = q->flags, q 306 net/sched/sch_red.c .qth_min = q->parms.qth_min >> q->parms.Wlog, q 307 net/sched/sch_red.c .qth_max = q->parms.qth_max >> q->parms.Wlog, q 308 net/sched/sch_red.c .Wlog = q->parms.Wlog, q 309 net/sched/sch_red.c .Plog = q->parms.Plog, q 310 net/sched/sch_red.c .Scell_log = q->parms.Scell_log, q 322 
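The sch_red.c fragments above funnel every enqueue through red_calc_qavg and a three-way red_action verdict. A simplified sketch of that decision and of the Wlog-weighted average it consumes (names and fixed point are illustrative):

    #include <stdint.h>

    enum red_verdict { RED_DONT_MARK, RED_PROB_MARK, RED_HARD_MARK };

    /* EWMA with weight 2^-Wlog, roughly what red_calc_qavg maintains */
    static uint32_t red_qavg_step(uint32_t qavg, uint32_t backlog, int Wlog)
    {
        int64_t diff = (int64_t)backlog - qavg;

        return (uint32_t)(qavg + (diff >> Wlog)); /* arithmetic shift assumed */
    }

    static enum red_verdict red_decide(uint32_t qavg, uint32_t qth_min,
                                       uint32_t qth_max)
    {
        if (qavg < qth_min)
            return RED_DONT_MARK;
        if (qavg >= qth_max)
            return RED_HARD_MARK;   /* forced mark, or drop without ECN */
        return RED_PROB_MARK;       /* probabilistic, scaled by max_P    */
    }

Whether a mark becomes a drop then depends on the TC_RED_ECN and TC_RED_HARDDROP flags, exactly as the red_use_ecn/red_use_harddrop helpers above encode.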
net/sched/sch_red.c nla_put_u32(skb, TCA_RED_MAX_P, q->parms.max_P)) q 333 net/sched/sch_red.c struct red_sched_data *q = qdisc_priv(sch); q 343 net/sched/sch_red.c .xstats = &q->stats, q 349 net/sched/sch_red.c st.early = q->stats.prob_drop + q->stats.forced_drop; q 350 net/sched/sch_red.c st.pdrop = q->stats.pdrop; q 351 net/sched/sch_red.c st.other = q->stats.other; q 352 net/sched/sch_red.c st.marked = q->stats.prob_mark + q->stats.forced_mark; q 360 net/sched/sch_red.c struct red_sched_data *q = qdisc_priv(sch); q 363 net/sched/sch_red.c tcm->tcm_info = q->qdisc->handle; q 385 net/sched/sch_red.c struct red_sched_data *q = qdisc_priv(sch); q 390 net/sched/sch_red.c *old = qdisc_replace(sch, new, &q->qdisc); q 398 net/sched/sch_red.c struct red_sched_data *q = qdisc_priv(sch); q 399 net/sched/sch_red.c return q->qdisc; q 123 net/sched/sch_sfb.c static void increment_one_qlen(u32 sfbhash, u32 slot, struct sfb_sched_data *q) q 126 net/sched/sch_sfb.c struct sfb_bucket *b = &q->bins[slot].bins[0][0]; q 138 net/sched/sch_sfb.c static void increment_qlen(const struct sk_buff *skb, struct sfb_sched_data *q) q 144 net/sched/sch_sfb.c increment_one_qlen(sfbhash, 0, q); q 148 net/sched/sch_sfb.c increment_one_qlen(sfbhash, 1, q); q 152 net/sched/sch_sfb.c struct sfb_sched_data *q) q 155 net/sched/sch_sfb.c struct sfb_bucket *b = &q->bins[slot].bins[0][0]; q 167 net/sched/sch_sfb.c static void decrement_qlen(const struct sk_buff *skb, struct sfb_sched_data *q) q 173 net/sched/sch_sfb.c decrement_one_qlen(sfbhash, 0, q); q 177 net/sched/sch_sfb.c decrement_one_qlen(sfbhash, 1, q); q 180 net/sched/sch_sfb.c static void decrement_prob(struct sfb_bucket *b, struct sfb_sched_data *q) q 182 net/sched/sch_sfb.c b->p_mark = prob_minus(b->p_mark, q->decrement); q 185 net/sched/sch_sfb.c static void increment_prob(struct sfb_bucket *b, struct sfb_sched_data *q) q 187 net/sched/sch_sfb.c b->p_mark = prob_plus(b->p_mark, q->increment); q 190 net/sched/sch_sfb.c static void sfb_zero_all_buckets(struct sfb_sched_data *q) q 192 net/sched/sch_sfb.c memset(&q->bins, 0, sizeof(q->bins)); q 198 net/sched/sch_sfb.c static u32 sfb_compute_qlen(u32 *prob_r, u32 *avgpm_r, const struct sfb_sched_data *q) q 202 net/sched/sch_sfb.c const struct sfb_bucket *b = &q->bins[q->slot].bins[0][0]; q 218 net/sched/sch_sfb.c static void sfb_init_perturbation(u32 slot, struct sfb_sched_data *q) q 220 net/sched/sch_sfb.c get_random_bytes(&q->bins[slot].perturbation, q 221 net/sched/sch_sfb.c sizeof(q->bins[slot].perturbation)); q 224 net/sched/sch_sfb.c static void sfb_swap_slot(struct sfb_sched_data *q) q 226 net/sched/sch_sfb.c sfb_init_perturbation(q->slot, q); q 227 net/sched/sch_sfb.c q->slot ^= 1; q 228 net/sched/sch_sfb.c q->double_buffering = false; q 234 net/sched/sch_sfb.c static bool sfb_rate_limit(struct sk_buff *skb, struct sfb_sched_data *q) q 236 net/sched/sch_sfb.c if (q->penalty_rate == 0 || q->penalty_burst == 0) q 239 net/sched/sch_sfb.c if (q->tokens_avail < 1) { q 240 net/sched/sch_sfb.c unsigned long age = min(10UL * HZ, jiffies - q->token_time); q 242 net/sched/sch_sfb.c q->tokens_avail = (age * q->penalty_rate) / HZ; q 243 net/sched/sch_sfb.c if (q->tokens_avail > q->penalty_burst) q 244 net/sched/sch_sfb.c q->tokens_avail = q->penalty_burst; q 245 net/sched/sch_sfb.c q->token_time = jiffies; q 246 net/sched/sch_sfb.c if (q->tokens_avail < 1) q 250 net/sched/sch_sfb.c q->tokens_avail--; q 283 net/sched/sch_sfb.c struct sfb_sched_data *q = qdisc_priv(sch); q 284 net/sched/sch_sfb.c struct Qdisc *child = 
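sfb_rate_limit, indexed in full above, is a plain token bucket over the penalty path. A user-space transcription with hypothetical jiffies plumbing (note the kernel's boolean is inverted: it returns whether to drop):

    #include <stdint.h>

    struct penalty_bucket {
        unsigned long token_time;   /* last refill, in jiffies */
        uint32_t tokens_avail;
        uint32_t penalty_rate;      /* tokens per second */
        uint32_t penalty_burst;     /* bucket depth      */
    };

    /* returns 1 if a token was taken, 0 if the flow is rate-limited */
    static int penalty_take(struct penalty_bucket *b, unsigned long jiffies,
                            unsigned long hz)
    {
        if (b->tokens_avail < 1) {
            unsigned long age = jiffies - b->token_time;

            if (age > 10UL * hz)
                age = 10UL * hz;                  /* same clamp as above */
            b->tokens_avail = (age * b->penalty_rate) / hz;
            if (b->tokens_avail > b->penalty_burst)
                b->tokens_avail = b->penalty_burst;
            b->token_time = jiffies;
            if (b->tokens_avail < 1)
                return 0;
        }
        b->tokens_avail--;
        return 1;
    }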
q->qdisc; q 290 net/sched/sch_sfb.c u32 slot = q->slot; q 293 net/sched/sch_sfb.c if (unlikely(sch->q.qlen >= q->limit)) { q 295 net/sched/sch_sfb.c q->stats.queuedrop++; q 299 net/sched/sch_sfb.c if (q->rehash_interval > 0) { q 300 net/sched/sch_sfb.c unsigned long limit = q->rehash_time + q->rehash_interval; q 303 net/sched/sch_sfb.c sfb_swap_slot(q); q 304 net/sched/sch_sfb.c q->rehash_time = jiffies; q 305 net/sched/sch_sfb.c } else if (unlikely(!q->double_buffering && q->warmup_time > 0 && q 306 net/sched/sch_sfb.c time_after(jiffies, limit - q->warmup_time))) { q 307 net/sched/sch_sfb.c q->double_buffering = true; q 311 net/sched/sch_sfb.c fl = rcu_dereference_bh(q->filter_list); q 318 net/sched/sch_sfb.c sfbhash = siphash_1u32(salt, &q->bins[slot].perturbation); q 320 net/sched/sch_sfb.c sfbhash = skb_get_hash_perturb(skb, &q->bins[slot].perturbation); q 330 net/sched/sch_sfb.c struct sfb_bucket *b = &q->bins[slot].bins[i][hash]; q 334 net/sched/sch_sfb.c decrement_prob(b, q); q 335 net/sched/sch_sfb.c else if (b->qlen >= q->bin_size) q 336 net/sched/sch_sfb.c increment_prob(b, q); q 346 net/sched/sch_sfb.c if (unlikely(minqlen >= q->max)) { q 348 net/sched/sch_sfb.c q->stats.bucketdrop++; q 354 net/sched/sch_sfb.c if (q->double_buffering) { q 356 net/sched/sch_sfb.c &q->bins[slot].perturbation); q 363 net/sched/sch_sfb.c struct sfb_bucket *b = &q->bins[slot].bins[i][hash]; q 367 net/sched/sch_sfb.c decrement_prob(b, q); q 368 net/sched/sch_sfb.c else if (b->qlen >= q->bin_size) q 369 net/sched/sch_sfb.c increment_prob(b, q); q 372 net/sched/sch_sfb.c if (sfb_rate_limit(skb, q)) { q 374 net/sched/sch_sfb.c q->stats.penaltydrop++; q 389 net/sched/sch_sfb.c q->stats.earlydrop++; q 394 net/sched/sch_sfb.c q->stats.marked++; q 396 net/sched/sch_sfb.c q->stats.earlydrop++; q 405 net/sched/sch_sfb.c sch->q.qlen++; q 406 net/sched/sch_sfb.c increment_qlen(skb, q); q 408 net/sched/sch_sfb.c q->stats.childdrop++; q 425 net/sched/sch_sfb.c struct sfb_sched_data *q = qdisc_priv(sch); q 426 net/sched/sch_sfb.c struct Qdisc *child = q->qdisc; q 429 net/sched/sch_sfb.c skb = child->dequeue(q->qdisc); q 434 net/sched/sch_sfb.c sch->q.qlen--; q 435 net/sched/sch_sfb.c decrement_qlen(skb, q); q 443 net/sched/sch_sfb.c struct sfb_sched_data *q = qdisc_priv(sch); q 444 net/sched/sch_sfb.c struct Qdisc *child = q->qdisc; q 453 net/sched/sch_sfb.c struct sfb_sched_data *q = qdisc_priv(sch); q 455 net/sched/sch_sfb.c qdisc_reset(q->qdisc); q 457 net/sched/sch_sfb.c sch->q.qlen = 0; q 458 net/sched/sch_sfb.c q->slot = 0; q 459 net/sched/sch_sfb.c q->double_buffering = false; q 460 net/sched/sch_sfb.c sfb_zero_all_buckets(q); q 461 net/sched/sch_sfb.c sfb_init_perturbation(0, q); q 466 net/sched/sch_sfb.c struct sfb_sched_data *q = qdisc_priv(sch); q 468 net/sched/sch_sfb.c tcf_block_put(q->block); q 469 net/sched/sch_sfb.c qdisc_put(q->qdisc); q 491 net/sched/sch_sfb.c struct sfb_sched_data *q = qdisc_priv(sch); q 522 net/sched/sch_sfb.c qdisc_purge_queue(q->qdisc); q 523 net/sched/sch_sfb.c old = q->qdisc; q 524 net/sched/sch_sfb.c q->qdisc = child; q 526 net/sched/sch_sfb.c q->rehash_interval = msecs_to_jiffies(ctl->rehash_interval); q 527 net/sched/sch_sfb.c q->warmup_time = msecs_to_jiffies(ctl->warmup_time); q 528 net/sched/sch_sfb.c q->rehash_time = jiffies; q 529 net/sched/sch_sfb.c q->limit = limit; q 530 net/sched/sch_sfb.c q->increment = ctl->increment; q 531 net/sched/sch_sfb.c q->decrement = ctl->decrement; q 532 net/sched/sch_sfb.c q->max = ctl->max; q 533 net/sched/sch_sfb.c q->bin_size = 
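The enqueue loops above hash each packet into one bucket per SFB level; empty buckets decay their marking probability, full ones grow it, and the minimum bucket qlen across levels is what can trigger the bucketdrop path. A sketch with assumed level and bucket counts (a 32-bit hash consumed 4 bits per level):

    #include <stdint.h>

    #define SFB_LEVELS 8
    #define SFB_BITS   4

    struct sfb_bucket { uint16_t qlen; uint32_t p_mark; };

    static uint32_t sfb_scan(struct sfb_bucket b[SFB_LEVELS][1 << SFB_BITS],
                             uint32_t sfbhash, uint16_t bin_size,
                             uint32_t inc, uint32_t dec)
    {
        uint32_t minqlen = ~0U;

        for (int i = 0; i < SFB_LEVELS; i++) {
            struct sfb_bucket *bk = &b[i][sfbhash & ((1 << SFB_BITS) - 1)];

            sfbhash >>= SFB_BITS;            /* next level, next nibble */
            if (bk->qlen == 0)
                bk->p_mark = bk->p_mark > dec ? bk->p_mark - dec : 0;
            else if (bk->qlen >= bin_size)
                bk->p_mark += inc;           /* the kernel saturates this */
            if (bk->qlen < minqlen)
                minqlen = bk->qlen;
        }
        return minqlen;                      /* >= q->max means bucketdrop */
    }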
ctl->bin_size; q 534 net/sched/sch_sfb.c q->penalty_rate = ctl->penalty_rate; q 535 net/sched/sch_sfb.c q->penalty_burst = ctl->penalty_burst; q 536 net/sched/sch_sfb.c q->tokens_avail = ctl->penalty_burst; q 537 net/sched/sch_sfb.c q->token_time = jiffies; q 539 net/sched/sch_sfb.c q->slot = 0; q 540 net/sched/sch_sfb.c q->double_buffering = false; q 541 net/sched/sch_sfb.c sfb_zero_all_buckets(q); q 542 net/sched/sch_sfb.c sfb_init_perturbation(0, q); q 543 net/sched/sch_sfb.c sfb_init_perturbation(1, q); q 554 net/sched/sch_sfb.c struct sfb_sched_data *q = qdisc_priv(sch); q 557 net/sched/sch_sfb.c err = tcf_block_get(&q->block, &q->filter_list, sch, extack); q 561 net/sched/sch_sfb.c q->qdisc = &noop_qdisc; q 567 net/sched/sch_sfb.c struct sfb_sched_data *q = qdisc_priv(sch); q 570 net/sched/sch_sfb.c .rehash_interval = jiffies_to_msecs(q->rehash_interval), q 571 net/sched/sch_sfb.c .warmup_time = jiffies_to_msecs(q->warmup_time), q 572 net/sched/sch_sfb.c .limit = q->limit, q 573 net/sched/sch_sfb.c .max = q->max, q 574 net/sched/sch_sfb.c .bin_size = q->bin_size, q 575 net/sched/sch_sfb.c .increment = q->increment, q 576 net/sched/sch_sfb.c .decrement = q->decrement, q 577 net/sched/sch_sfb.c .penalty_rate = q->penalty_rate, q 578 net/sched/sch_sfb.c .penalty_burst = q->penalty_burst, q 581 net/sched/sch_sfb.c sch->qstats.backlog = q->qdisc->qstats.backlog; q 596 net/sched/sch_sfb.c struct sfb_sched_data *q = qdisc_priv(sch); q 598 net/sched/sch_sfb.c .earlydrop = q->stats.earlydrop, q 599 net/sched/sch_sfb.c .penaltydrop = q->stats.penaltydrop, q 600 net/sched/sch_sfb.c .bucketdrop = q->stats.bucketdrop, q 601 net/sched/sch_sfb.c .queuedrop = q->stats.queuedrop, q 602 net/sched/sch_sfb.c .childdrop = q->stats.childdrop, q 603 net/sched/sch_sfb.c .marked = q->stats.marked, q 606 net/sched/sch_sfb.c st.maxqlen = sfb_compute_qlen(&st.maxprob, &st.avgprob, q); q 620 net/sched/sch_sfb.c struct sfb_sched_data *q = qdisc_priv(sch); q 625 net/sched/sch_sfb.c *old = qdisc_replace(sch, new, &q->qdisc); q 631 net/sched/sch_sfb.c struct sfb_sched_data *q = qdisc_priv(sch); q 633 net/sched/sch_sfb.c return q->qdisc; q 672 net/sched/sch_sfb.c struct sfb_sched_data *q = qdisc_priv(sch); q 676 net/sched/sch_sfb.c return q->block; q 150 net/sched/sch_sfq.c static inline struct sfq_head *sfq_dep_head(struct sfq_sched_data *q, sfq_index val) q 153 net/sched/sch_sfq.c return &q->slots[val].dep; q 154 net/sched/sch_sfq.c return &q->dep[val - SFQ_MAX_FLOWS]; q 157 net/sched/sch_sfq.c static unsigned int sfq_hash(const struct sfq_sched_data *q, q 160 net/sched/sch_sfq.c return skb_get_hash_perturb(skb, &q->perturbation) & (q->divisor - 1); q 166 net/sched/sch_sfq.c struct sfq_sched_data *q = qdisc_priv(sch); q 173 net/sched/sch_sfq.c TC_H_MIN(skb->priority) <= q->divisor) q 176 net/sched/sch_sfq.c fl = rcu_dereference_bh(q->filter_list); q 178 net/sched/sch_sfq.c return sfq_hash(q, skb) + 1; q 194 net/sched/sch_sfq.c if (TC_H_MIN(res.classid) <= q->divisor) q 203 net/sched/sch_sfq.c static inline void sfq_link(struct sfq_sched_data *q, sfq_index x) q 206 net/sched/sch_sfq.c struct sfq_slot *slot = &q->slots[x]; q 210 net/sched/sch_sfq.c n = q->dep[qlen].next; q 215 net/sched/sch_sfq.c q->dep[qlen].next = x; /* sfq_dep_head(q, p)->next = x */ q 216 net/sched/sch_sfq.c sfq_dep_head(q, n)->prev = x; q 219 net/sched/sch_sfq.c #define sfq_unlink(q, x, n, p) \ q 221 net/sched/sch_sfq.c n = q->slots[x].dep.next; \ q 222 net/sched/sch_sfq.c p = q->slots[x].dep.prev; \ q 223 net/sched/sch_sfq.c sfq_dep_head(q, 
p)->next = n; \ q 224 net/sched/sch_sfq.c sfq_dep_head(q, n)->prev = p; \ q 228 net/sched/sch_sfq.c static inline void sfq_dec(struct sfq_sched_data *q, sfq_index x) q 233 net/sched/sch_sfq.c sfq_unlink(q, x, n, p); q 235 net/sched/sch_sfq.c d = q->slots[x].qlen--; q 236 net/sched/sch_sfq.c if (n == p && q->cur_depth == d) q 237 net/sched/sch_sfq.c q->cur_depth--; q 238 net/sched/sch_sfq.c sfq_link(q, x); q 241 net/sched/sch_sfq.c static inline void sfq_inc(struct sfq_sched_data *q, sfq_index x) q 246 net/sched/sch_sfq.c sfq_unlink(q, x, n, p); q 248 net/sched/sch_sfq.c d = ++q->slots[x].qlen; q 249 net/sched/sch_sfq.c if (q->cur_depth < d) q 250 net/sched/sch_sfq.c q->cur_depth = d; q 251 net/sched/sch_sfq.c sfq_link(q, x); q 295 net/sched/sch_sfq.c struct sfq_sched_data *q = qdisc_priv(sch); q 296 net/sched/sch_sfq.c sfq_index x, d = q->cur_depth; q 303 net/sched/sch_sfq.c x = q->dep[d].next; q 304 net/sched/sch_sfq.c slot = &q->slots[x]; q 306 net/sched/sch_sfq.c skb = q->headdrop ? slot_dequeue_head(slot) : slot_dequeue_tail(slot); q 309 net/sched/sch_sfq.c sfq_dec(q, x); q 310 net/sched/sch_sfq.c sch->q.qlen--; q 318 net/sched/sch_sfq.c x = q->tail->next; q 319 net/sched/sch_sfq.c slot = &q->slots[x]; q 320 net/sched/sch_sfq.c q->tail->next = slot->next; q 321 net/sched/sch_sfq.c q->ht[slot->hash] = SFQ_EMPTY_SLOT; q 329 net/sched/sch_sfq.c static int sfq_prob_mark(const struct sfq_sched_data *q) q 331 net/sched/sch_sfq.c return q->flags & TC_RED_ECN; q 335 net/sched/sch_sfq.c static int sfq_hard_mark(const struct sfq_sched_data *q) q 337 net/sched/sch_sfq.c return (q->flags & (TC_RED_ECN | TC_RED_HARDDROP)) == TC_RED_ECN; q 340 net/sched/sch_sfq.c static int sfq_headdrop(const struct sfq_sched_data *q) q 342 net/sched/sch_sfq.c return q->headdrop; q 348 net/sched/sch_sfq.c struct sfq_sched_data *q = qdisc_priv(sch); q 365 net/sched/sch_sfq.c x = q->ht[hash]; q 366 net/sched/sch_sfq.c slot = &q->slots[x]; q 368 net/sched/sch_sfq.c x = q->dep[0].next; /* get a free slot */ q 371 net/sched/sch_sfq.c q->ht[hash] = x; q 372 net/sched/sch_sfq.c slot = &q->slots[x]; q 378 net/sched/sch_sfq.c if (q->red_parms) { q 379 net/sched/sch_sfq.c slot->vars.qavg = red_calc_qavg_no_idle_time(q->red_parms, q 382 net/sched/sch_sfq.c switch (red_action(q->red_parms, q 390 net/sched/sch_sfq.c if (sfq_prob_mark(q)) { q 392 net/sched/sch_sfq.c if (sfq_headdrop(q) && q 394 net/sched/sch_sfq.c q->stats.prob_mark_head++; q 398 net/sched/sch_sfq.c q->stats.prob_mark++; q 402 net/sched/sch_sfq.c q->stats.prob_drop++; q 407 net/sched/sch_sfq.c if (sfq_hard_mark(q)) { q 409 net/sched/sch_sfq.c if (sfq_headdrop(q) && q 411 net/sched/sch_sfq.c q->stats.forced_mark_head++; q 415 net/sched/sch_sfq.c q->stats.forced_mark++; q 419 net/sched/sch_sfq.c q->stats.forced_drop++; q 424 net/sched/sch_sfq.c if (slot->qlen >= q->maxdepth) { q 426 net/sched/sch_sfq.c if (!sfq_headdrop(q)) q 445 net/sched/sch_sfq.c sfq_inc(q, x); q 447 net/sched/sch_sfq.c if (q->tail == NULL) { /* It is the first flow */ q 450 net/sched/sch_sfq.c slot->next = q->tail->next; q 451 net/sched/sch_sfq.c q->tail->next = x; q 457 net/sched/sch_sfq.c q->tail = slot; q 459 net/sched/sch_sfq.c slot->allot = q->scaled_quantum; q 461 net/sched/sch_sfq.c if (++sch->q.qlen <= q->limit) q 482 net/sched/sch_sfq.c struct sfq_sched_data *q = qdisc_priv(sch); q 488 net/sched/sch_sfq.c if (q->tail == NULL) q 492 net/sched/sch_sfq.c a = q->tail->next; q 493 net/sched/sch_sfq.c slot = &q->slots[a]; q 495 net/sched/sch_sfq.c q->tail = slot; q 496 net/sched/sch_sfq.c 
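sfq_link, sfq_inc and sfq_dec above maintain one doubly-linked ring of slots per queue depth (dep[d]), with cur_depth tracking the deepest nonempty ring, so the drop path finds a fullest flow in O(1); indices at or past SFQ_MAX_FLOWS name the ring heads themselves, per sfq_dep_head. The invariant in miniature:

    #include <stdint.h>

    struct sfq_head { uint16_t next, prev; };  /* intrusive ring links */

    /* dep[d] heads the ring of slots currently holding d packets */
    static uint16_t sfq_drop_victim(const struct sfq_head *dep,
                                    unsigned int cur_depth)
    {
        return dep[cur_depth].next;            /* any fullest slot, O(1) */
    }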
slot->allot += q->scaled_quantum; q 500 net/sched/sch_sfq.c sfq_dec(q, a); q 502 net/sched/sch_sfq.c sch->q.qlen--; q 507 net/sched/sch_sfq.c q->ht[slot->hash] = SFQ_EMPTY_SLOT; q 510 net/sched/sch_sfq.c q->tail = NULL; /* no more active slots */ q 513 net/sched/sch_sfq.c q->tail->next = next_a; q 537 net/sched/sch_sfq.c struct sfq_sched_data *q = qdisc_priv(sch); q 547 net/sched/sch_sfq.c for (i = 0; i < q->maxflows; i++) { q 548 net/sched/sch_sfq.c slot = &q->slots[i]; q 553 net/sched/sch_sfq.c sfq_dec(q, i); q 558 net/sched/sch_sfq.c q->ht[slot->hash] = SFQ_EMPTY_SLOT; q 560 net/sched/sch_sfq.c q->tail = NULL; q 563 net/sched/sch_sfq.c unsigned int hash = sfq_hash(q, skb); q 564 net/sched/sch_sfq.c sfq_index x = q->ht[hash]; q 566 net/sched/sch_sfq.c slot = &q->slots[x]; q 568 net/sched/sch_sfq.c x = q->dep[0].next; /* get a free slot */ q 577 net/sched/sch_sfq.c q->ht[hash] = x; q 578 net/sched/sch_sfq.c slot = &q->slots[x]; q 581 net/sched/sch_sfq.c if (slot->qlen >= q->maxdepth) q 584 net/sched/sch_sfq.c if (q->red_parms) q 585 net/sched/sch_sfq.c slot->vars.qavg = red_calc_qavg(q->red_parms, q 589 net/sched/sch_sfq.c sfq_inc(q, x); q 591 net/sched/sch_sfq.c if (q->tail == NULL) { /* It is the first flow */ q 594 net/sched/sch_sfq.c slot->next = q->tail->next; q 595 net/sched/sch_sfq.c q->tail->next = x; q 597 net/sched/sch_sfq.c q->tail = slot; q 598 net/sched/sch_sfq.c slot->allot = q->scaled_quantum; q 601 net/sched/sch_sfq.c sch->q.qlen -= dropped; q 607 net/sched/sch_sfq.c struct sfq_sched_data *q = from_timer(q, t, perturb_timer); q 608 net/sched/sch_sfq.c struct Qdisc *sch = q->sch; q 614 net/sched/sch_sfq.c q->perturbation = nkey; q 615 net/sched/sch_sfq.c if (!q->filter_list && q->tail) q 619 net/sched/sch_sfq.c if (q->perturb_period) q 620 net/sched/sch_sfq.c mod_timer(&q->perturb_timer, jiffies + q->perturb_period); q 625 net/sched/sch_sfq.c struct sfq_sched_data *q = qdisc_priv(sch); q 659 net/sched/sch_sfq.c q->quantum = ctl->quantum; q 660 net/sched/sch_sfq.c q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum); q 662 net/sched/sch_sfq.c q->perturb_period = ctl->perturb_period * HZ; q 664 net/sched/sch_sfq.c q->maxflows = min_t(u32, ctl->flows, SFQ_MAX_FLOWS); q 666 net/sched/sch_sfq.c q->divisor = ctl->divisor; q 667 net/sched/sch_sfq.c q->maxflows = min_t(u32, q->maxflows, q->divisor); q 671 net/sched/sch_sfq.c q->maxdepth = min_t(u32, ctl_v1->depth, SFQ_MAX_DEPTH); q 673 net/sched/sch_sfq.c swap(q->red_parms, p); q 674 net/sched/sch_sfq.c red_set_parms(q->red_parms, q 681 net/sched/sch_sfq.c q->flags = ctl_v1->flags; q 682 net/sched/sch_sfq.c q->headdrop = ctl_v1->headdrop; q 685 net/sched/sch_sfq.c q->limit = min_t(u32, ctl->limit, q->maxdepth * q->maxflows); q 686 net/sched/sch_sfq.c q->maxflows = min_t(u32, q->maxflows, q->limit); q 689 net/sched/sch_sfq.c qlen = sch->q.qlen; q 690 net/sched/sch_sfq.c while (sch->q.qlen > q->limit) { q 697 net/sched/sch_sfq.c qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped); q 699 net/sched/sch_sfq.c del_timer(&q->perturb_timer); q 700 net/sched/sch_sfq.c if (q->perturb_period) { q 701 net/sched/sch_sfq.c mod_timer(&q->perturb_timer, jiffies + q->perturb_period); q 702 net/sched/sch_sfq.c get_random_bytes(&q->perturbation, sizeof(q->perturbation)); q 721 net/sched/sch_sfq.c struct sfq_sched_data *q = qdisc_priv(sch); q 723 net/sched/sch_sfq.c tcf_block_put(q->block); q 724 net/sched/sch_sfq.c q->perturb_period = 0; q 725 net/sched/sch_sfq.c del_timer_sync(&q->perturb_timer); q 726 net/sched/sch_sfq.c sfq_free(q->ht); q 727 
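The dequeue fragments above ("slot->allot += q->scaled_quantum", rotate to tail) are a deficit round robin over a singly linked ring of active flows. A sketch of the rotation step, with the caller expected to debit the dequeued packet's scaled length:

    struct sfq_flow {
        int allot;                 /* scaled byte credit */
        struct sfq_flow *next;     /* ring: tail->next is the head */
    };

    static struct sfq_flow *sfq_pick(struct sfq_flow **tail, int scaled_quantum)
    {
        struct sfq_flow *slot = (*tail)->next;

        while (slot->allot <= 0) { /* exhausted: recharge and rotate */
            slot->allot += scaled_quantum;
            *tail = slot;
            slot = slot->next;
        }
        return slot;               /* caller: slot->allot -= scaled_len */
    }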
net/sched/sch_sfq.c sfq_free(q->slots); q 728 net/sched/sch_sfq.c kfree(q->red_parms); q 734 net/sched/sch_sfq.c struct sfq_sched_data *q = qdisc_priv(sch); q 738 net/sched/sch_sfq.c q->sch = sch; q 739 net/sched/sch_sfq.c timer_setup(&q->perturb_timer, sfq_perturbation, TIMER_DEFERRABLE); q 741 net/sched/sch_sfq.c err = tcf_block_get(&q->block, &q->filter_list, sch, extack); q 746 net/sched/sch_sfq.c q->dep[i].next = i + SFQ_MAX_FLOWS; q 747 net/sched/sch_sfq.c q->dep[i].prev = i + SFQ_MAX_FLOWS; q 750 net/sched/sch_sfq.c q->limit = SFQ_MAX_DEPTH; q 751 net/sched/sch_sfq.c q->maxdepth = SFQ_MAX_DEPTH; q 752 net/sched/sch_sfq.c q->cur_depth = 0; q 753 net/sched/sch_sfq.c q->tail = NULL; q 754 net/sched/sch_sfq.c q->divisor = SFQ_DEFAULT_HASH_DIVISOR; q 755 net/sched/sch_sfq.c q->maxflows = SFQ_DEFAULT_FLOWS; q 756 net/sched/sch_sfq.c q->quantum = psched_mtu(qdisc_dev(sch)); q 757 net/sched/sch_sfq.c q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum); q 758 net/sched/sch_sfq.c q->perturb_period = 0; q 759 net/sched/sch_sfq.c get_random_bytes(&q->perturbation, sizeof(q->perturbation)); q 767 net/sched/sch_sfq.c q->ht = sfq_alloc(sizeof(q->ht[0]) * q->divisor); q 768 net/sched/sch_sfq.c q->slots = sfq_alloc(sizeof(q->slots[0]) * q->maxflows); q 769 net/sched/sch_sfq.c if (!q->ht || !q->slots) { q 774 net/sched/sch_sfq.c for (i = 0; i < q->divisor; i++) q 775 net/sched/sch_sfq.c q->ht[i] = SFQ_EMPTY_SLOT; q 777 net/sched/sch_sfq.c for (i = 0; i < q->maxflows; i++) { q 778 net/sched/sch_sfq.c slot_queue_init(&q->slots[i]); q 779 net/sched/sch_sfq.c sfq_link(q, i); q 781 net/sched/sch_sfq.c if (q->limit >= 1) q 790 net/sched/sch_sfq.c struct sfq_sched_data *q = qdisc_priv(sch); q 793 net/sched/sch_sfq.c struct red_parms *p = q->red_parms; q 796 net/sched/sch_sfq.c opt.v0.quantum = q->quantum; q 797 net/sched/sch_sfq.c opt.v0.perturb_period = q->perturb_period / HZ; q 798 net/sched/sch_sfq.c opt.v0.limit = q->limit; q 799 net/sched/sch_sfq.c opt.v0.divisor = q->divisor; q 800 net/sched/sch_sfq.c opt.v0.flows = q->maxflows; q 801 net/sched/sch_sfq.c opt.depth = q->maxdepth; q 802 net/sched/sch_sfq.c opt.headdrop = q->headdrop; q 812 net/sched/sch_sfq.c memcpy(&opt.stats, &q->stats, sizeof(opt.stats)); q 813 net/sched/sch_sfq.c opt.flags = q->flags; q 841 net/sched/sch_sfq.c static void sfq_unbind(struct Qdisc *q, unsigned long cl) q 848 net/sched/sch_sfq.c struct sfq_sched_data *q = qdisc_priv(sch); q 852 net/sched/sch_sfq.c return q->block; q 865 net/sched/sch_sfq.c struct sfq_sched_data *q = qdisc_priv(sch); q 866 net/sched/sch_sfq.c sfq_index idx = q->ht[cl - 1]; q 871 net/sched/sch_sfq.c const struct sfq_slot *slot = &q->slots[idx]; q 884 net/sched/sch_sfq.c struct sfq_sched_data *q = qdisc_priv(sch); q 890 net/sched/sch_sfq.c for (i = 0; i < q->divisor; i++) { q 891 net/sched/sch_sfq.c if (q->ht[i] == SFQ_EMPTY_SLOT || q 40 net/sched/sch_skbprio.c static u16 calc_new_high_prio(const struct skbprio_sched_data *q) q 44 net/sched/sch_skbprio.c for (prio = q->highest_prio - 1; prio >= q->lowest_prio; prio--) { q 45 net/sched/sch_skbprio.c if (!skb_queue_empty(&q->qdiscs[prio])) q 53 net/sched/sch_skbprio.c static u16 calc_new_low_prio(const struct skbprio_sched_data *q) q 57 net/sched/sch_skbprio.c for (prio = q->lowest_prio + 1; prio <= q->highest_prio; prio++) { q 58 net/sched/sch_skbprio.c if (!skb_queue_empty(&q->qdiscs[prio])) q 72 net/sched/sch_skbprio.c struct skbprio_sched_data *q = qdisc_priv(sch); q 81 net/sched/sch_skbprio.c qdisc = &q->qdiscs[prio]; q 82 net/sched/sch_skbprio.c if 
(sch->q.qlen < sch->limit) { q 85 net/sched/sch_skbprio.c q->qstats[prio].backlog += qdisc_pkt_len(skb); q 88 net/sched/sch_skbprio.c if (prio > q->highest_prio) q 89 net/sched/sch_skbprio.c q->highest_prio = prio; q 91 net/sched/sch_skbprio.c if (prio < q->lowest_prio) q 92 net/sched/sch_skbprio.c q->lowest_prio = prio; q 94 net/sched/sch_skbprio.c sch->q.qlen++; q 99 net/sched/sch_skbprio.c lp = q->lowest_prio; q 101 net/sched/sch_skbprio.c q->qstats[prio].drops++; q 102 net/sched/sch_skbprio.c q->qstats[prio].overlimits++; q 108 net/sched/sch_skbprio.c q->qstats[prio].backlog += qdisc_pkt_len(skb); q 111 net/sched/sch_skbprio.c lp_qdisc = &q->qdiscs[lp]; q 117 net/sched/sch_skbprio.c q->qstats[lp].backlog -= qdisc_pkt_len(to_drop); q 118 net/sched/sch_skbprio.c q->qstats[lp].drops++; q 119 net/sched/sch_skbprio.c q->qstats[lp].overlimits++; q 123 net/sched/sch_skbprio.c if (q->lowest_prio == q->highest_prio) { q 125 net/sched/sch_skbprio.c BUG_ON(sch->q.qlen != 1); q 126 net/sched/sch_skbprio.c q->lowest_prio = prio; q 127 net/sched/sch_skbprio.c q->highest_prio = prio; q 129 net/sched/sch_skbprio.c q->lowest_prio = calc_new_low_prio(q); q 133 net/sched/sch_skbprio.c if (prio > q->highest_prio) q 134 net/sched/sch_skbprio.c q->highest_prio = prio; q 141 net/sched/sch_skbprio.c struct skbprio_sched_data *q = qdisc_priv(sch); q 142 net/sched/sch_skbprio.c struct sk_buff_head *hpq = &q->qdiscs[q->highest_prio]; q 148 net/sched/sch_skbprio.c sch->q.qlen--; q 152 net/sched/sch_skbprio.c q->qstats[q->highest_prio].backlog -= qdisc_pkt_len(skb); q 156 net/sched/sch_skbprio.c if (q->lowest_prio == q->highest_prio) { q 157 net/sched/sch_skbprio.c BUG_ON(sch->q.qlen); q 158 net/sched/sch_skbprio.c q->highest_prio = 0; q 159 net/sched/sch_skbprio.c q->lowest_prio = SKBPRIO_MAX_PRIORITY - 1; q 161 net/sched/sch_skbprio.c q->highest_prio = calc_new_high_prio(q); q 182 net/sched/sch_skbprio.c struct skbprio_sched_data *q = qdisc_priv(sch); q 187 net/sched/sch_skbprio.c __skb_queue_head_init(&q->qdiscs[prio]); q 189 net/sched/sch_skbprio.c memset(&q->qstats, 0, sizeof(q->qstats)); q 190 net/sched/sch_skbprio.c q->highest_prio = 0; q 191 net/sched/sch_skbprio.c q->lowest_prio = SKBPRIO_MAX_PRIORITY - 1; q 213 net/sched/sch_skbprio.c struct skbprio_sched_data *q = qdisc_priv(sch); q 217 net/sched/sch_skbprio.c sch->q.qlen = 0; q 220 net/sched/sch_skbprio.c __skb_queue_purge(&q->qdiscs[prio]); q 222 net/sched/sch_skbprio.c memset(&q->qstats, 0, sizeof(q->qstats)); q 223 net/sched/sch_skbprio.c q->highest_prio = 0; q 224 net/sched/sch_skbprio.c q->lowest_prio = SKBPRIO_MAX_PRIORITY - 1; q 229 net/sched/sch_skbprio.c struct skbprio_sched_data *q = qdisc_priv(sch); q 233 net/sched/sch_skbprio.c __skb_queue_purge(&q->qdiscs[prio]); q 256 net/sched/sch_skbprio.c struct skbprio_sched_data *q = qdisc_priv(sch); q 257 net/sched/sch_skbprio.c if (gnet_stats_copy_queue(d, NULL, &q->qstats[cl - 1], q 258 net/sched/sch_skbprio.c q->qstats[cl - 1].qlen) < 0) q 97 net/sched/sch_taprio.c static ktime_t taprio_get_time(struct taprio_sched *q) q 101 net/sched/sch_taprio.c switch (q->tk_offset) { q 105 net/sched/sch_taprio.c return ktime_mono_to_any(mono, q->tk_offset); q 127 net/sched/sch_taprio.c static void switch_schedules(struct taprio_sched *q, q 131 net/sched/sch_taprio.c rcu_assign_pointer(q->oper_sched, *admin); q 132 net/sched/sch_taprio.c rcu_assign_pointer(q->admin_sched, NULL); q 175 net/sched/sch_taprio.c static int length_to_duration(struct taprio_sched *q, int len) q 177 net/sched/sch_taprio.c return 
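The sch_skbprio.c fragments above implement "drop lowest to admit highest": when the qdisc is full, the arriving packet is dropped unless its priority exceeds the lowest occupied band, in which case one packet is evicted from that band and lowest_prio is rescanned. A sketch of the accounting (64 bands assumed, matching SKBPRIO_MAX_PRIORITY):

    #define SKBPRIO_MAX_PRIORITY 64

    struct skbprio_acct {
        unsigned int qlen[SKBPRIO_MAX_PRIORITY];
        int highest_prio, lowest_prio;
    };

    /* returns 0 if room was made at the lowest band, -1 if the
     * arriving packet is the one to drop */
    static int skbprio_make_room(struct skbprio_acct *q, int prio)
    {
        int lp = q->lowest_prio;

        if (prio <= lp)
            return -1;
        if (--q->qlen[lp] == 0) {         /* band drained: rescan upward */
            while (lp < q->highest_prio && q->qlen[lp] == 0)
                lp++;
            q->lowest_prio = lp;
        }
        return 0;
    }

The single-occupied-band case is special-cased in the kernel (both bounds snap to the new priority), which is what the BUG_ON(sch->q.qlen != 1) fragments above guard.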
div_u64(len * atomic64_read(&q->picos_per_byte), 1000); q 196 net/sched/sch_taprio.c struct taprio_sched *q = qdisc_priv(sch); q 203 net/sched/sch_taprio.c packet_transmit_time = length_to_duration(q, qdisc_pkt_len(skb)); q 263 net/sched/sch_taprio.c struct taprio_sched *q = qdisc_priv(sch); q 269 net/sched/sch_taprio.c sched = rcu_dereference(q->oper_sched); q 270 net/sched/sch_taprio.c admin = rcu_dereference(q->admin_sched); q 293 net/sched/sch_taprio.c static ktime_t get_tcp_tstamp(struct taprio_sched *q, struct sk_buff *skb) q 324 net/sched/sch_taprio.c return ktime_mono_to_any(skb->skb_mstamp_ns, q->tk_offset); q 345 net/sched/sch_taprio.c struct taprio_sched *q = qdisc_priv(sch); q 352 net/sched/sch_taprio.c now = taprio_get_time(q); q 353 net/sched/sch_taprio.c minimum_time = ktime_add_ns(now, q->txtime_delay); q 355 net/sched/sch_taprio.c tcp_tstamp = get_tcp_tstamp(q, skb); q 359 net/sched/sch_taprio.c admin = rcu_dereference(q->admin_sched); q 360 net/sched/sch_taprio.c sched = rcu_dereference(q->oper_sched); q 362 net/sched/sch_taprio.c switch_schedules(q, &admin, &sched); q 371 net/sched/sch_taprio.c packet_transmit_time = length_to_duration(q, len); q 416 net/sched/sch_taprio.c struct taprio_sched *q = qdisc_priv(sch); q 422 net/sched/sch_taprio.c child = q->qdiscs[queue]; q 429 net/sched/sch_taprio.c } else if (TXTIME_ASSIST_IS_ENABLED(q->flags)) { q 436 net/sched/sch_taprio.c sch->q.qlen++; q 443 net/sched/sch_taprio.c struct taprio_sched *q = qdisc_priv(sch); q 451 net/sched/sch_taprio.c entry = rcu_dereference(q->current_entry); q 459 net/sched/sch_taprio.c struct Qdisc *child = q->qdiscs[i]; q 470 net/sched/sch_taprio.c if (TXTIME_ASSIST_IS_ENABLED(q->flags)) q 487 net/sched/sch_taprio.c struct taprio_sched *q = qdisc_priv(sch); q 493 net/sched/sch_taprio.c struct Qdisc *child = q->qdiscs[i]; q 510 net/sched/sch_taprio.c struct taprio_sched *q = qdisc_priv(sch); q 512 net/sched/sch_taprio.c return q->peek(sch); q 515 net/sched/sch_taprio.c static void taprio_set_budget(struct taprio_sched *q, struct sched_entry *entry) q 519 net/sched/sch_taprio.c atomic64_read(&q->picos_per_byte))); q 524 net/sched/sch_taprio.c struct taprio_sched *q = qdisc_priv(sch); q 532 net/sched/sch_taprio.c entry = rcu_dereference(q->current_entry); q 544 net/sched/sch_taprio.c struct Qdisc *child = q->qdiscs[i]; q 553 net/sched/sch_taprio.c if (TXTIME_ASSIST_IS_ENABLED(q->flags)) { q 573 net/sched/sch_taprio.c guard = ktime_add_ns(taprio_get_time(q), q 574 net/sched/sch_taprio.c length_to_duration(q, len)); q 599 net/sched/sch_taprio.c sch->q.qlen--; q 612 net/sched/sch_taprio.c struct taprio_sched *q = qdisc_priv(sch); q 618 net/sched/sch_taprio.c struct Qdisc *child = q->qdiscs[i]; q 629 net/sched/sch_taprio.c sch->q.qlen--; q 639 net/sched/sch_taprio.c struct taprio_sched *q = qdisc_priv(sch); q 641 net/sched/sch_taprio.c return q->dequeue(sch); q 692 net/sched/sch_taprio.c struct taprio_sched *q = container_of(timer, struct taprio_sched, q 696 net/sched/sch_taprio.c struct Qdisc *sch = q->root; q 699 net/sched/sch_taprio.c spin_lock(&q->current_entry_lock); q 700 net/sched/sch_taprio.c entry = rcu_dereference_protected(q->current_entry, q 701 net/sched/sch_taprio.c lockdep_is_held(&q->current_entry_lock)); q 702 net/sched/sch_taprio.c oper = rcu_dereference_protected(q->oper_sched, q 703 net/sched/sch_taprio.c lockdep_is_held(&q->current_entry_lock)); q 704 net/sched/sch_taprio.c admin = rcu_dereference_protected(q->admin_sched, q 705 net/sched/sch_taprio.c 
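length_to_duration above is the complete conversion from bytes to wire time via the cached picos_per_byte; taprio_set_budget goes the other way, turning a gate-open interval into a byte budget. Both directions in one sketch (the budget formula is inferred from the fragments, not quoted):

    #include <stdint.h>

    /* bytes -> nanoseconds on the wire */
    static uint64_t length_to_duration_ns(uint64_t len, uint64_t picos_per_byte)
    {
        return len * picos_per_byte / 1000;           /* ps -> ns */
    }

    /* gate-open interval -> how many bytes fit through it */
    static uint32_t interval_to_budget(uint64_t interval_ns,
                                       uint64_t picos_per_byte)
    {
        return (uint32_t)(interval_ns * 1000 / picos_per_byte);
    }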
lockdep_is_held(&q->current_entry_lock)); q 708 net/sched/sch_taprio.c switch_schedules(q, &admin, &oper); q 740 net/sched/sch_taprio.c switch_schedules(q, &admin, &oper); q 744 net/sched/sch_taprio.c taprio_set_budget(q, next); q 747 net/sched/sch_taprio.c rcu_assign_pointer(q->current_entry, next); q 748 net/sched/sch_taprio.c spin_unlock(&q->current_entry_lock); q 750 net/sched/sch_taprio.c hrtimer_set_expires(&q->advance_timer, close_time); q 973 net/sched/sch_taprio.c struct taprio_sched *q = qdisc_priv(sch); q 978 net/sched/sch_taprio.c now = taprio_get_time(q); q 1003 net/sched/sch_taprio.c static void setup_first_close_time(struct taprio_sched *q, q 1018 net/sched/sch_taprio.c taprio_set_budget(q, first); q 1019 net/sched/sch_taprio.c rcu_assign_pointer(q->current_entry, NULL); q 1025 net/sched/sch_taprio.c struct taprio_sched *q = qdisc_priv(sch); q 1028 net/sched/sch_taprio.c if (FULL_OFFLOAD_IS_ENABLED(q->flags)) q 1031 net/sched/sch_taprio.c expires = hrtimer_get_expires(&q->advance_timer); q 1041 net/sched/sch_taprio.c hrtimer_start(&q->advance_timer, start, HRTIMER_MODE_ABS); q 1045 net/sched/sch_taprio.c struct taprio_sched *q) q 1062 net/sched/sch_taprio.c atomic64_set(&q->picos_per_byte, picos_per_byte); q 1064 net/sched/sch_taprio.c dev->name, (long long)atomic64_read(&q->picos_per_byte), q 1073 net/sched/sch_taprio.c struct taprio_sched *q; q 1082 net/sched/sch_taprio.c list_for_each_entry(q, &taprio_list, taprio_list) { q 1083 net/sched/sch_taprio.c qdev = qdisc_dev(q->root); q 1092 net/sched/sch_taprio.c taprio_set_picos_per_byte(dev, q); q 1097 net/sched/sch_taprio.c static void setup_txtime(struct taprio_sched *q, q 1164 net/sched/sch_taprio.c static void taprio_offload_config_changed(struct taprio_sched *q) q 1168 net/sched/sch_taprio.c spin_lock(&q->current_entry_lock); q 1170 net/sched/sch_taprio.c oper = rcu_dereference_protected(q->oper_sched, q 1171 net/sched/sch_taprio.c lockdep_is_held(&q->current_entry_lock)); q 1172 net/sched/sch_taprio.c admin = rcu_dereference_protected(q->admin_sched, q 1173 net/sched/sch_taprio.c lockdep_is_held(&q->current_entry_lock)); q 1175 net/sched/sch_taprio.c switch_schedules(q, &admin, &oper); q 1177 net/sched/sch_taprio.c spin_unlock(&q->current_entry_lock); q 1180 net/sched/sch_taprio.c static void taprio_sched_to_offload(struct taprio_sched *q, q 1206 net/sched/sch_taprio.c struct taprio_sched *q, q 1227 net/sched/sch_taprio.c taprio_sched_to_offload(q, sched, mqprio, offload); q 1243 net/sched/sch_taprio.c struct taprio_sched *q, q 1250 net/sched/sch_taprio.c if (!FULL_OFFLOAD_IS_ENABLED(q->flags)) q 1287 net/sched/sch_taprio.c struct taprio_sched *q = qdisc_priv(sch); q 1291 net/sched/sch_taprio.c if (FULL_OFFLOAD_IS_ENABLED(q->flags)) { q 1320 net/sched/sch_taprio.c (q->clockid != -1 && q->clockid != clockid)) { q 1329 net/sched/sch_taprio.c q->tk_offset = TK_OFFS_REAL; q 1332 net/sched/sch_taprio.c q->tk_offset = TK_OFFS_MAX; q 1335 net/sched/sch_taprio.c q->tk_offset = TK_OFFS_BOOT; q 1338 net/sched/sch_taprio.c q->tk_offset = TK_OFFS_TAI; q 1346 net/sched/sch_taprio.c q->clockid = clockid; q 1411 net/sched/sch_taprio.c struct taprio_sched *q = qdisc_priv(sch); q 1427 net/sched/sch_taprio.c q->flags, extack); q 1431 net/sched/sch_taprio.c q->flags = err; q 1433 net/sched/sch_taprio.c err = taprio_parse_mqprio_opt(dev, mqprio, extack, q->flags); q 1445 net/sched/sch_taprio.c oper = rcu_dereference(q->oper_sched); q 1446 net/sched/sch_taprio.c admin = rcu_dereference(q->admin_sched); q 1473 net/sched/sch_taprio.c 
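taprio_set_picos_per_byte, indexed above, derives its figure from the ethtool link speed; dimensionally the arithmetic reduces to 8e6/speed for a speed in Mbit/s. A hedged sketch of that reduction (the kernel's exact div64 expression is not shown in these fragments):

    #include <stdint.h>

    /* speed in Mbit/s -> picoseconds per byte; 1000 Mbit/s -> 8000 ps */
    static int64_t speed_to_picos_per_byte(unsigned int speed_mbps)
    {
        if (!speed_mbps)
            return 0;             /* unknown speed: caller keeps a default */
        return 8LL * 1000000 / speed_mbps;
    }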
taprio_set_picos_per_byte(dev, q); q 1488 net/sched/sch_taprio.c if (FULL_OFFLOAD_IS_ENABLED(q->flags)) q 1489 net/sched/sch_taprio.c err = taprio_enable_offload(dev, mqprio, q, new_admin, extack); q 1491 net/sched/sch_taprio.c err = taprio_disable_offload(dev, q, extack); q 1499 net/sched/sch_taprio.c if (!TXTIME_ASSIST_IS_ENABLED(q->flags)) { q 1505 net/sched/sch_taprio.c q->txtime_delay = nla_get_u32(tb[TCA_TAPRIO_ATTR_TXTIME_DELAY]); q 1508 net/sched/sch_taprio.c if (!TXTIME_ASSIST_IS_ENABLED(q->flags) && q 1509 net/sched/sch_taprio.c !FULL_OFFLOAD_IS_ENABLED(q->flags) && q 1510 net/sched/sch_taprio.c !hrtimer_active(&q->advance_timer)) { q 1511 net/sched/sch_taprio.c hrtimer_init(&q->advance_timer, q->clockid, HRTIMER_MODE_ABS); q 1512 net/sched/sch_taprio.c q->advance_timer.function = advance_sched; q 1515 net/sched/sch_taprio.c if (FULL_OFFLOAD_IS_ENABLED(q->flags)) { q 1516 net/sched/sch_taprio.c q->dequeue = taprio_dequeue_offload; q 1517 net/sched/sch_taprio.c q->peek = taprio_peek_offload; q 1522 net/sched/sch_taprio.c q->dequeue = taprio_dequeue_soft; q 1523 net/sched/sch_taprio.c q->peek = taprio_peek_soft; q 1532 net/sched/sch_taprio.c setup_txtime(q, new_admin, start); q 1534 net/sched/sch_taprio.c if (TXTIME_ASSIST_IS_ENABLED(q->flags)) { q 1536 net/sched/sch_taprio.c rcu_assign_pointer(q->oper_sched, new_admin); q 1542 net/sched/sch_taprio.c rcu_assign_pointer(q->admin_sched, new_admin); q 1546 net/sched/sch_taprio.c setup_first_close_time(q, new_admin, start); q 1549 net/sched/sch_taprio.c spin_lock_irqsave(&q->current_entry_lock, flags); q 1553 net/sched/sch_taprio.c rcu_assign_pointer(q->admin_sched, new_admin); q 1557 net/sched/sch_taprio.c spin_unlock_irqrestore(&q->current_entry_lock, flags); q 1559 net/sched/sch_taprio.c if (FULL_OFFLOAD_IS_ENABLED(q->flags)) q 1560 net/sched/sch_taprio.c taprio_offload_config_changed(q); q 1578 net/sched/sch_taprio.c struct taprio_sched *q = qdisc_priv(sch); q 1583 net/sched/sch_taprio.c list_del(&q->taprio_list); q 1586 net/sched/sch_taprio.c hrtimer_cancel(&q->advance_timer); q 1588 net/sched/sch_taprio.c taprio_disable_offload(dev, q, NULL); q 1590 net/sched/sch_taprio.c if (q->qdiscs) { q 1591 net/sched/sch_taprio.c for (i = 0; i < dev->num_tx_queues && q->qdiscs[i]; i++) q 1592 net/sched/sch_taprio.c qdisc_put(q->qdiscs[i]); q 1594 net/sched/sch_taprio.c kfree(q->qdiscs); q 1596 net/sched/sch_taprio.c q->qdiscs = NULL; q 1600 net/sched/sch_taprio.c if (q->oper_sched) q 1601 net/sched/sch_taprio.c call_rcu(&q->oper_sched->rcu, taprio_free_sched_cb); q 1603 net/sched/sch_taprio.c if (q->admin_sched) q 1604 net/sched/sch_taprio.c call_rcu(&q->admin_sched->rcu, taprio_free_sched_cb); q 1610 net/sched/sch_taprio.c struct taprio_sched *q = qdisc_priv(sch); q 1614 net/sched/sch_taprio.c spin_lock_init(&q->current_entry_lock); q 1616 net/sched/sch_taprio.c hrtimer_init(&q->advance_timer, CLOCK_TAI, HRTIMER_MODE_ABS); q 1617 net/sched/sch_taprio.c q->advance_timer.function = advance_sched; q 1619 net/sched/sch_taprio.c q->dequeue = taprio_dequeue_soft; q 1620 net/sched/sch_taprio.c q->peek = taprio_peek_soft; q 1622 net/sched/sch_taprio.c q->root = sch; q 1627 net/sched/sch_taprio.c q->clockid = -1; q 1628 net/sched/sch_taprio.c q->flags = TAPRIO_FLAGS_INVALID; q 1631 net/sched/sch_taprio.c list_add(&q->taprio_list, &taprio_list); q 1641 net/sched/sch_taprio.c q->qdiscs = kcalloc(dev->num_tx_queues, q 1642 net/sched/sch_taprio.c sizeof(q->qdiscs[0]), q 1645 net/sched/sch_taprio.c if (!q->qdiscs) q 1667 net/sched/sch_taprio.c 
q->qdiscs[i] = qdisc; q 1689 net/sched/sch_taprio.c struct taprio_sched *q = qdisc_priv(sch); q 1699 net/sched/sch_taprio.c *old = q->qdiscs[cl - 1]; q 1700 net/sched/sch_taprio.c q->qdiscs[cl - 1] = new; q 1779 net/sched/sch_taprio.c struct taprio_sched *q = qdisc_priv(sch); q 1787 net/sched/sch_taprio.c oper = rcu_dereference(q->oper_sched); q 1788 net/sched/sch_taprio.c admin = rcu_dereference(q->admin_sched); q 1805 net/sched/sch_taprio.c if (!FULL_OFFLOAD_IS_ENABLED(q->flags) && q 1806 net/sched/sch_taprio.c nla_put_s32(skb, TCA_TAPRIO_ATTR_SCHED_CLOCKID, q->clockid)) q 1809 net/sched/sch_taprio.c if (q->flags && nla_put_u32(skb, TCA_TAPRIO_ATTR_FLAGS, q->flags)) q 1812 net/sched/sch_taprio.c if (q->txtime_delay && q 1813 net/sched/sch_taprio.c nla_put_u32(skb, TCA_TAPRIO_ATTR_TXTIME_DELAY, q->txtime_delay)) q 146 net/sched/sch_tbf.c struct tbf_sched_data *q = qdisc_priv(sch); q 163 net/sched/sch_tbf.c ret = qdisc_enqueue(segs, q->qdisc, to_free); q 172 net/sched/sch_tbf.c sch->q.qlen += nb; q 182 net/sched/sch_tbf.c struct tbf_sched_data *q = qdisc_priv(sch); q 186 net/sched/sch_tbf.c if (qdisc_pkt_len(skb) > q->max_size) { q 188 net/sched/sch_tbf.c skb_gso_validate_mac_len(skb, q->max_size)) q 192 net/sched/sch_tbf.c ret = qdisc_enqueue(skb, q->qdisc, to_free); q 200 net/sched/sch_tbf.c sch->q.qlen++; q 204 net/sched/sch_tbf.c static bool tbf_peak_present(const struct tbf_sched_data *q) q 206 net/sched/sch_tbf.c return q->peak.rate_bytes_ps; q 211 net/sched/sch_tbf.c struct tbf_sched_data *q = qdisc_priv(sch); q 214 net/sched/sch_tbf.c skb = q->qdisc->ops->peek(q->qdisc); q 223 net/sched/sch_tbf.c toks = min_t(s64, now - q->t_c, q->buffer); q 225 net/sched/sch_tbf.c if (tbf_peak_present(q)) { q 226 net/sched/sch_tbf.c ptoks = toks + q->ptokens; q 227 net/sched/sch_tbf.c if (ptoks > q->mtu) q 228 net/sched/sch_tbf.c ptoks = q->mtu; q 229 net/sched/sch_tbf.c ptoks -= (s64) psched_l2t_ns(&q->peak, len); q 231 net/sched/sch_tbf.c toks += q->tokens; q 232 net/sched/sch_tbf.c if (toks > q->buffer) q 233 net/sched/sch_tbf.c toks = q->buffer; q 234 net/sched/sch_tbf.c toks -= (s64) psched_l2t_ns(&q->rate, len); q 237 net/sched/sch_tbf.c skb = qdisc_dequeue_peeked(q->qdisc); q 241 net/sched/sch_tbf.c q->t_c = now; q 242 net/sched/sch_tbf.c q->tokens = toks; q 243 net/sched/sch_tbf.c q->ptokens = ptoks; q 245 net/sched/sch_tbf.c sch->q.qlen--; q 250 net/sched/sch_tbf.c qdisc_watchdog_schedule_ns(&q->watchdog, q 271 net/sched/sch_tbf.c struct tbf_sched_data *q = qdisc_priv(sch); q 273 net/sched/sch_tbf.c qdisc_reset(q->qdisc); q 275 net/sched/sch_tbf.c sch->q.qlen = 0; q 276 net/sched/sch_tbf.c q->t_c = ktime_get_ns(); q 277 net/sched/sch_tbf.c q->tokens = q->buffer; q 278 net/sched/sch_tbf.c q->ptokens = q->mtu; q 279 net/sched/sch_tbf.c qdisc_watchdog_cancel(&q->watchdog); q 296 net/sched/sch_tbf.c struct tbf_sched_data *q = qdisc_priv(sch); q 372 net/sched/sch_tbf.c if (q->qdisc != &noop_qdisc) { q 373 net/sched/sch_tbf.c err = fifo_set_limit(q->qdisc, qopt->limit); q 390 net/sched/sch_tbf.c qdisc_tree_flush_backlog(q->qdisc); q 391 net/sched/sch_tbf.c qdisc_put(q->qdisc); q 392 net/sched/sch_tbf.c q->qdisc = child; q 394 net/sched/sch_tbf.c q->limit = qopt->limit; q 396 net/sched/sch_tbf.c q->mtu = mtu; q 398 net/sched/sch_tbf.c q->mtu = PSCHED_TICKS2NS(qopt->mtu); q 399 net/sched/sch_tbf.c q->max_size = max_size; q 401 net/sched/sch_tbf.c q->buffer = buffer; q 403 net/sched/sch_tbf.c q->buffer = PSCHED_TICKS2NS(qopt->buffer); q 404 net/sched/sch_tbf.c q->tokens = q->buffer; q 405 
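tbf_dequeue, indexed in full above, is the classic dual token bucket: elapsed time becomes credit (capped at buffer for the average rate and at mtu for the peak rate), and the packet leaves only if both buckets stay non-negative after debiting its transmission time. A user-space transcription with nanosecond tokens and illustrative names:

    #include <stdint.h>

    struct tbf { int64_t tokens, ptokens, buffer, mtu, t_c; };

    /* rate_ns/peak_ns: this packet's transmit time under each rate,
     * i.e. what psched_l2t_ns() computes; returns 1 to send now */
    static int tbf_admit(struct tbf *q, int64_t now,
                         int64_t rate_ns, int64_t peak_ns)
    {
        int64_t toks = now - q->t_c;
        int64_t ptoks;

        if (toks > q->buffer)
            toks = q->buffer;
        ptoks = toks + q->ptokens;
        if (ptoks > q->mtu)
            ptoks = q->mtu;
        ptoks -= peak_ns;

        toks += q->tokens;
        if (toks > q->buffer)
            toks = q->buffer;
        toks -= rate_ns;

        if (toks < 0 || ptoks < 0)
            return 0;             /* reschedule via the watchdog */
        q->t_c = now;
        q->tokens = toks;
        q->ptokens = ptoks;
        return 1;
    }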
q 407 net/sched/sch_tbf.c memcpy(&q->rate, &rate, sizeof(struct psched_ratecfg)); q 408 net/sched/sch_tbf.c memcpy(&q->peak, &peak, sizeof(struct psched_ratecfg)); q 419 net/sched/sch_tbf.c struct tbf_sched_data *q = qdisc_priv(sch); q 421 net/sched/sch_tbf.c qdisc_watchdog_init(&q->watchdog, sch); q 422 net/sched/sch_tbf.c q->qdisc = &noop_qdisc; q 427 net/sched/sch_tbf.c q->t_c = ktime_get_ns(); q 434 net/sched/sch_tbf.c struct tbf_sched_data *q = qdisc_priv(sch); q 436 net/sched/sch_tbf.c qdisc_watchdog_cancel(&q->watchdog); q 437 net/sched/sch_tbf.c qdisc_put(q->qdisc); q 442 net/sched/sch_tbf.c struct tbf_sched_data *q = qdisc_priv(sch); q 446 net/sched/sch_tbf.c sch->qstats.backlog = q->qdisc->qstats.backlog; q 451 net/sched/sch_tbf.c opt.limit = q->limit; q 452 net/sched/sch_tbf.c psched_ratecfg_getrate(&opt.rate, &q->rate); q 453 net/sched/sch_tbf.c if (tbf_peak_present(q)) q 454 net/sched/sch_tbf.c psched_ratecfg_getrate(&opt.peakrate, &q->peak); q 457 net/sched/sch_tbf.c opt.mtu = PSCHED_NS2TICKS(q->mtu); q 458 net/sched/sch_tbf.c opt.buffer = PSCHED_NS2TICKS(q->buffer); q 461 net/sched/sch_tbf.c if (q->rate.rate_bytes_ps >= (1ULL << 32) && q 462 net/sched/sch_tbf.c nla_put_u64_64bit(skb, TCA_TBF_RATE64, q->rate.rate_bytes_ps, q 465 net/sched/sch_tbf.c if (tbf_peak_present(q) && q 466 net/sched/sch_tbf.c q->peak.rate_bytes_ps >= (1ULL << 32) && q 467 net/sched/sch_tbf.c nla_put_u64_64bit(skb, TCA_TBF_PRATE64, q->peak.rate_bytes_ps, q 481 net/sched/sch_tbf.c struct tbf_sched_data *q = qdisc_priv(sch); q 484 net/sched/sch_tbf.c tcm->tcm_info = q->qdisc->handle; q 492 net/sched/sch_tbf.c struct tbf_sched_data *q = qdisc_priv(sch); q 497 net/sched/sch_tbf.c *old = qdisc_replace(sch, new, &q->qdisc); q 503 net/sched/sch_tbf.c struct tbf_sched_data *q = qdisc_priv(sch); q 504 net/sched/sch_tbf.c return q->qdisc; q 66 net/sched/sch_teql.c struct sk_buff_head q; q 69 net/sched/sch_teql.c #define NEXT_SLAVE(q) (((struct teql_sched_data *)qdisc_priv(q))->next) q 79 net/sched/sch_teql.c struct teql_sched_data *q = qdisc_priv(sch); q 81 net/sched/sch_teql.c if (q->q.qlen < dev->tx_queue_len) { q 82 net/sched/sch_teql.c __skb_queue_tail(&q->q, skb); q 95 net/sched/sch_teql.c struct Qdisc *q; q 97 net/sched/sch_teql.c skb = __skb_dequeue(&dat->q); q 99 net/sched/sch_teql.c q = rcu_dereference_bh(dat_queue->qdisc); q 102 net/sched/sch_teql.c struct net_device *m = qdisc_dev(q); q 110 net/sched/sch_teql.c sch->q.qlen = dat->q.qlen + q->q.qlen; q 126 net/sched/sch_teql.c skb_queue_purge(&dat->q); q 127 net/sched/sch_teql.c sch->q.qlen = 0; q 133 net/sched/sch_teql.c struct Qdisc *q, *prev; q 140 net/sched/sch_teql.c q = NEXT_SLAVE(prev); q 141 net/sched/sch_teql.c if (q == sch) { q 142 net/sched/sch_teql.c NEXT_SLAVE(prev) = NEXT_SLAVE(q); q 143 net/sched/sch_teql.c if (q == master->slaves) { q 144 net/sched/sch_teql.c master->slaves = NEXT_SLAVE(q); q 145 net/sched/sch_teql.c if (q == master->slaves) { q 158 net/sched/sch_teql.c skb_queue_purge(&dat->q); q 162 net/sched/sch_teql.c } while ((prev = q) != master->slaves); q 171 net/sched/sch_teql.c struct teql_sched_data *q = qdisc_priv(sch); q 179 net/sched/sch_teql.c q->m = m; q 181 net/sched/sch_teql.c skb_queue_head_init(&q->q); q 203 net/sched/sch_teql.c q->next = NEXT_SLAVE(m->slaves); q 206 net/sched/sch_teql.c q->next = sch; q 278 net/sched/sch_teql.c struct Qdisc *start, *q; q 290 net/sched/sch_teql.c q = start; q 291 net/sched/sch_teql.c if (!q) q 295 net/sched/sch_teql.c struct net_device *slave
= qdisc_dev(q); q 298 net/sched/sch_teql.c if (slave_txq->qdisc_sleeping != q) q 315 net/sched/sch_teql.c master->slaves = NEXT_SLAVE(q); q 327 net/sched/sch_teql.c master->slaves = NEXT_SLAVE(q); q 334 net/sched/sch_teql.c } while ((q = NEXT_SLAVE(q)) != start); q 355 net/sched/sch_teql.c struct Qdisc *q; q 365 net/sched/sch_teql.c q = m->slaves; q 367 net/sched/sch_teql.c struct net_device *slave = qdisc_dev(q); q 387 net/sched/sch_teql.c } while ((q = NEXT_SLAVE(q)) != m->slaves); q 415 net/sched/sch_teql.c struct Qdisc *q; q 417 net/sched/sch_teql.c q = m->slaves; q 418 net/sched/sch_teql.c if (q) { q 420 net/sched/sch_teql.c if (new_mtu > qdisc_dev(q)->mtu) q 422 net/sched/sch_teql.c } while ((q = NEXT_SLAVE(q)) != m->slaves); q 64 net/sctp/inqueue.c void sctp_inq_push(struct sctp_inq *q, struct sctp_chunk *chunk) q 77 net/sctp/inqueue.c list_add_tail(&chunk->list, &q->in_chunk_list); q 80 net/sctp/inqueue.c q->immediate.func(&q->immediate); q 234 net/sctp/inqueue.c void sctp_inq_set_th_handler(struct sctp_inq *q, work_func_t callback) q 236 net/sctp/inqueue.c INIT_WORK(&q->immediate, callback); q 654 net/sctp/output.c struct sctp_outq *q = &asoc->outqueue; q 670 net/sctp/output.c inflight = q->outstanding_bytes; q 718 net/sctp/output.c if (chunk->skb->len + q->out_qlen > transport->pathmtu - q 42 net/sctp/outqueue.c static void sctp_check_transmitted(struct sctp_outq *q, q 49 net/sctp/outqueue.c static void sctp_mark_missing(struct sctp_outq *q, q 55 net/sctp/outqueue.c static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp); q 58 net/sctp/outqueue.c static inline void sctp_outq_head_data(struct sctp_outq *q, q 64 net/sctp/outqueue.c list_add(&ch->list, &q->out_chunk_list); q 65 net/sctp/outqueue.c q->out_qlen += ch->skb->len; q 68 net/sctp/outqueue.c oute = SCTP_SO(&q->asoc->stream, stream)->ext; q 73 net/sctp/outqueue.c static inline struct sctp_chunk *sctp_outq_dequeue_data(struct sctp_outq *q) q 75 net/sctp/outqueue.c return q->sched->dequeue(q); q 79 net/sctp/outqueue.c static inline void sctp_outq_tail_data(struct sctp_outq *q, q 85 net/sctp/outqueue.c list_add_tail(&ch->list, &q->out_chunk_list); q 86 net/sctp/outqueue.c q->out_qlen += ch->skb->len; q 89 net/sctp/outqueue.c oute = SCTP_SO(&q->asoc->stream, stream)->ext; q 190 net/sctp/outqueue.c void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q) q 192 net/sctp/outqueue.c memset(q, 0, sizeof(struct sctp_outq)); q 194 net/sctp/outqueue.c q->asoc = asoc; q 195 net/sctp/outqueue.c INIT_LIST_HEAD(&q->out_chunk_list); q 196 net/sctp/outqueue.c INIT_LIST_HEAD(&q->control_chunk_list); q 197 net/sctp/outqueue.c INIT_LIST_HEAD(&q->retransmit); q 198 net/sctp/outqueue.c INIT_LIST_HEAD(&q->sacked); q 199 net/sctp/outqueue.c INIT_LIST_HEAD(&q->abandoned); q 205 net/sctp/outqueue.c static void __sctp_outq_teardown(struct sctp_outq *q) q 212 net/sctp/outqueue.c list_for_each_entry(transport, &q->asoc->peer.transport_addr_list, q 218 net/sctp/outqueue.c sctp_chunk_fail(chunk, q->error); q 224 net/sctp/outqueue.c list_for_each_safe(lchunk, temp, &q->sacked) { q 228 net/sctp/outqueue.c sctp_chunk_fail(chunk, q->error); q 233 net/sctp/outqueue.c list_for_each_safe(lchunk, temp, &q->retransmit) { q 237 net/sctp/outqueue.c sctp_chunk_fail(chunk, q->error); q 242 net/sctp/outqueue.c list_for_each_safe(lchunk, temp, &q->abandoned) { q 246 net/sctp/outqueue.c sctp_chunk_fail(chunk, q->error); q 251 net/sctp/outqueue.c while ((chunk = sctp_outq_dequeue_data(q)) != NULL) { q 252 net/sctp/outqueue.c 
sctp_sched_dequeue_done(q, chunk); q 255 net/sctp/outqueue.c sctp_chunk_fail(chunk, q->error); q 260 net/sctp/outqueue.c list_for_each_entry_safe(chunk, tmp, &q->control_chunk_list, list) { q 266 net/sctp/outqueue.c void sctp_outq_teardown(struct sctp_outq *q) q 268 net/sctp/outqueue.c __sctp_outq_teardown(q); q 269 net/sctp/outqueue.c sctp_outq_init(q->asoc, q); q 273 net/sctp/outqueue.c void sctp_outq_free(struct sctp_outq *q) q 276 net/sctp/outqueue.c __sctp_outq_teardown(q); q 280 net/sctp/outqueue.c void sctp_outq_tail(struct sctp_outq *q, struct sctp_chunk *chunk, gfp_t gfp) q 282 net/sctp/outqueue.c struct net *net = sock_net(q->asoc->base.sk); q 284 net/sctp/outqueue.c pr_debug("%s: outq:%p, chunk:%p[%s]\n", __func__, q, chunk, q 294 net/sctp/outqueue.c __func__, q, chunk, chunk && chunk->chunk_hdr ? q 298 net/sctp/outqueue.c sctp_outq_tail_data(q, chunk); q 307 net/sctp/outqueue.c list_add_tail(&chunk->list, &q->control_chunk_list); q 311 net/sctp/outqueue.c if (!q->cork) q 312 net/sctp/outqueue.c sctp_outq_flush(q, 0, gfp); q 384 net/sctp/outqueue.c struct sctp_outq *q = &asoc->outqueue; q 387 net/sctp/outqueue.c q->sched->unsched_all(&asoc->stream); q 389 net/sctp/outqueue.c list_for_each_entry_safe(chk, temp, &q->out_chunk_list, list) { q 397 net/sctp/outqueue.c sctp_sched_dequeue_common(q, chk); q 413 net/sctp/outqueue.c q->sched->sched_all(&asoc->stream); q 446 net/sctp/outqueue.c void sctp_retransmit_mark(struct sctp_outq *q, q 461 net/sctp/outqueue.c sctp_insert_list(&q->abandoned, lchunk); q 472 net/sctp/outqueue.c q->outstanding_bytes -= sctp_data_size(chunk); q 473 net/sctp/outqueue.c q->asoc->peer.rwnd += sctp_data_size(chunk); q 493 net/sctp/outqueue.c q->asoc->peer.rwnd += sctp_data_size(chunk); q 494 net/sctp/outqueue.c q->outstanding_bytes -= sctp_data_size(chunk); q 520 net/sctp/outqueue.c sctp_insert_list(&q->retransmit, lchunk); q 533 net/sctp/outqueue.c void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport, q 536 net/sctp/outqueue.c struct net *net = sock_net(q->asoc->base.sk); q 553 net/sctp/outqueue.c q->fast_rtx = 1; q 566 net/sctp/outqueue.c sctp_retransmit_mark(q, transport, reason); q 573 net/sctp/outqueue.c q->asoc->stream.si->generate_ftsn(q, q->asoc->ctsn_ack_point); q 580 net/sctp/outqueue.c sctp_outq_flush(q, /* rtx_timeout */ 1, GFP_ATOMIC); q 591 net/sctp/outqueue.c static int __sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt, q 603 net/sctp/outqueue.c lqueue = &q->retransmit; q 604 net/sctp/outqueue.c fast_rtx = q->fast_rtx; q 633 net/sctp/outqueue.c sctp_insert_list(&q->abandoned, q 718 net/sctp/outqueue.c q->asoc->stats.rtxchunks++; q 746 net/sctp/outqueue.c q->fast_rtx = 0; q 752 net/sctp/outqueue.c void sctp_outq_uncork(struct sctp_outq *q, gfp_t gfp) q 754 net/sctp/outqueue.c if (q->cork) q 755 net/sctp/outqueue.c q->cork = 0; q 757 net/sctp/outqueue.c sctp_outq_flush(q, 0, gfp); q 777 net/sctp/outqueue.c struct sctp_outq *q; q 874 net/sctp/outqueue.c list_for_each_entry_safe(chunk, tmp, &ctx->q->control_chunk_list, list) { q 943 net/sctp/outqueue.c list_add(&chunk->list, &ctx->q->control_chunk_list); q 992 net/sctp/outqueue.c error = __sctp_outq_flush_rtx(ctx->q, ctx->packet, rtx_timeout, q 1011 net/sctp/outqueue.c if (!list_empty(&ctx->q->retransmit)) q 1051 net/sctp/outqueue.c if (!list_empty(&ctx->q->retransmit) && q 1064 net/sctp/outqueue.c while ((chunk = sctp_outq_dequeue_data(ctx->q)) != NULL) { q 1070 net/sctp/outqueue.c sctp_sched_dequeue_done(ctx->q, chunk); q 1077 net/sctp/outqueue.c 
sctp_outq_head_data(ctx->q, chunk); q 1084 net/sctp/outqueue.c __func__, ctx->q, chunk, chunk && chunk->chunk_hdr ? q 1101 net/sctp/outqueue.c sctp_outq_head_data(ctx->q, chunk); q 1119 net/sctp/outqueue.c sctp_sched_dequeue_done(ctx->q, chunk); q 1148 net/sctp/outqueue.c ctx->q->asoc->base.sk->sk_err = -error; q 1165 net/sctp/outqueue.c static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp) q 1168 net/sctp/outqueue.c .q = q, q 1171 net/sctp/outqueue.c .asoc = q->asoc, q 1187 net/sctp/outqueue.c if (q->asoc->src_out_of_asoc_ok) q 1221 net/sctp/outqueue.c int sctp_outq_sack(struct sctp_outq *q, struct sctp_chunk *chunk) q 1223 net/sctp/outqueue.c struct sctp_association *asoc = q->asoc; q 1294 net/sctp/outqueue.c sctp_check_transmitted(q, &q->retransmit, NULL, NULL, sack, &highest_new_tsn); q 1302 net/sctp/outqueue.c sctp_check_transmitted(q, &transport->transmitted, q 1326 net/sctp/outqueue.c sctp_mark_missing(q, &transport->transmitted, transport, q 1336 net/sctp/outqueue.c list_for_each_safe(lchunk, temp, &q->sacked) { q 1356 net/sctp/outqueue.c outstanding = q->outstanding_bytes; q 1365 net/sctp/outqueue.c asoc->stream.si->generate_ftsn(q, sack_ctsn); q 1372 net/sctp/outqueue.c return sctp_outq_is_empty(q); q 1379 net/sctp/outqueue.c int sctp_outq_is_empty(const struct sctp_outq *q) q 1381 net/sctp/outqueue.c return q->out_qlen == 0 && q->outstanding_bytes == 0 && q 1382 net/sctp/outqueue.c list_empty(&q->retransmit); q 1399 net/sctp/outqueue.c static void sctp_check_transmitted(struct sctp_outq *q, q 1428 net/sctp/outqueue.c sctp_insert_list(&q->abandoned, lchunk); q 1433 net/sctp/outqueue.c if (transmitted_queue != &q->retransmit && q 1438 net/sctp/outqueue.c q->outstanding_bytes -= sctp_data_size(tchunk); q 1485 net/sctp/outqueue.c q->asoc->peer.primary_path->cacc. 
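
The sctp_check_transmitted() entries beginning above maintain one invariant: q->outstanding_bytes counts only chunks still in flight, so every chunk that a SACK moves onto the sacked (or abandoned) list hands its size back. A toy model of that bookkeeping, with an illustrative singly linked chunk type standing in for struct sctp_chunk; a plain <= comparison is used where real SCTP needs wraparound-safe TSN serial arithmetic:

    #include <stddef.h>
    #include <stdint.h>

    /* Illustrative stand-ins for struct sctp_chunk / struct sctp_outq. */
    struct toy_chunk {
            uint32_t tsn;            /* transmission sequence number */
            size_t len;              /* payload bytes */
            struct toy_chunk *next;
    };

    struct toy_outq {
            struct toy_chunk *transmitted;  /* in flight, ascending TSN */
            struct toy_chunk *sacked;       /* acked, awaiting release */
            size_t outstanding_bytes;
    };

    /* Cumulatively ack everything up to ctsn: move chunks from the
     * transmitted list to the sacked list and return their size to the
     * flight-size counter, echoing the accounting that
     * sctp_check_transmitted() performs per transport. */
    static void toy_outq_sack(struct toy_outq *q, uint32_t ctsn)
    {
            while (q->transmitted && q->transmitted->tsn <= ctsn) {
                    struct toy_chunk *c = q->transmitted;

                    q->transmitted = c->next;
                    q->outstanding_bytes -= c->len; /* no longer in flight */
                    c->next = q->sacked;
                    q->sacked = c;
            }
    }
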
q 1522 net/sctp/outqueue.c &q->sacked); q 1620 net/sctp/outqueue.c q->outstanding_bytes -= bytes_acked + migrate_bytes; q 1636 net/sctp/outqueue.c if (!q->asoc->peer.rwnd && q 1638 net/sctp/outqueue.c (sack_ctsn+2 == q->asoc->next_tsn) && q 1639 net/sctp/outqueue.c q->asoc->state < SCTP_STATE_SHUTDOWN_PENDING) { q 1643 net/sctp/outqueue.c q->asoc->overall_error_count = 0; q 1673 net/sctp/outqueue.c static void sctp_mark_missing(struct sctp_outq *q, q 1682 net/sctp/outqueue.c struct sctp_association *asoc = q->asoc; q 1727 net/sctp/outqueue.c sctp_retransmit(q, transport, SCTP_RTXR_FAST_RTX); q 1786 net/sctp/outqueue.c void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 ctsn) q 1788 net/sctp/outqueue.c struct sctp_association *asoc = q->asoc; q 1830 net/sctp/outqueue.c list_for_each_safe(lchunk, temp, &q->abandoned) { q 1886 net/sctp/outqueue.c list_add_tail(&ftsn_chunk->list, &q->control_chunk_list); q 169 net/sctp/socket.c struct sctp_outq *q = &asoc->outqueue; q 177 net/sctp/socket.c list_for_each_entry(chunk, &q->retransmit, transmitted_list) q 180 net/sctp/socket.c list_for_each_entry(chunk, &q->sacked, transmitted_list) q 183 net/sctp/socket.c list_for_each_entry(chunk, &q->abandoned, transmitted_list) q 186 net/sctp/socket.c list_for_each_entry(chunk, &q->out_chunk_list, list) q 1109 net/sctp/stream_interleave.c static void sctp_generate_iftsn(struct sctp_outq *q, __u32 ctsn) q 1112 net/sctp/stream_interleave.c struct sctp_association *asoc = q->asoc; q 1125 net/sctp/stream_interleave.c list_for_each_safe(lchunk, temp, &q->abandoned) { q 1161 net/sctp/stream_interleave.c list_add_tail(&ftsn_chunk->list, &q->control_chunk_list); q 53 net/sctp/stream_sched.c static void sctp_sched_fcfs_enqueue(struct sctp_outq *q, q 58 net/sctp/stream_sched.c static struct sctp_chunk *sctp_sched_fcfs_dequeue(struct sctp_outq *q) q 60 net/sctp/stream_sched.c struct sctp_stream *stream = &q->asoc->stream; q 64 net/sctp/stream_sched.c if (list_empty(&q->out_chunk_list)) q 71 net/sctp/stream_sched.c entry = q->out_chunk_list.next; q 75 net/sctp/stream_sched.c sctp_sched_dequeue_common(q, ch); q 81 net/sctp/stream_sched.c static void sctp_sched_fcfs_dequeue_done(struct sctp_outq *q, q 228 net/sctp/stream_sched.c void sctp_sched_dequeue_done(struct sctp_outq *q, struct sctp_chunk *ch) q 231 net/sctp/stream_sched.c !q->asoc->peer.intl_capable) { q 240 net/sctp/stream_sched.c sout = SCTP_SO(&q->asoc->stream, sid); q 241 net/sctp/stream_sched.c q->asoc->stream.out_curr = sout; q 245 net/sctp/stream_sched.c q->asoc->stream.out_curr = NULL; q 246 net/sctp/stream_sched.c q->sched->dequeue_done(q, ch); q 250 net/sctp/stream_sched.c void sctp_sched_dequeue_common(struct sctp_outq *q, struct sctp_chunk *ch) q 254 net/sctp/stream_sched.c q->out_qlen -= ch->skb->len; q 233 net/sctp/stream_sched_prio.c static void sctp_sched_prio_enqueue(struct sctp_outq *q, q 242 net/sctp/stream_sched_prio.c stream = &q->asoc->stream; q 246 net/sctp/stream_sched_prio.c static struct sctp_chunk *sctp_sched_prio_dequeue(struct sctp_outq *q) q 248 net/sctp/stream_sched_prio.c struct sctp_stream *stream = &q->asoc->stream; q 254 net/sctp/stream_sched_prio.c if (list_empty(&q->out_chunk_list)) q 268 net/sctp/stream_sched_prio.c sctp_sched_dequeue_common(q, ch); q 274 net/sctp/stream_sched_prio.c static void sctp_sched_prio_dequeue_done(struct sctp_outq *q, q 285 net/sctp/stream_sched_prio.c soute = SCTP_SO(&q->asoc->stream, sid)->ext; q 98 net/sctp/stream_sched_rr.c static void sctp_sched_rr_enqueue(struct sctp_outq *q, q 107 
net/sctp/stream_sched_rr.c stream = &q->asoc->stream; q 111 net/sctp/stream_sched_rr.c static struct sctp_chunk *sctp_sched_rr_dequeue(struct sctp_outq *q) q 113 net/sctp/stream_sched_rr.c struct sctp_stream *stream = &q->asoc->stream; q 118 net/sctp/stream_sched_rr.c if (list_empty(&q->out_chunk_list)) q 128 net/sctp/stream_sched_rr.c sctp_sched_dequeue_common(q, ch); q 134 net/sctp/stream_sched_rr.c static void sctp_sched_rr_dequeue_done(struct sctp_outq *q, q 142 net/sctp/stream_sched_rr.c soute = SCTP_SO(&q->asoc->stream, sid)->ext; q 144 net/sctp/stream_sched_rr.c sctp_sched_rr_next_stream(&q->asoc->stream); q 147 net/sctp/stream_sched_rr.c sctp_sched_rr_unsched(&q->asoc->stream, soute); q 131 net/sunrpc/auth_gss/auth_gss.c const void *q = (const void *)((const char *)p + len); q 132 net/sunrpc/auth_gss/auth_gss.c if (unlikely(q > end || q < p)) q 135 net/sunrpc/auth_gss/auth_gss.c return q; q 141 net/sunrpc/auth_gss/auth_gss.c const void *q; q 147 net/sunrpc/auth_gss/auth_gss.c q = (const void *)((const char *)p + len); q 148 net/sunrpc/auth_gss/auth_gss.c if (unlikely(q > end || q < p)) q 154 net/sunrpc/auth_gss/auth_gss.c return q; q 190 net/sunrpc/auth_gss/auth_gss.c const void *q; q 234 net/sunrpc/auth_gss/auth_gss.c q = (const void *)((const char *)p + seclen); q 235 net/sunrpc/auth_gss/auth_gss.c if (unlikely(q > end || q < p)) { q 247 net/sunrpc/auth_gss/auth_gss.c if (q == end) { q 248 net/sunrpc/auth_gss/auth_gss.c p = q; q 253 net/sunrpc/auth_gss/auth_gss.c p = simple_get_netobj(q, end, &ctx->gc_acceptor); q 170 net/sunrpc/auth_gss/gss_krb5_mech.c const void *q = (const void *)((const char *)p + len); q 171 net/sunrpc/auth_gss/gss_krb5_mech.c if (unlikely(q > end || q < p)) q 174 net/sunrpc/auth_gss/gss_krb5_mech.c return q; q 180 net/sunrpc/auth_gss/gss_krb5_mech.c const void *q; q 186 net/sunrpc/auth_gss/gss_krb5_mech.c q = (const void *)((const char *)p + len); q 187 net/sunrpc/auth_gss/gss_krb5_mech.c if (unlikely(q > end || q < p)) q 193 net/sunrpc/auth_gss/gss_krb5_mech.c return q; q 120 net/sunrpc/auth_gss/gss_krb5_wrap.c u64 *q = (u64 *)p; q 139 net/sunrpc/auth_gss/gss_krb5_wrap.c *q++ = i++; q 142 net/sunrpc/auth_gss/gss_krb5_wrap.c *q++ = i++; q 773 net/sunrpc/cache.c struct cache_queue q; q 780 net/sunrpc/cache.c struct cache_queue q; q 812 net/sunrpc/cache.c while (rp->q.list.next != &cd->queue && q 813 net/sunrpc/cache.c list_entry(rp->q.list.next, struct cache_queue, list) q 815 net/sunrpc/cache.c struct list_head *next = rp->q.list.next; q 816 net/sunrpc/cache.c list_move(&rp->q.list, next); q 818 net/sunrpc/cache.c if (rp->q.list.next == &cd->queue) { q 824 net/sunrpc/cache.c rq = container_of(rp->q.list.next, struct cache_request, q.list); q 825 net/sunrpc/cache.c WARN_ON_ONCE(rq->q.reader); q 840 net/sunrpc/cache.c list_move(&rp->q.list, &rq->q.list); q 852 net/sunrpc/cache.c list_move(&rp->q.list, &rq->q.list); q 864 net/sunrpc/cache.c list_del(&rq->q.list); q 971 net/sunrpc/cache.c for (cq= &rp->q; &cq->list != &cd->queue; q 997 net/sunrpc/cache.c for (cq= &rp->q; &cq->list != &cd->queue; q 1001 net/sunrpc/cache.c container_of(cq, struct cache_request, q); q 1025 net/sunrpc/cache.c rp->q.reader = 1; q 1028 net/sunrpc/cache.c list_add(&rp->q.list, &cd->queue); q 1046 net/sunrpc/cache.c for (cq= &rp->q; &cq->list != &cd->queue; q 1049 net/sunrpc/cache.c container_of(cq, struct cache_request, q) q 1055 net/sunrpc/cache.c list_del(&rp->q.list); q 1082 net/sunrpc/cache.c cr = container_of(cq, struct cache_request, q); q 1090 net/sunrpc/cache.c 
list_move(&cr->q.list, &dequeued); q 1094 net/sunrpc/cache.c cr = list_entry(dequeued.next, struct cache_request, q.list); q 1095 net/sunrpc/cache.c list_del(&cr->q.list); q 1221 net/sunrpc/cache.c crq->q.reader = 0; q 1228 net/sunrpc/cache.c list_add_tail(&crq->q.list, &detail->queue); q 633 net/sunrpc/rpc_pipe.c struct qstr q = QSTR_INIT(name, strlen(name)); q 634 net/sunrpc/rpc_pipe.c struct dentry *dentry = d_hash_and_lookup(parent, &q); q 636 net/sunrpc/rpc_pipe.c dentry = d_alloc(parent, &q); q 1302 net/sunrpc/rpc_pipe.c struct qstr q = QSTR_INIT(files[RPCAUTH_gssd].name, q 1306 net/sunrpc/rpc_pipe.c gssd_dentry = d_hash_and_lookup(root, &q); q 1316 net/sunrpc/rpc_pipe.c q.name = gssd_dummy_clnt_dir[0].name; q 1317 net/sunrpc/rpc_pipe.c q.len = strlen(gssd_dummy_clnt_dir[0].name); q 1318 net/sunrpc/rpc_pipe.c clnt_dentry = d_hash_and_lookup(gssd_dentry, &q); q 140 net/sunrpc/sched.c __rpc_list_enqueue_task(struct list_head *q, struct rpc_task *task) q 144 net/sunrpc/sched.c list_for_each_entry(t, q, u.tk_wait.list) { q 149 net/sunrpc/sched.c task->u.tk_wait.list.next = q; q 155 net/sunrpc/sched.c list_add_tail(&task->u.tk_wait.list, q); q 164 net/sunrpc/sched.c struct list_head *q; q 176 net/sunrpc/sched.c q = t->u.tk_wait.list.next; q 177 net/sunrpc/sched.c list_add_tail(&t->u.tk_wait.list, q); q 385 net/sunrpc/sched.c static void __rpc_sleep_on_priority(struct rpc_wait_queue *q, q 390 net/sunrpc/sched.c task->tk_pid, rpc_qname(q), jiffies); q 392 net/sunrpc/sched.c trace_rpc_task_sleep(task, q); q 394 net/sunrpc/sched.c __rpc_add_wait_queue(q, task, queue_priority); q 398 net/sunrpc/sched.c static void __rpc_sleep_on_priority_timeout(struct rpc_wait_queue *q, q 403 net/sunrpc/sched.c __rpc_sleep_on_priority(q, task, queue_priority); q 404 net/sunrpc/sched.c __rpc_add_timer(q, task, timeout); q 426 net/sunrpc/sched.c void rpc_sleep_on_timeout(struct rpc_wait_queue *q, struct rpc_task *task, q 437 net/sunrpc/sched.c spin_lock(&q->lock); q 438 net/sunrpc/sched.c __rpc_sleep_on_priority_timeout(q, task, timeout, task->tk_priority); q 439 net/sunrpc/sched.c spin_unlock(&q->lock); q 443 net/sunrpc/sched.c void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task, q 455 net/sunrpc/sched.c spin_lock(&q->lock); q 456 net/sunrpc/sched.c __rpc_sleep_on_priority(q, task, task->tk_priority); q 457 net/sunrpc/sched.c spin_unlock(&q->lock); q 461 net/sunrpc/sched.c void rpc_sleep_on_priority_timeout(struct rpc_wait_queue *q, q 471 net/sunrpc/sched.c spin_lock(&q->lock); q 472 net/sunrpc/sched.c __rpc_sleep_on_priority_timeout(q, task, timeout, priority); q 473 net/sunrpc/sched.c spin_unlock(&q->lock); q 477 net/sunrpc/sched.c void rpc_sleep_on_priority(struct rpc_wait_queue *q, struct rpc_task *task, q 488 net/sunrpc/sched.c spin_lock(&q->lock); q 489 net/sunrpc/sched.c __rpc_sleep_on_priority(q, task, priority); q 490 net/sunrpc/sched.c spin_unlock(&q->lock); q 606 net/sunrpc/sched.c struct list_head *q; q 612 net/sunrpc/sched.c q = &queue->tasks[queue->priority]; q 613 net/sunrpc/sched.c if (!list_empty(q) && --queue->nr) { q 614 net/sunrpc/sched.c task = list_first_entry(q, struct rpc_task, u.tk_wait.list); q 622 net/sunrpc/sched.c if (q == &queue->tasks[0]) q 623 net/sunrpc/sched.c q = &queue->tasks[queue->maxpriority]; q 625 net/sunrpc/sched.c q = q - 1; q 626 net/sunrpc/sched.c if (!list_empty(q)) { q 627 net/sunrpc/sched.c task = list_first_entry(q, struct rpc_task, u.tk_wait.list); q 630 net/sunrpc/sched.c } while (q != &queue->tasks[queue->priority]); q 636 net/sunrpc/sched.c 
rpc_set_waitqueue_priority(queue, (unsigned int)(q - &queue->tasks[0])); q 1169 net/sunrpc/sched.c struct workqueue_struct *q) q 1171 net/sunrpc/sched.c if (q != NULL) { q 1173 net/sunrpc/sched.c queue_work(q, &task->u.tk_work); q 1178 net/sunrpc/sched.c static void rpc_do_put_task(struct rpc_task *task, struct workqueue_struct *q) q 1182 net/sunrpc/sched.c rpc_final_put_task(task, q); q 632 net/sunrpc/xdr.c __be32 *q; q 638 net/sunrpc/xdr.c q = p + (nbytes >> 2); q 639 net/sunrpc/xdr.c if (unlikely(q > xdr->end || q < p)) q 641 net/sunrpc/xdr.c xdr->p = q; q 901 net/sunrpc/xdr.c __be32 *q = p + nwords; q 903 net/sunrpc/xdr.c if (unlikely(nwords > xdr->nwords || q > xdr->end || q < p)) q 905 net/sunrpc/xdr.c xdr->p = q; q 411 net/sunrpc/xprtrdma/svc_rdma_sendto.c __be32 *p, *q; q 416 net/sunrpc/xprtrdma/svc_rdma_sendto.c q = wr_ch; q 417 net/sunrpc/xprtrdma/svc_rdma_sendto.c while (*q != xdr_zero) { q 418 net/sunrpc/xprtrdma/svc_rdma_sendto.c nsegs = xdr_encode_write_chunk(p, q, consumed); q 419 net/sunrpc/xprtrdma/svc_rdma_sendto.c q += 2 + nsegs * rpcrdma_segment_maxsz; q 354 net/unix/af_unix.c static int unix_dgram_peer_wake_relay(wait_queue_entry_t *q, unsigned mode, int flags, q 360 net/unix/af_unix.c u = container_of(q, struct unix_sock, peer_wake); q 363 net/unix/af_unix.c q); q 208 net/x25/x25_in.c static int x25_state3_machine(struct sock *sk, struct sk_buff *skb, int frametype, int ns, int nr, int q, int d, int m) q 389 net/x25/x25_in.c int queued = 0, frametype, ns, nr, q, d, m; q 394 net/x25/x25_in.c frametype = x25_decode(sk, skb, &ns, &nr, &q, &d, &m); q 404 net/x25/x25_in.c queued = x25_state3_machine(sk, skb, frametype, ns, nr, q, d, m); q 260 net/x25/x25_subr.c int x25_decode(struct sock *sk, struct sk_buff *skb, int *ns, int *nr, int *q, q 270 net/x25/x25_subr.c *ns = *nr = *q = *d = *m = 0; q 315 net/x25/x25_subr.c *q = (frame[0] & X25_Q_BIT) == X25_Q_BIT; q 324 net/x25/x25_subr.c *q = (frame[0] & X25_Q_BIT) == X25_Q_BIT; q 463 net/xdp/xsk.c struct xsk_queue *q; q 468 net/xdp/xsk.c q = xskq_create(entries, umem_queue); q 469 net/xdp/xsk.c if (!q) q 474 net/xdp/xsk.c WRITE_ONCE(*queue, q); q 748 net/xdp/xsk.c struct xsk_queue **q; q 761 net/xdp/xsk.c q = (optname == XDP_TX_RING) ? &xs->tx : &xs->rx; q 762 net/xdp/xsk.c err = xsk_init_queue(entries, q, false); q 804 net/xdp/xsk.c struct xsk_queue **q; q 820 net/xdp/xsk.c q = (optname == XDP_UMEM_FILL_RING) ? 
&xs->umem->fq : q 822 net/xdp/xsk.c err = xsk_init_queue(entries, q, true); q 968 net/xdp/xsk.c struct xsk_queue *q = NULL; q 977 net/xdp/xsk.c q = READ_ONCE(xs->rx); q 979 net/xdp/xsk.c q = READ_ONCE(xs->tx); q 988 net/xdp/xsk.c q = READ_ONCE(umem->fq); q 990 net/xdp/xsk.c q = READ_ONCE(umem->cq); q 993 net/xdp/xsk.c if (!q) q 998 net/xdp/xsk.c qpg = virt_to_head_page(q->ring); q 1002 net/xdp/xsk.c pfn = virt_to_phys(q->ring) >> PAGE_SHIFT; q 12 net/xdp/xsk_queue.c void xskq_set_umem(struct xsk_queue *q, u64 size, u64 chunk_mask) q 14 net/xdp/xsk_queue.c if (!q) q 17 net/xdp/xsk_queue.c q->size = size; q 18 net/xdp/xsk_queue.c q->chunk_mask = chunk_mask; q 21 net/xdp/xsk_queue.c static u32 xskq_umem_get_ring_size(struct xsk_queue *q) q 23 net/xdp/xsk_queue.c return sizeof(struct xdp_umem_ring) + q->nentries * sizeof(u64); q 26 net/xdp/xsk_queue.c static u32 xskq_rxtx_get_ring_size(struct xsk_queue *q) q 28 net/xdp/xsk_queue.c return sizeof(struct xdp_ring) + q->nentries * sizeof(struct xdp_desc); q 33 net/xdp/xsk_queue.c struct xsk_queue *q; q 37 net/xdp/xsk_queue.c q = kzalloc(sizeof(*q), GFP_KERNEL); q 38 net/xdp/xsk_queue.c if (!q) q 41 net/xdp/xsk_queue.c q->nentries = nentries; q 42 net/xdp/xsk_queue.c q->ring_mask = nentries - 1; q 46 net/xdp/xsk_queue.c size = umem_queue ? xskq_umem_get_ring_size(q) : q 47 net/xdp/xsk_queue.c xskq_rxtx_get_ring_size(q); q 49 net/xdp/xsk_queue.c q->ring = (struct xdp_ring *)__get_free_pages(gfp_flags, q 51 net/xdp/xsk_queue.c if (!q->ring) { q 52 net/xdp/xsk_queue.c kfree(q); q 56 net/xdp/xsk_queue.c return q; q 59 net/xdp/xsk_queue.c void xskq_destroy(struct xsk_queue *q) q 61 net/xdp/xsk_queue.c if (!q) q 64 net/xdp/xsk_queue.c page_frag_free(q->ring); q 65 net/xdp/xsk_queue.c kfree(q); q 91 net/xdp/xsk_queue.h static inline u64 xskq_nb_invalid_descs(struct xsk_queue *q) q 93 net/xdp/xsk_queue.h return q ? 
q->invalid_descs : 0; q 96 net/xdp/xsk_queue.h static inline u32 xskq_nb_avail(struct xsk_queue *q, u32 dcnt) q 98 net/xdp/xsk_queue.h u32 entries = q->prod_tail - q->cons_tail; q 102 net/xdp/xsk_queue.h q->prod_tail = READ_ONCE(q->ring->producer); q 103 net/xdp/xsk_queue.h entries = q->prod_tail - q->cons_tail; q 109 net/xdp/xsk_queue.h static inline u32 xskq_nb_free(struct xsk_queue *q, u32 producer, u32 dcnt) q 111 net/xdp/xsk_queue.h u32 free_entries = q->nentries - (producer - q->cons_tail); q 117 net/xdp/xsk_queue.h q->cons_tail = READ_ONCE(q->ring->consumer); q 118 net/xdp/xsk_queue.h return q->nentries - (producer - q->cons_tail); q 121 net/xdp/xsk_queue.h static inline bool xskq_has_addrs(struct xsk_queue *q, u32 cnt) q 123 net/xdp/xsk_queue.h u32 entries = q->prod_tail - q->cons_tail; q 129 net/xdp/xsk_queue.h q->prod_tail = READ_ONCE(q->ring->producer); q 130 net/xdp/xsk_queue.h entries = q->prod_tail - q->cons_tail; q 148 net/xdp/xsk_queue.h static inline bool xskq_is_valid_addr(struct xsk_queue *q, u64 addr) q 150 net/xdp/xsk_queue.h if (addr >= q->size) { q 151 net/xdp/xsk_queue.h q->invalid_descs++; q 158 net/xdp/xsk_queue.h static inline bool xskq_is_valid_addr_unaligned(struct xsk_queue *q, u64 addr, q 165 net/xdp/xsk_queue.h if (base_addr >= q->size || addr >= q->size || q 167 net/xdp/xsk_queue.h q->invalid_descs++; q 174 net/xdp/xsk_queue.h static inline u64 *xskq_validate_addr(struct xsk_queue *q, u64 *addr, q 177 net/xdp/xsk_queue.h while (q->cons_tail != q->cons_head) { q 178 net/xdp/xsk_queue.h struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring; q 179 net/xdp/xsk_queue.h unsigned int idx = q->cons_tail & q->ring_mask; q 181 net/xdp/xsk_queue.h *addr = READ_ONCE(ring->desc[idx]) & q->chunk_mask; q 184 net/xdp/xsk_queue.h if (xskq_is_valid_addr_unaligned(q, *addr, q 191 net/xdp/xsk_queue.h if (xskq_is_valid_addr(q, *addr)) q 195 net/xdp/xsk_queue.h q->cons_tail++; q 201 net/xdp/xsk_queue.h static inline u64 *xskq_peek_addr(struct xsk_queue *q, u64 *addr, q 204 net/xdp/xsk_queue.h if (q->cons_tail == q->cons_head) { q 206 net/xdp/xsk_queue.h WRITE_ONCE(q->ring->consumer, q->cons_tail); q 207 net/xdp/xsk_queue.h q->cons_head = q->cons_tail + xskq_nb_avail(q, RX_BATCH_SIZE); q 213 net/xdp/xsk_queue.h return xskq_validate_addr(q, addr, umem); q 216 net/xdp/xsk_queue.h static inline void xskq_discard_addr(struct xsk_queue *q) q 218 net/xdp/xsk_queue.h q->cons_tail++; q 221 net/xdp/xsk_queue.h static inline int xskq_produce_addr(struct xsk_queue *q, u64 addr) q 223 net/xdp/xsk_queue.h struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring; q 225 net/xdp/xsk_queue.h if (xskq_nb_free(q, q->prod_tail, 1) == 0) q 229 net/xdp/xsk_queue.h ring->desc[q->prod_tail++ & q->ring_mask] = addr; q 234 net/xdp/xsk_queue.h WRITE_ONCE(q->ring->producer, q->prod_tail); q 238 net/xdp/xsk_queue.h static inline int xskq_produce_addr_lazy(struct xsk_queue *q, u64 addr) q 240 net/xdp/xsk_queue.h struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring; q 242 net/xdp/xsk_queue.h if (xskq_nb_free(q, q->prod_head, LAZY_UPDATE_THRESHOLD) == 0) q 246 net/xdp/xsk_queue.h ring->desc[q->prod_head++ & q->ring_mask] = addr; q 250 net/xdp/xsk_queue.h static inline void xskq_produce_flush_addr_n(struct xsk_queue *q, q 256 net/xdp/xsk_queue.h q->prod_tail += nb_entries; q 257 net/xdp/xsk_queue.h WRITE_ONCE(q->ring->producer, q->prod_tail); q 260 net/xdp/xsk_queue.h static inline int xskq_reserve_addr(struct xsk_queue *q) q 262 net/xdp/xsk_queue.h if (xskq_nb_free(q, q->prod_head, 1) == 0) q 266 net/xdp/xsk_queue.h q->prod_head++;
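
The xsk_queue.h inlines indexed above implement a single-producer/single-consumer ring over a power-of-two array: producer and consumer counters only ever increase, availability is their unsigned difference, and slot indices come from masking with q->ring_mask. A minimal sketch of the same indexing, single-threaded and therefore without the READ_ONCE()/WRITE_ONCE() publication steps the kernel needs for its shared rings (toy_ring and its names are illustrative):

    #include <stdbool.h>
    #include <stdint.h>

    #define RING_SLOTS 64u  /* must be a power of two, like q->nentries */

    /* Toy analogue of struct xsk_queue: the counters grow without bound
     * and are reduced to an index only at access time, via the mask. */
    struct toy_ring {
            uint32_t prod;               /* next slot the producer fills */
            uint32_t cons;               /* next slot the consumer reads */
            uint64_t desc[RING_SLOTS];
    };

    /* prod - cons stays correct even after the 32-bit counters wrap,
     * which is why the kernel never resets prod_tail/cons_tail. */
    static uint32_t toy_nb_avail(const struct toy_ring *q)
    {
            return q->prod - q->cons;
    }

    static uint32_t toy_nb_free(const struct toy_ring *q)
    {
            return RING_SLOTS - (q->prod - q->cons);
    }

    static bool toy_produce(struct toy_ring *q, uint64_t addr)
    {
            if (toy_nb_free(q) == 0)
                    return false;
            q->desc[q->prod++ & (RING_SLOTS - 1)] = addr; /* wrap via mask */
            return true;
    }

    static bool toy_consume(struct toy_ring *q, uint64_t *addr)
    {
            if (toy_nb_avail(q) == 0)
                    return false;
            *addr = q->desc[q->cons++ & (RING_SLOTS - 1)];
            return true;
    }

The split between prod_head/prod_tail (and cons_head/cons_tail) in the real code exists so that batches can be reserved and validated before being published to the other side in one flush.
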
q 272 net/xdp/xsk_queue.h static inline bool xskq_is_valid_desc(struct xsk_queue *q, struct xdp_desc *d, q 276 net/xdp/xsk_queue.h if (!xskq_is_valid_addr_unaligned(q, d->addr, d->len, umem)) q 280 net/xdp/xsk_queue.h q->invalid_descs++; q 287 net/xdp/xsk_queue.h if (!xskq_is_valid_addr(q, d->addr)) q 290 net/xdp/xsk_queue.h if (((d->addr + d->len) & q->chunk_mask) != (d->addr & q->chunk_mask) || q 292 net/xdp/xsk_queue.h q->invalid_descs++; q 299 net/xdp/xsk_queue.h static inline struct xdp_desc *xskq_validate_desc(struct xsk_queue *q, q 303 net/xdp/xsk_queue.h while (q->cons_tail != q->cons_head) { q 304 net/xdp/xsk_queue.h struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring; q 305 net/xdp/xsk_queue.h unsigned int idx = q->cons_tail & q->ring_mask; q 308 net/xdp/xsk_queue.h if (xskq_is_valid_desc(q, desc, umem)) q 311 net/xdp/xsk_queue.h q->cons_tail++; q 317 net/xdp/xsk_queue.h static inline struct xdp_desc *xskq_peek_desc(struct xsk_queue *q, q 321 net/xdp/xsk_queue.h if (q->cons_tail == q->cons_head) { q 323 net/xdp/xsk_queue.h WRITE_ONCE(q->ring->consumer, q->cons_tail); q 324 net/xdp/xsk_queue.h q->cons_head = q->cons_tail + xskq_nb_avail(q, RX_BATCH_SIZE); q 330 net/xdp/xsk_queue.h return xskq_validate_desc(q, desc, umem); q 333 net/xdp/xsk_queue.h static inline void xskq_discard_desc(struct xsk_queue *q) q 335 net/xdp/xsk_queue.h q->cons_tail++; q 338 net/xdp/xsk_queue.h static inline int xskq_produce_batch_desc(struct xsk_queue *q, q 341 net/xdp/xsk_queue.h struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring; q 344 net/xdp/xsk_queue.h if (xskq_nb_free(q, q->prod_head, 1) == 0) q 348 net/xdp/xsk_queue.h idx = (q->prod_head++) & q->ring_mask; q 355 net/xdp/xsk_queue.h static inline void xskq_produce_flush_desc(struct xsk_queue *q) q 360 net/xdp/xsk_queue.h q->prod_tail = q->prod_head; q 361 net/xdp/xsk_queue.h WRITE_ONCE(q->ring->producer, q->prod_tail); q 364 net/xdp/xsk_queue.h static inline bool xskq_full_desc(struct xsk_queue *q) q 366 net/xdp/xsk_queue.h return xskq_nb_avail(q, q->nentries) == q->nentries; q 369 net/xdp/xsk_queue.h static inline bool xskq_empty_desc(struct xsk_queue *q) q 371 net/xdp/xsk_queue.h return xskq_nb_free(q, q->prod_tail, q->nentries) == q->nentries; q 374 net/xdp/xsk_queue.h void xskq_set_umem(struct xsk_queue *q, u64 size, u64 chunk_mask); q 762 samples/v4l/v4l2-pci-skeleton.c struct vb2_queue *q; q 821 samples/v4l/v4l2-pci-skeleton.c q = &skel->queue; q 822 samples/v4l/v4l2-pci-skeleton.c q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; q 823 samples/v4l/v4l2-pci-skeleton.c q->io_modes = VB2_MMAP | VB2_DMABUF | VB2_READ; q 824 samples/v4l/v4l2-pci-skeleton.c q->dev = &pdev->dev; q 825 samples/v4l/v4l2-pci-skeleton.c q->drv_priv = skel; q 826 samples/v4l/v4l2-pci-skeleton.c q->buf_struct_size = sizeof(struct skel_buffer); q 827 samples/v4l/v4l2-pci-skeleton.c q->ops = &skel_qops; q 828 samples/v4l/v4l2-pci-skeleton.c q->mem_ops = &vb2_dma_contig_memops; q 829 samples/v4l/v4l2-pci-skeleton.c q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; q 835 samples/v4l/v4l2-pci-skeleton.c q->min_buffers_needed = 2; q 845 samples/v4l/v4l2-pci-skeleton.c q->lock = &skel->lock; q 850 samples/v4l/v4l2-pci-skeleton.c q->gfp_flags = GFP_DMA32; q 851 samples/v4l/v4l2-pci-skeleton.c ret = vb2_queue_init(q); q 876 samples/v4l/v4l2-pci-skeleton.c vdev->queue = q; q 349 scripts/asn1_compiler.c char *line, *nl, *start, *p, *q; q 381 scripts/asn1_compiler.c q = p + 2; q 382 scripts/asn1_compiler.c while ((q = memchr(q,
'-', nl - q))) { q 383 scripts/asn1_compiler.c if (q[1] == '-') { q 385 scripts/asn1_compiler.c q += 2; q 386 scripts/asn1_compiler.c memmove(p, q, nl - q); q 389 scripts/asn1_compiler.c q++; q 417 scripts/asn1_compiler.c q = p + 1; q 418 scripts/asn1_compiler.c while (q < nl && (isalnum(*q) || *q == '-' || *q == '_')) q 419 scripts/asn1_compiler.c q++; q 420 scripts/asn1_compiler.c tokens[tix].size = q - p; q 421 scripts/asn1_compiler.c p = q; q 458 scripts/asn1_compiler.c q = p + 1; q 459 scripts/asn1_compiler.c while (q < nl && (isdigit(*q))) q 460 scripts/asn1_compiler.c q++; q 461 scripts/asn1_compiler.c tokens[tix].size = q - p; q 462 scripts/asn1_compiler.c p = q; q 244 scripts/basic/fixdep.c const char *q, *r; q 253 scripts/basic/fixdep.c q = p; q 254 scripts/basic/fixdep.c while (*q && (isalnum(*q) || *q == '_')) q 255 scripts/basic/fixdep.c q++; q 256 scripts/basic/fixdep.c if (str_ends_with(p, q - p, "_MODULE")) q 257 scripts/basic/fixdep.c r = q - 7; q 259 scripts/basic/fixdep.c r = q; q 262 scripts/basic/fixdep.c p = q; q 60 scripts/dtc/data.c char *q; q 65 scripts/dtc/data.c q = d.val; q 72 scripts/dtc/data.c q[d.len++] = c; q 75 scripts/dtc/data.c q[d.len++] = '\0'; q 245 scripts/dtc/libfdt/fdt_ro.c const char *q = memchr(path, '/', end - p); q 247 scripts/dtc/libfdt/fdt_ro.c if (!q) q 248 scripts/dtc/libfdt/fdt_ro.c q = end; q 250 scripts/dtc/libfdt/fdt_ro.c p = fdt_get_alias_namelen(fdt, p, q - p); q 255 scripts/dtc/libfdt/fdt_ro.c p = q; q 259 scripts/dtc/libfdt/fdt_ro.c const char *q; q 266 scripts/dtc/libfdt/fdt_ro.c q = memchr(p, '/', end - p); q 267 scripts/dtc/libfdt/fdt_ro.c if (! q) q 268 scripts/dtc/libfdt/fdt_ro.c q = end; q 270 scripts/dtc/libfdt/fdt_ro.c offset = fdt_subnode_offset_namelen(fdt, offset, p, q-p); q 274 scripts/dtc/libfdt/fdt_ro.c p = q; q 456 scripts/kconfig/preprocess.c const char *q; q 470 scripts/kconfig/preprocess.c q = p; q 471 scripts/kconfig/preprocess.c while (*q) { q 472 scripts/kconfig/preprocess.c if (*q == '(') { q 474 scripts/kconfig/preprocess.c } else if (*q == ')') { q 478 scripts/kconfig/preprocess.c q++; q 481 scripts/kconfig/preprocess.c if (!*q) q 485 scripts/kconfig/preprocess.c *str = q + 1; q 487 scripts/kconfig/preprocess.c return eval_clause(p, q - p, argc, argv); q 562 security/integrity/evm/evm_main.c struct list_head *pos, *q; q 579 security/integrity/evm/evm_main.c list_for_each_safe(pos, q, &evm_config_xattrnames) q 42 security/keys/keyctl_pkey.c char *c = params->info, *p, *q; q 53 security/keys/keyctl_pkey.c q = args[0].from; q 54 security/keys/keyctl_pkey.c if (!q[0]) q 59 security/keys/keyctl_pkey.c params->encoding = q; q 63 security/keys/keyctl_pkey.c params->hash_algo = q; q 2637 security/selinux/hooks.c char *p, *q; q 2641 security/selinux/hooks.c for (p = q = arg; p < from + len; p++) { q 2644 security/selinux/hooks.c *q++ = c; q 2646 security/selinux/hooks.c arg = kmemdup_nul(arg, q - arg, GFP_KERNEL); q 3496 security/selinux/hooks.c struct qstr q; q 3498 security/selinux/hooks.c q.name = kn->name; q 3499 security/selinux/hooks.c q.hash_len = hashlen_string(kn_dir, kn->name); q 3502 security/selinux/hooks.c parent_sid, secclass, &q, q 113 sound/core/misc.c const struct snd_pci_quirk *q; q 115 sound/core/misc.c for (q = list; q->subvendor; q++) { q 116 sound/core/misc.c if (q->subvendor != vendor) q 118 sound/core/misc.c if (!q->subdevice || q 119 sound/core/misc.c (device & q->subdevice_mask) == q->subdevice) q 120 sound/core/misc.c return q; q 524 sound/core/pcm_lib.c unsigned int q; q 527 
sound/core/pcm_lib.c q = div32(a, b, &r); q 529 sound/core/pcm_lib.c ++q; q 530 sound/core/pcm_lib.c return q; q 791 sound/core/pcm_lib.c unsigned int q = i->min; q 793 sound/core/pcm_lib.c if (q == 0) q 794 sound/core/pcm_lib.c q = 1; q 795 sound/core/pcm_lib.c den = div_up(num, q); q 806 sound/core/pcm_lib.c diff = num - q * den; q 830 sound/core/pcm_lib.c unsigned int q = i->max; q 832 sound/core/pcm_lib.c if (q == 0) { q 836 sound/core/pcm_lib.c den = div_down(num, q); q 847 sound/core/pcm_lib.c diff = q * den - num; q 907 sound/core/pcm_lib.c unsigned int q = i->min; q 909 sound/core/pcm_lib.c num = mul(q, den); q 920 sound/core/pcm_lib.c diff = num - q * den; q 939 sound/core/pcm_lib.c unsigned int q = i->max; q 941 sound/core/pcm_lib.c num = mul(q, den); q 952 sound/core/pcm_lib.c diff = q * den - num; q 127 sound/core/seq/oss/seq_oss_device.h void snd_seq_oss_readq_info_read(struct seq_oss_readq *q, struct snd_info_buffer *buf); q 22 sound/core/seq/oss/seq_oss_event.c static int extended_event(struct seq_oss_devinfo *dp, union evrec *q, struct snd_seq_event *ev); q 27 sound/core/seq/oss/seq_oss_event.c static int old_event(struct seq_oss_devinfo *dp, union evrec *q, struct snd_seq_event *ev); q 42 sound/core/seq/oss/seq_oss_event.c snd_seq_oss_process_event(struct seq_oss_devinfo *dp, union evrec *q, struct snd_seq_event *ev) q 44 sound/core/seq/oss/seq_oss_event.c switch (q->s.code) { q 46 sound/core/seq/oss/seq_oss_event.c return extended_event(dp, q, ev); q 49 sound/core/seq/oss/seq_oss_event.c return chn_voice_event(dp, q, ev); q 52 sound/core/seq/oss/seq_oss_event.c return chn_common_event(dp, q, ev); q 55 sound/core/seq/oss/seq_oss_event.c return timing_event(dp, q, ev); q 58 sound/core/seq/oss/seq_oss_event.c return local_event(dp, q, ev); q 61 sound/core/seq/oss/seq_oss_event.c return snd_seq_oss_synth_sysex(dp, q->x.dev, q->x.buf, ev); q 69 sound/core/seq/oss/seq_oss_event.c if (snd_seq_oss_midi_open(dp, q->s.dev, SNDRV_SEQ_OSS_FILE_WRITE)) q 71 sound/core/seq/oss/seq_oss_event.c if (snd_seq_oss_midi_filemode(dp, q->s.dev) & SNDRV_SEQ_OSS_FILE_WRITE) q 72 sound/core/seq/oss/seq_oss_event.c return snd_seq_oss_midi_putc(dp, q->s.dev, q->s.parm1, ev); q 78 sound/core/seq/oss/seq_oss_event.c return set_echo_event(dp, q, ev); q 83 sound/core/seq/oss/seq_oss_event.c return snd_seq_oss_synth_raw_event(dp, q->c[1], q->c, ev); q 88 sound/core/seq/oss/seq_oss_event.c return old_event(dp, q, ev); q 95 sound/core/seq/oss/seq_oss_event.c old_event(struct seq_oss_devinfo *dp, union evrec *q, struct snd_seq_event *ev) q 97 sound/core/seq/oss/seq_oss_event.c switch (q->s.code) { q 99 sound/core/seq/oss/seq_oss_event.c return note_off_event(dp, 0, q->n.chn, q->n.note, q->n.vel, ev); q 102 sound/core/seq/oss/seq_oss_event.c return note_on_event(dp, 0, q->n.chn, q->n.note, q->n.vel, ev); q 110 sound/core/seq/oss/seq_oss_event.c q->n.chn, 0, q->n.note, ev); q 121 sound/core/seq/oss/seq_oss_event.c extended_event(struct seq_oss_devinfo *dp, union evrec *q, struct snd_seq_event *ev) q 125 sound/core/seq/oss/seq_oss_event.c switch (q->e.cmd) { q 127 sound/core/seq/oss/seq_oss_event.c return note_off_event(dp, q->e.dev, q->e.chn, q->e.p1, q->e.p2, ev); q 130 sound/core/seq/oss/seq_oss_event.c return note_on_event(dp, q->e.dev, q->e.chn, q->e.p1, q->e.p2, ev); q 133 sound/core/seq/oss/seq_oss_event.c return set_control_event(dp, q->e.dev, SNDRV_SEQ_EVENT_PGMCHANGE, q 134 sound/core/seq/oss/seq_oss_event.c q->e.chn, 0, q->e.p1, ev); q 137 sound/core/seq/oss/seq_oss_event.c return 
set_control_event(dp, q->e.dev, SNDRV_SEQ_EVENT_CHANPRESS, q 138 sound/core/seq/oss/seq_oss_event.c q->e.chn, 0, q->e.p1, ev); q 142 sound/core/seq/oss/seq_oss_event.c val = (char)q->e.p1; q 144 sound/core/seq/oss/seq_oss_event.c return set_control_event(dp, q->e.dev, SNDRV_SEQ_EVENT_CONTROLLER, q 145 sound/core/seq/oss/seq_oss_event.c q->e.chn, CTL_PAN, val, ev); q 148 sound/core/seq/oss/seq_oss_event.c val = ((short)q->e.p3 << 8) | (short)q->e.p2; q 149 sound/core/seq/oss/seq_oss_event.c switch (q->e.p1) { q 152 sound/core/seq/oss/seq_oss_event.c return set_control_event(dp, q->e.dev, q 154 sound/core/seq/oss/seq_oss_event.c q->e.chn, 0, val, ev); q 157 sound/core/seq/oss/seq_oss_event.c return set_control_event(dp, q->e.dev, q 159 sound/core/seq/oss/seq_oss_event.c q->e.chn, 0, val*128/100, ev); q 161 sound/core/seq/oss/seq_oss_event.c return set_control_event(dp, q->e.dev, q 163 sound/core/seq/oss/seq_oss_event.c q->e.chn, q->e.p1, val, ev); q 167 sound/core/seq/oss/seq_oss_event.c return snd_seq_oss_synth_raw_event(dp, q->e.dev, q->c, ev); q 175 sound/core/seq/oss/seq_oss_event.c chn_voice_event(struct seq_oss_devinfo *dp, union evrec *q, struct snd_seq_event *ev) q 177 sound/core/seq/oss/seq_oss_event.c if (q->v.chn >= 32) q 179 sound/core/seq/oss/seq_oss_event.c switch (q->v.cmd) { q 181 sound/core/seq/oss/seq_oss_event.c return note_on_event(dp, q->v.dev, q->v.chn, q->v.note, q->v.parm, ev); q 184 sound/core/seq/oss/seq_oss_event.c return note_off_event(dp, q->v.dev, q->v.chn, q->v.note, q->v.parm, ev); q 187 sound/core/seq/oss/seq_oss_event.c return set_note_event(dp, q->v.dev, SNDRV_SEQ_EVENT_KEYPRESS, q 188 sound/core/seq/oss/seq_oss_event.c q->v.chn, q->v.note, q->v.parm, ev); q 196 sound/core/seq/oss/seq_oss_event.c chn_common_event(struct seq_oss_devinfo *dp, union evrec *q, struct snd_seq_event *ev) q 198 sound/core/seq/oss/seq_oss_event.c if (q->l.chn >= 32) q 200 sound/core/seq/oss/seq_oss_event.c switch (q->l.cmd) { q 202 sound/core/seq/oss/seq_oss_event.c return set_control_event(dp, q->l.dev, SNDRV_SEQ_EVENT_PGMCHANGE, q 203 sound/core/seq/oss/seq_oss_event.c q->l.chn, 0, q->l.p1, ev); q 206 sound/core/seq/oss/seq_oss_event.c return set_control_event(dp, q->l.dev, SNDRV_SEQ_EVENT_CONTROLLER, q 207 sound/core/seq/oss/seq_oss_event.c q->l.chn, q->l.p1, q->l.val, ev); q 211 sound/core/seq/oss/seq_oss_event.c return set_control_event(dp, q->l.dev, SNDRV_SEQ_EVENT_PITCHBEND, q 212 sound/core/seq/oss/seq_oss_event.c q->l.chn, 0, q->l.val - 8192, ev); q 215 sound/core/seq/oss/seq_oss_event.c return set_control_event(dp, q->l.dev, SNDRV_SEQ_EVENT_CHANPRESS, q 216 sound/core/seq/oss/seq_oss_event.c q->l.chn, 0, q->l.val, ev); q 223 sound/core/seq/oss/seq_oss_event.c timing_event(struct seq_oss_devinfo *dp, union evrec *q, struct snd_seq_event *ev) q 225 sound/core/seq/oss/seq_oss_event.c switch (q->t.cmd) { q 228 sound/core/seq/oss/seq_oss_event.c return set_echo_event(dp, q, ev); q 233 sound/core/seq/oss/seq_oss_event.c tmp.echo = (q->t.time << 8) | SEQ_ECHO; q 249 sound/core/seq/oss/seq_oss_event.c return snd_seq_oss_timer_tempo(dp->timer, q->t.time); q 258 sound/core/seq/oss/seq_oss_event.c local_event(struct seq_oss_devinfo *dp, union evrec *q, struct snd_seq_event *ev) q 94 sound/core/seq/oss/seq_oss_event.h int snd_seq_oss_process_event(struct seq_oss_devinfo *dp, union evrec *q, struct snd_seq_event *ev); q 95 sound/core/seq/oss/seq_oss_event.h int snd_seq_oss_process_timer_event(struct seq_oss_timer *rec, union evrec *q); q 35 sound/core/seq/oss/seq_oss_readq.c struct 
seq_oss_readq *q; q 37 sound/core/seq/oss/seq_oss_readq.c q = kzalloc(sizeof(*q), GFP_KERNEL); q 38 sound/core/seq/oss/seq_oss_readq.c if (!q) q 41 sound/core/seq/oss/seq_oss_readq.c q->q = kcalloc(maxlen, sizeof(union evrec), GFP_KERNEL); q 42 sound/core/seq/oss/seq_oss_readq.c if (!q->q) { q 43 sound/core/seq/oss/seq_oss_readq.c kfree(q); q 47 sound/core/seq/oss/seq_oss_readq.c q->maxlen = maxlen; q 48 sound/core/seq/oss/seq_oss_readq.c q->qlen = 0; q 49 sound/core/seq/oss/seq_oss_readq.c q->head = q->tail = 0; q 50 sound/core/seq/oss/seq_oss_readq.c init_waitqueue_head(&q->midi_sleep); q 51 sound/core/seq/oss/seq_oss_readq.c spin_lock_init(&q->lock); q 52 sound/core/seq/oss/seq_oss_readq.c q->pre_event_timeout = SNDRV_SEQ_OSS_MAX_TIMEOUT; q 53 sound/core/seq/oss/seq_oss_readq.c q->input_time = (unsigned long)-1; q 55 sound/core/seq/oss/seq_oss_readq.c return q; q 62 sound/core/seq/oss/seq_oss_readq.c snd_seq_oss_readq_delete(struct seq_oss_readq *q) q 64 sound/core/seq/oss/seq_oss_readq.c if (q) { q 65 sound/core/seq/oss/seq_oss_readq.c kfree(q->q); q 66 sound/core/seq/oss/seq_oss_readq.c kfree(q); q 74 sound/core/seq/oss/seq_oss_readq.c snd_seq_oss_readq_clear(struct seq_oss_readq *q) q 76 sound/core/seq/oss/seq_oss_readq.c if (q->qlen) { q 77 sound/core/seq/oss/seq_oss_readq.c q->qlen = 0; q 78 sound/core/seq/oss/seq_oss_readq.c q->head = q->tail = 0; q 81 sound/core/seq/oss/seq_oss_readq.c wake_up(&q->midi_sleep); q 82 sound/core/seq/oss/seq_oss_readq.c q->input_time = (unsigned long)-1; q 89 sound/core/seq/oss/seq_oss_readq.c snd_seq_oss_readq_puts(struct seq_oss_readq *q, int dev, unsigned char *data, int len) q 100 sound/core/seq/oss/seq_oss_readq.c result = snd_seq_oss_readq_put_event(q, &rec); q 123 sound/core/seq/oss/seq_oss_readq.c int snd_seq_oss_readq_sysex(struct seq_oss_readq *q, int dev, q 127 sound/core/seq/oss/seq_oss_readq.c .readq = q, q 141 sound/core/seq/oss/seq_oss_readq.c snd_seq_oss_readq_put_event(struct seq_oss_readq *q, union evrec *ev) q 145 sound/core/seq/oss/seq_oss_readq.c spin_lock_irqsave(&q->lock, flags); q 146 sound/core/seq/oss/seq_oss_readq.c if (q->qlen >= q->maxlen - 1) { q 147 sound/core/seq/oss/seq_oss_readq.c spin_unlock_irqrestore(&q->lock, flags); q 151 sound/core/seq/oss/seq_oss_readq.c memcpy(&q->q[q->tail], ev, sizeof(*ev)); q 152 sound/core/seq/oss/seq_oss_readq.c q->tail = (q->tail + 1) % q->maxlen; q 153 sound/core/seq/oss/seq_oss_readq.c q->qlen++; q 156 sound/core/seq/oss/seq_oss_readq.c wake_up(&q->midi_sleep); q 158 sound/core/seq/oss/seq_oss_readq.c spin_unlock_irqrestore(&q->lock, flags); q 169 sound/core/seq/oss/seq_oss_readq.c snd_seq_oss_readq_pick(struct seq_oss_readq *q, union evrec *rec) q 171 sound/core/seq/oss/seq_oss_readq.c if (q->qlen == 0) q 173 sound/core/seq/oss/seq_oss_readq.c memcpy(rec, &q->q[q->head], sizeof(*rec)); q 181 sound/core/seq/oss/seq_oss_readq.c snd_seq_oss_readq_wait(struct seq_oss_readq *q) q 183 sound/core/seq/oss/seq_oss_readq.c wait_event_interruptible_timeout(q->midi_sleep, q 184 sound/core/seq/oss/seq_oss_readq.c (q->qlen > 0 || q->head == q->tail), q 185 sound/core/seq/oss/seq_oss_readq.c q->pre_event_timeout); q 193 sound/core/seq/oss/seq_oss_readq.c snd_seq_oss_readq_free(struct seq_oss_readq *q) q 195 sound/core/seq/oss/seq_oss_readq.c if (q->qlen > 0) { q 196 sound/core/seq/oss/seq_oss_readq.c q->head = (q->head + 1) % q->maxlen; q 197 sound/core/seq/oss/seq_oss_readq.c q->qlen--; q 206 sound/core/seq/oss/seq_oss_readq.c snd_seq_oss_readq_poll(struct seq_oss_readq *q, struct file *file, poll_table *wait)
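
The seq_oss_readq.c entries above describe a classic bounded circular buffer: put_event() appends at the tail modulo maxlen and refuses once maxlen - 1 records are queued (one slot is deliberately left unused), while pick() reads the head and free() advances past it. A stripped-down, single-threaded sketch of that head/tail discipline; the kernel version takes q->lock around these updates and wakes q->midi_sleep, and toy_readq here is illustrative:

    #include <stdbool.h>
    #include <string.h>

    #define READQ_MAXLEN 32

    struct toy_evrec { unsigned char data[8]; }; /* stands in for union evrec */

    struct toy_readq {
            struct toy_evrec q[READQ_MAXLEN];
            int qlen, head, tail;
    };

    /* Mirror of snd_seq_oss_readq_put_event(): append at tail, wrap with
     * a modulus, refuse once READQ_MAXLEN - 1 records are queued. */
    static bool toy_readq_put(struct toy_readq *q, const struct toy_evrec *ev)
    {
            if (q->qlen >= READQ_MAXLEN - 1)
                    return false; /* the kernel returns -ENOMEM */
            memcpy(&q->q[q->tail], ev, sizeof(*ev));
            q->tail = (q->tail + 1) % READQ_MAXLEN;
            q->qlen++;
            return true;
    }

    /* readq_pick() copies out the head record and readq_free() advances
     * past it; the two steps are combined here for brevity. */
    static bool toy_readq_get(struct toy_readq *q, struct toy_evrec *ev)
    {
            if (q->qlen == 0)
                    return false;
            memcpy(ev, &q->q[q->head], sizeof(*ev));
            q->head = (q->head + 1) % READQ_MAXLEN;
            q->qlen--;
            return true;
    }
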
q 208 sound/core/seq/oss/seq_oss_readq.c poll_wait(file, &q->midi_sleep, wait); q 209 sound/core/seq/oss/seq_oss_readq.c return q->qlen; q 216 sound/core/seq/oss/seq_oss_readq.c snd_seq_oss_readq_put_timestamp(struct seq_oss_readq *q, unsigned long curt, int seq_mode) q 218 sound/core/seq/oss/seq_oss_readq.c if (curt != q->input_time) { q 224 sound/core/seq/oss/seq_oss_readq.c snd_seq_oss_readq_put_event(q, &rec); q 230 sound/core/seq/oss/seq_oss_readq.c snd_seq_oss_readq_put_event(q, &rec); q 233 sound/core/seq/oss/seq_oss_readq.c q->input_time = curt; q 244 sound/core/seq/oss/seq_oss_readq.c snd_seq_oss_readq_info_read(struct seq_oss_readq *q, struct snd_info_buffer *buf) q 247 sound/core/seq/oss/seq_oss_readq.c (waitqueue_active(&q->midi_sleep) ? "sleeping":"running"), q 248 sound/core/seq/oss/seq_oss_readq.c q->qlen, q->input_time); q 19 sound/core/seq/oss/seq_oss_readq.h union evrec *q; q 30 sound/core/seq/oss/seq_oss_readq.h void snd_seq_oss_readq_delete(struct seq_oss_readq *q); q 34 sound/core/seq/oss/seq_oss_readq.h int snd_seq_oss_readq_sysex(struct seq_oss_readq *q, int dev, q 38 sound/core/seq/oss/seq_oss_readq.h int snd_seq_oss_readq_pick(struct seq_oss_readq *q, union evrec *rec); q 39 sound/core/seq/oss/seq_oss_readq.h void snd_seq_oss_readq_wait(struct seq_oss_readq *q); q 40 sound/core/seq/oss/seq_oss_readq.h void snd_seq_oss_readq_free(struct seq_oss_readq *q); q 42 sound/core/seq/oss/seq_oss_readq.h #define snd_seq_oss_readq_lock(q, flags) spin_lock_irqsave(&(q)->lock, flags) q 43 sound/core/seq/oss/seq_oss_readq.h #define snd_seq_oss_readq_unlock(q, flags) spin_unlock_irqrestore(&(q)->lock, flags) q 27 sound/core/seq/oss/seq_oss_writeq.c struct seq_oss_writeq *q; q 30 sound/core/seq/oss/seq_oss_writeq.c if ((q = kzalloc(sizeof(*q), GFP_KERNEL)) == NULL) q 32 sound/core/seq/oss/seq_oss_writeq.c q->dp = dp; q 33 sound/core/seq/oss/seq_oss_writeq.c q->maxlen = maxlen; q 34 sound/core/seq/oss/seq_oss_writeq.c spin_lock_init(&q->sync_lock); q 35 sound/core/seq/oss/seq_oss_writeq.c q->sync_event_put = 0; q 36 sound/core/seq/oss/seq_oss_writeq.c q->sync_time = 0; q 37 sound/core/seq/oss/seq_oss_writeq.c init_waitqueue_head(&q->sync_sleep); q 46 sound/core/seq/oss/seq_oss_writeq.c return q; q 53 sound/core/seq/oss/seq_oss_writeq.c snd_seq_oss_writeq_delete(struct seq_oss_writeq *q) q 55 sound/core/seq/oss/seq_oss_writeq.c if (q) { q 56 sound/core/seq/oss/seq_oss_writeq.c snd_seq_oss_writeq_clear(q); /* to be sure */ q 57 sound/core/seq/oss/seq_oss_writeq.c kfree(q); q 66 sound/core/seq/oss/seq_oss_writeq.c snd_seq_oss_writeq_clear(struct seq_oss_writeq *q) q 72 sound/core/seq/oss/seq_oss_writeq.c snd_seq_oss_control(q->dp, SNDRV_SEQ_IOCTL_REMOVE_EVENTS, &reset); q 75 sound/core/seq/oss/seq_oss_writeq.c snd_seq_oss_writeq_wakeup(q, 0); q 82 sound/core/seq/oss/seq_oss_writeq.c snd_seq_oss_writeq_sync(struct seq_oss_writeq *q) q 84 sound/core/seq/oss/seq_oss_writeq.c struct seq_oss_devinfo *dp = q->dp; q 88 sound/core/seq/oss/seq_oss_writeq.c if (q->sync_time >= time) q 91 sound/core/seq/oss/seq_oss_writeq.c if (! q->sync_event_put) { q 105 sound/core/seq/oss/seq_oss_writeq.c q->sync_event_put = 1; q 109 sound/core/seq/oss/seq_oss_writeq.c wait_event_interruptible_timeout(q->sync_sleep, ! q->sync_event_put, HZ); q 112 sound/core/seq/oss/seq_oss_writeq.c q->sync_event_put = 0; q 113 sound/core/seq/oss/seq_oss_writeq.c if (!
q->sync_event_put || q->sync_time >= time) q 122 sound/core/seq/oss/seq_oss_writeq.c snd_seq_oss_writeq_wakeup(struct seq_oss_writeq *q, abstime_t time) q 126 sound/core/seq/oss/seq_oss_writeq.c spin_lock_irqsave(&q->sync_lock, flags); q 127 sound/core/seq/oss/seq_oss_writeq.c q->sync_time = time; q 128 sound/core/seq/oss/seq_oss_writeq.c q->sync_event_put = 0; q 129 sound/core/seq/oss/seq_oss_writeq.c wake_up(&q->sync_sleep); q 130 sound/core/seq/oss/seq_oss_writeq.c spin_unlock_irqrestore(&q->sync_lock, flags); q 138 sound/core/seq/oss/seq_oss_writeq.c snd_seq_oss_writeq_get_free_size(struct seq_oss_writeq *q) q 141 sound/core/seq/oss/seq_oss_writeq.c pool.client = q->dp->cseq; q 142 sound/core/seq/oss/seq_oss_writeq.c snd_seq_oss_control(q->dp, SNDRV_SEQ_IOCTL_GET_CLIENT_POOL, &pool); q 151 sound/core/seq/oss/seq_oss_writeq.c snd_seq_oss_writeq_set_output(struct seq_oss_writeq *q, int val) q 154 sound/core/seq/oss/seq_oss_writeq.c pool.client = q->dp->cseq; q 155 sound/core/seq/oss/seq_oss_writeq.c snd_seq_oss_control(q->dp, SNDRV_SEQ_IOCTL_GET_CLIENT_POOL, &pool); q 157 sound/core/seq/oss/seq_oss_writeq.c snd_seq_oss_control(q->dp, SNDRV_SEQ_IOCTL_SET_CLIENT_POOL, &pool); q 29 sound/core/seq/oss/seq_oss_writeq.h void snd_seq_oss_writeq_delete(struct seq_oss_writeq *q); q 30 sound/core/seq/oss/seq_oss_writeq.h void snd_seq_oss_writeq_clear(struct seq_oss_writeq *q); q 31 sound/core/seq/oss/seq_oss_writeq.h int snd_seq_oss_writeq_sync(struct seq_oss_writeq *q); q 32 sound/core/seq/oss/seq_oss_writeq.h void snd_seq_oss_writeq_wakeup(struct seq_oss_writeq *q, abstime_t time); q 33 sound/core/seq/oss/seq_oss_writeq.h int snd_seq_oss_writeq_get_free_size(struct seq_oss_writeq *q); q 34 sound/core/seq/oss/seq_oss_writeq.h void snd_seq_oss_writeq_set_output(struct seq_oss_writeq *q, int size); q 575 sound/core/seq/seq_clientmgr.c struct snd_seq_queue *q; q 577 sound/core/seq/seq_clientmgr.c q = queueptr(queue); q 578 sound/core/seq/seq_clientmgr.c if (! 
q) q 583 sound/core/seq/seq_clientmgr.c event->time.time = snd_seq_timer_get_cur_time(q->timer, true); q 586 sound/core/seq/seq_clientmgr.c event->time.tick = snd_seq_timer_get_cur_tick(q->timer); q 589 sound/core/seq/seq_clientmgr.c queuefree(q); q 1546 sound/core/seq/seq_clientmgr.c struct snd_seq_queue *q; q 1548 sound/core/seq/seq_clientmgr.c q = snd_seq_queue_alloc(client->number, info->locked, info->flags); q 1549 sound/core/seq/seq_clientmgr.c if (IS_ERR(q)) q 1550 sound/core/seq/seq_clientmgr.c return PTR_ERR(q); q 1552 sound/core/seq/seq_clientmgr.c info->queue = q->queue; q 1553 sound/core/seq/seq_clientmgr.c info->locked = q->locked; q 1554 sound/core/seq/seq_clientmgr.c info->owner = q->owner; q 1558 sound/core/seq/seq_clientmgr.c snprintf(info->name, sizeof(info->name), "Queue-%d", q->queue); q 1559 sound/core/seq/seq_clientmgr.c strscpy(q->name, info->name, sizeof(q->name)); q 1560 sound/core/seq/seq_clientmgr.c snd_use_lock_free(&q->use_lock); q 1578 sound/core/seq/seq_clientmgr.c struct snd_seq_queue *q; q 1580 sound/core/seq/seq_clientmgr.c q = queueptr(info->queue); q 1581 sound/core/seq/seq_clientmgr.c if (q == NULL) q 1585 sound/core/seq/seq_clientmgr.c info->queue = q->queue; q 1586 sound/core/seq/seq_clientmgr.c info->owner = q->owner; q 1587 sound/core/seq/seq_clientmgr.c info->locked = q->locked; q 1588 sound/core/seq/seq_clientmgr.c strlcpy(info->name, q->name, sizeof(info->name)); q 1589 sound/core/seq/seq_clientmgr.c queuefree(q); q 1599 sound/core/seq/seq_clientmgr.c struct snd_seq_queue *q; q 1614 sound/core/seq/seq_clientmgr.c q = queueptr(info->queue); q 1615 sound/core/seq/seq_clientmgr.c if (! q) q 1617 sound/core/seq/seq_clientmgr.c if (q->owner != client->number) { q 1618 sound/core/seq/seq_clientmgr.c queuefree(q); q 1621 sound/core/seq/seq_clientmgr.c strscpy(q->name, info->name, sizeof(q->name)); q 1622 sound/core/seq/seq_clientmgr.c queuefree(q); q 1632 sound/core/seq/seq_clientmgr.c struct snd_seq_queue *q; q 1634 sound/core/seq/seq_clientmgr.c q = snd_seq_queue_find_name(info->name); q 1635 sound/core/seq/seq_clientmgr.c if (q == NULL) q 1637 sound/core/seq/seq_clientmgr.c info->queue = q->queue; q 1638 sound/core/seq/seq_clientmgr.c info->owner = q->owner; q 1639 sound/core/seq/seq_clientmgr.c info->locked = q->locked; q 1640 sound/core/seq/seq_clientmgr.c queuefree(q); q 1760 sound/core/seq/seq_clientmgr.c struct snd_seq_queue *q; q 1763 sound/core/seq/seq_clientmgr.c q = queueptr(timer->queue); q 1764 sound/core/seq/seq_clientmgr.c if (q == NULL) q 1766 sound/core/seq/seq_clientmgr.c mutex_lock(&q->timer_mutex); q 1767 sound/core/seq/seq_clientmgr.c tmr = q->timer; q 1775 sound/core/seq/seq_clientmgr.c mutex_unlock(&q->timer_mutex); q 1776 sound/core/seq/seq_clientmgr.c queuefree(q); q 50 sound/core/seq/seq_queue.c static int queue_list_add(struct snd_seq_queue *q) q 58 sound/core/seq/seq_queue.c queue_list[i] = q; q 59 sound/core/seq/seq_queue.c q->queue = i; q 71 sound/core/seq/seq_queue.c struct snd_seq_queue *q; q 75 sound/core/seq/seq_queue.c q = queue_list[id]; q 76 sound/core/seq/seq_queue.c if (q) { q 77 sound/core/seq/seq_queue.c spin_lock(&q->owner_lock); q 78 sound/core/seq/seq_queue.c if (q->owner == client) { q 80 sound/core/seq/seq_queue.c q->klocked = 1; q 81 sound/core/seq/seq_queue.c spin_unlock(&q->owner_lock); q 85 sound/core/seq/seq_queue.c return q; q 87 sound/core/seq/seq_queue.c spin_unlock(&q->owner_lock); q 98 sound/core/seq/seq_queue.c struct snd_seq_queue *q; q 100 sound/core/seq/seq_queue.c q = kzalloc(sizeof(*q), 
q 50 sound/core/seq/seq_queue.c static int queue_list_add(struct snd_seq_queue *q)
q 58 sound/core/seq/seq_queue.c queue_list[i] = q;
q 59 sound/core/seq/seq_queue.c q->queue = i;
q 71 sound/core/seq/seq_queue.c struct snd_seq_queue *q;
q 75 sound/core/seq/seq_queue.c q = queue_list[id];
q 76 sound/core/seq/seq_queue.c if (q) {
q 77 sound/core/seq/seq_queue.c spin_lock(&q->owner_lock);
q 78 sound/core/seq/seq_queue.c if (q->owner == client) {
q 80 sound/core/seq/seq_queue.c q->klocked = 1;
q 81 sound/core/seq/seq_queue.c spin_unlock(&q->owner_lock);
q 85 sound/core/seq/seq_queue.c return q;
q 87 sound/core/seq/seq_queue.c spin_unlock(&q->owner_lock);
q 98 sound/core/seq/seq_queue.c struct snd_seq_queue *q;
q 100 sound/core/seq/seq_queue.c q = kzalloc(sizeof(*q), GFP_KERNEL);
q 101 sound/core/seq/seq_queue.c if (!q)
q 104 sound/core/seq/seq_queue.c spin_lock_init(&q->owner_lock);
q 105 sound/core/seq/seq_queue.c spin_lock_init(&q->check_lock);
q 106 sound/core/seq/seq_queue.c mutex_init(&q->timer_mutex);
q 107 sound/core/seq/seq_queue.c snd_use_lock_init(&q->use_lock);
q 108 sound/core/seq/seq_queue.c q->queue = -1;
q 110 sound/core/seq/seq_queue.c q->tickq = snd_seq_prioq_new();
q 111 sound/core/seq/seq_queue.c q->timeq = snd_seq_prioq_new();
q 112 sound/core/seq/seq_queue.c q->timer = snd_seq_timer_new();
q 113 sound/core/seq/seq_queue.c if (q->tickq == NULL || q->timeq == NULL || q->timer == NULL) {
q 114 sound/core/seq/seq_queue.c snd_seq_prioq_delete(&q->tickq);
q 115 sound/core/seq/seq_queue.c snd_seq_prioq_delete(&q->timeq);
q 116 sound/core/seq/seq_queue.c snd_seq_timer_delete(&q->timer);
q 117 sound/core/seq/seq_queue.c kfree(q);
q 121 sound/core/seq/seq_queue.c q->owner = owner;
q 122 sound/core/seq/seq_queue.c q->locked = locked;
q 123 sound/core/seq/seq_queue.c q->klocked = 0;
q 125 sound/core/seq/seq_queue.c return q;
q 129 sound/core/seq/seq_queue.c static void queue_delete(struct snd_seq_queue *q)
q 132 sound/core/seq/seq_queue.c mutex_lock(&q->timer_mutex);
q 133 sound/core/seq/seq_queue.c snd_seq_timer_stop(q->timer);
q 134 sound/core/seq/seq_queue.c snd_seq_timer_close(q);
q 135 sound/core/seq/seq_queue.c mutex_unlock(&q->timer_mutex);
q 137 sound/core/seq/seq_queue.c snd_use_lock_sync(&q->use_lock);
q 139 sound/core/seq/seq_queue.c snd_seq_prioq_delete(&q->tickq);
q 140 sound/core/seq/seq_queue.c snd_seq_prioq_delete(&q->timeq);
q 141 sound/core/seq/seq_queue.c snd_seq_timer_delete(&q->timer);
q 143 sound/core/seq/seq_queue.c kfree(q);
q 170 sound/core/seq/seq_queue.c struct snd_seq_queue *q;
q 172 sound/core/seq/seq_queue.c q = queue_new(client, locked);
q 173 sound/core/seq/seq_queue.c if (q == NULL)
q 175 sound/core/seq/seq_queue.c q->info_flags = info_flags;
q 176 sound/core/seq/seq_queue.c queue_use(q, client, 1);
q 177 sound/core/seq/seq_queue.c snd_use_lock_use(&q->use_lock);
q 178 sound/core/seq/seq_queue.c if (queue_list_add(q) < 0) {
q 179 sound/core/seq/seq_queue.c snd_use_lock_free(&q->use_lock);
q 180 sound/core/seq/seq_queue.c queue_delete(q);
q 183 sound/core/seq/seq_queue.c return q;
q 189 sound/core/seq/seq_queue.c struct snd_seq_queue *q;
q 193 sound/core/seq/seq_queue.c q = queue_list_remove(queueid, client);
q 194 sound/core/seq/seq_queue.c if (q == NULL)
q 196 sound/core/seq/seq_queue.c queue_delete(q);
q 205 sound/core/seq/seq_queue.c struct snd_seq_queue *q;
q 211 sound/core/seq/seq_queue.c q = queue_list[queueid];
q 212 sound/core/seq/seq_queue.c if (q)
q 213 sound/core/seq/seq_queue.c snd_use_lock_use(&q->use_lock);
q 215 sound/core/seq/seq_queue.c return q;
q 222 sound/core/seq/seq_queue.c struct snd_seq_queue *q;
q 225 sound/core/seq/seq_queue.c if ((q = queueptr(i)) != NULL) {
q 226 sound/core/seq/seq_queue.c if (strncmp(q->name, name, sizeof(q->name)) == 0)
q 227 sound/core/seq/seq_queue.c return q;
q 228 sound/core/seq/seq_queue.c queuefree(q);
q 237 sound/core/seq/seq_queue.c void snd_seq_check_queue(struct snd_seq_queue *q, int atomic, int hop)
q 244 sound/core/seq/seq_queue.c if (q == NULL)
q 248 sound/core/seq/seq_queue.c spin_lock_irqsave(&q->check_lock, flags);
q 249 sound/core/seq/seq_queue.c if (q->check_blocked) {
q 250 sound/core/seq/seq_queue.c q->check_again = 1;
q 251 sound/core/seq/seq_queue.c spin_unlock_irqrestore(&q->check_lock, flags);
q 254 sound/core/seq/seq_queue.c q->check_blocked = 1;
q 255 sound/core/seq/seq_queue.c spin_unlock_irqrestore(&q->check_lock, flags);
q 259 sound/core/seq/seq_queue.c cur_tick = snd_seq_timer_get_cur_tick(q->timer);
q 261 sound/core/seq/seq_queue.c cell = snd_seq_prioq_cell_out(q->tickq, &cur_tick);
q 268 sound/core/seq/seq_queue.c cur_time = snd_seq_timer_get_cur_time(q->timer, false);
q 270 sound/core/seq/seq_queue.c cell = snd_seq_prioq_cell_out(q->timeq, &cur_time);
q 277 sound/core/seq/seq_queue.c spin_lock_irqsave(&q->check_lock, flags);
q 278 sound/core/seq/seq_queue.c if (q->check_again) {
q 279 sound/core/seq/seq_queue.c q->check_again = 0;
q 280 sound/core/seq/seq_queue.c spin_unlock_irqrestore(&q->check_lock, flags);
q 283 sound/core/seq/seq_queue.c q->check_blocked = 0;
q 284 sound/core/seq/seq_queue.c spin_unlock_irqrestore(&q->check_lock, flags);
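snd_seq_check_queue() above serializes itself with two flags under q->check_lock: a second caller that finds check_blocked set just records check_again and leaves, and the running checker keeps looping until no re-check was requested while it worked. A minimal sketch of that protocol, assuming a hypothetical process_pending() for the tick/time prioq draining the real function performs:

    static void check_queue(struct my_queue *q)    /* illustrative type */
    {
        unsigned long flags;

        spin_lock_irqsave(&q->check_lock, flags);
        if (q->check_blocked) {            /* a check is already in flight */
            q->check_again = 1;            /* ask it to run once more */
            spin_unlock_irqrestore(&q->check_lock, flags);
            return;
        }
        q->check_blocked = 1;
        spin_unlock_irqrestore(&q->check_lock, flags);

        for (;;) {
            process_pending(q);            /* hypothetical drain step */
            spin_lock_irqsave(&q->check_lock, flags);
            if (!q->check_again)
                break;                     /* lock still held here */
            q->check_again = 0;
            spin_unlock_irqrestore(&q->check_lock, flags);
        }
        q->check_blocked = 0;
        spin_unlock_irqrestore(&q->check_lock, flags);
    }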
q 292 sound/core/seq/seq_queue.c struct snd_seq_queue *q;
q 297 sound/core/seq/seq_queue.c q = queueptr(dest);
q 298 sound/core/seq/seq_queue.c if (q == NULL)
q 304 sound/core/seq/seq_queue.c cell->event.time.tick += q->timer->tick.cur_tick;
q 309 sound/core/seq/seq_queue.c &q->timer->cur_time);
q 318 sound/core/seq/seq_queue.c err = snd_seq_prioq_cell_in(q->tickq, cell);
q 323 sound/core/seq/seq_queue.c err = snd_seq_prioq_cell_in(q->timeq, cell);
q 328 sound/core/seq/seq_queue.c queuefree(q); /* unlock */
q 333 sound/core/seq/seq_queue.c snd_seq_check_queue(q, atomic, hop);
q 335 sound/core/seq/seq_queue.c queuefree(q); /* unlock */
q 343 sound/core/seq/seq_queue.c static inline int check_access(struct snd_seq_queue *q, int client)
q 345 sound/core/seq/seq_queue.c return (q->owner == client) || (!q->locked && !q->klocked);
q 351 sound/core/seq/seq_queue.c static int queue_access_lock(struct snd_seq_queue *q, int client)
q 356 sound/core/seq/seq_queue.c spin_lock_irqsave(&q->owner_lock, flags);
q 357 sound/core/seq/seq_queue.c access_ok = check_access(q, client);
q 359 sound/core/seq/seq_queue.c q->klocked = 1;
q 360 sound/core/seq/seq_queue.c spin_unlock_irqrestore(&q->owner_lock, flags);
q 365 sound/core/seq/seq_queue.c static inline void queue_access_unlock(struct snd_seq_queue *q)
q 369 sound/core/seq/seq_queue.c spin_lock_irqsave(&q->owner_lock, flags);
q 370 sound/core/seq/seq_queue.c q->klocked = 0;
q 371 sound/core/seq/seq_queue.c spin_unlock_irqrestore(&q->owner_lock, flags);
q 377 sound/core/seq/seq_queue.c struct snd_seq_queue *q = queueptr(queueid);
q 381 sound/core/seq/seq_queue.c if (! q)
q 383 sound/core/seq/seq_queue.c spin_lock_irqsave(&q->owner_lock, flags);
q 384 sound/core/seq/seq_queue.c access_ok = check_access(q, client);
q 385 sound/core/seq/seq_queue.c spin_unlock_irqrestore(&q->owner_lock, flags);
q 386 sound/core/seq/seq_queue.c queuefree(q);
q 397 sound/core/seq/seq_queue.c struct snd_seq_queue *q = queueptr(queueid);
q 400 sound/core/seq/seq_queue.c if (q == NULL)
q 403 sound/core/seq/seq_queue.c if (! queue_access_lock(q, client)) {
q 404 sound/core/seq/seq_queue.c queuefree(q);
q 408 sound/core/seq/seq_queue.c spin_lock_irqsave(&q->owner_lock, flags);
q 409 sound/core/seq/seq_queue.c q->locked = locked ? 1 : 0;
q 410 sound/core/seq/seq_queue.c q->owner = client;
q 411 sound/core/seq/seq_queue.c spin_unlock_irqrestore(&q->owner_lock, flags);
q 412 sound/core/seq/seq_queue.c queue_access_unlock(q);
q 413 sound/core/seq/seq_queue.c queuefree(q);
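check_access() and queue_access_lock() above encode the queue ownership rule: the owner always has access, anyone else only if the queue is neither user-locked (locked) nor kernel-locked (klocked), and a successful lock sets klocked so ownership cannot change underneath the caller until queue_access_unlock(). A sketch of that rule with illustrative types:

    static int try_queue_access(struct my_queue *q, int client)
    {
        unsigned long flags;
        int ok;

        spin_lock_irqsave(&q->owner_lock, flags);
        ok = (q->owner == client) || (!q->locked && !q->klocked);
        if (ok)
            q->klocked = 1;    /* held until the matching unlock */
        spin_unlock_irqrestore(&q->owner_lock, flags);
        return ok;
    }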
q 463 sound/core/seq/seq_queue.c struct snd_seq_queue *q = queueptr(queueid);
q 466 sound/core/seq/seq_queue.c if (q == NULL)
q 468 sound/core/seq/seq_queue.c if (! queue_access_lock(q, client)) {
q 469 sound/core/seq/seq_queue.c queuefree(q);
q 473 sound/core/seq/seq_queue.c result = snd_seq_timer_set_tempo_ppq(q->timer, info->tempo, info->ppq);
q 475 sound/core/seq/seq_queue.c result = snd_seq_timer_set_skew(q->timer, info->skew_value,
q 477 sound/core/seq/seq_queue.c queue_access_unlock(q);
q 478 sound/core/seq/seq_queue.c queuefree(q);
q 526 sound/core/seq/seq_queue.c struct snd_seq_queue *q;
q 529 sound/core/seq/seq_queue.c q = queueptr(queueid);
q 530 sound/core/seq/seq_queue.c if (q == NULL)
q 532 sound/core/seq/seq_queue.c result = test_bit(client, q->clients_bitmap) ? 1 : 0;
q 533 sound/core/seq/seq_queue.c queuefree(q);
q 547 sound/core/seq/seq_queue.c struct snd_seq_queue *q;
q 551 sound/core/seq/seq_queue.c if ((q = queueptr(i)) == NULL)
q 553 sound/core/seq/seq_queue.c spin_lock_irqsave(&q->owner_lock, flags);
q 554 sound/core/seq/seq_queue.c matched = (q->owner == client);
q 556 sound/core/seq/seq_queue.c q->klocked = 1;
q 557 sound/core/seq/seq_queue.c spin_unlock_irqrestore(&q->owner_lock, flags);
q 559 sound/core/seq/seq_queue.c if (q->timer->running)
q 560 sound/core/seq/seq_queue.c snd_seq_timer_stop(q->timer);
q 561 sound/core/seq/seq_queue.c snd_seq_timer_reset(q->timer);
q 563 sound/core/seq/seq_queue.c queuefree(q);
q 574 sound/core/seq/seq_queue.c struct snd_seq_queue *q;
q 578 sound/core/seq/seq_queue.c if ((q = queue_list_remove(i, client)) != NULL)
q 579 sound/core/seq/seq_queue.c queue_delete(q);
q 586 sound/core/seq/seq_queue.c if ((q = queueptr(i)) == NULL)
q 588 sound/core/seq/seq_queue.c if (test_bit(client, q->clients_bitmap)) {
q 589 sound/core/seq/seq_queue.c snd_seq_prioq_leave(q->tickq, client, 0);
q 590 sound/core/seq/seq_queue.c snd_seq_prioq_leave(q->timeq, client, 0);
q 591 sound/core/seq/seq_queue.c snd_seq_queue_use(q->queue, client, 0);
q 593 sound/core/seq/seq_queue.c queuefree(q);
q 605 sound/core/seq/seq_queue.c struct snd_seq_queue *q;
q 608 sound/core/seq/seq_queue.c if ((q = queueptr(i)) == NULL)
q 610 sound/core/seq/seq_queue.c snd_seq_prioq_leave(q->tickq, client, 0);
q 611 sound/core/seq/seq_queue.c snd_seq_prioq_leave(q->timeq, client, 0);
q 612 sound/core/seq/seq_queue.c queuefree(q);
q 620 sound/core/seq/seq_queue.c struct snd_seq_queue *q;
q 623 sound/core/seq/seq_queue.c if ((q = queueptr(i)) == NULL)
q 625 sound/core/seq/seq_queue.c if (test_bit(client, q->clients_bitmap) &&
q 627 sound/core/seq/seq_queue.c q->queue == info->queue)) {
q 628 sound/core/seq/seq_queue.c snd_seq_prioq_remove_events(q->tickq, client, info);
q 629 sound/core/seq/seq_queue.c snd_seq_prioq_remove_events(q->timeq, client, info);
q 631 sound/core/seq/seq_queue.c queuefree(q);
q 640 sound/core/seq/seq_queue.c static void queue_broadcast_event(struct snd_seq_queue *q, struct snd_seq_event *ev,
q 648 sound/core/seq/seq_queue.c sev.time.tick = q->timer->tick.cur_tick;
q 649 sound/core/seq/seq_queue.c sev.queue = q->queue;
q 650 sound/core/seq/seq_queue.c sev.data.queue.queue = q->queue;
q 663 sound/core/seq/seq_queue.c static void snd_seq_queue_process_event(struct snd_seq_queue *q,
q 669 sound/core/seq/seq_queue.c snd_seq_prioq_leave(q->tickq, ev->source.client, 1);
q 670 sound/core/seq/seq_queue.c snd_seq_prioq_leave(q->timeq, ev->source.client, 1);
q 671 sound/core/seq/seq_queue.c if (! snd_seq_timer_start(q->timer))
q 672 sound/core/seq/seq_queue.c queue_broadcast_event(q, ev, atomic, hop);
q 676 sound/core/seq/seq_queue.c if (! snd_seq_timer_continue(q->timer))
q 677 sound/core/seq/seq_queue.c queue_broadcast_event(q, ev, atomic, hop);
q 681 sound/core/seq/seq_queue.c snd_seq_timer_stop(q->timer);
q 682 sound/core/seq/seq_queue.c queue_broadcast_event(q, ev, atomic, hop);
q 686 sound/core/seq/seq_queue.c snd_seq_timer_set_tempo(q->timer, ev->data.queue.param.value);
q 687 sound/core/seq/seq_queue.c queue_broadcast_event(q, ev, atomic, hop);
q 691 sound/core/seq/seq_queue.c if (snd_seq_timer_set_position_tick(q->timer, ev->data.queue.param.time.tick) == 0) {
q 692 sound/core/seq/seq_queue.c queue_broadcast_event(q, ev, atomic, hop);
q 697 sound/core/seq/seq_queue.c if (snd_seq_timer_set_position_time(q->timer, ev->data.queue.param.time.time) == 0) {
q 698 sound/core/seq/seq_queue.c queue_broadcast_event(q, ev, atomic, hop);
q 702 sound/core/seq/seq_queue.c if (snd_seq_timer_set_skew(q->timer,
q 705 sound/core/seq/seq_queue.c queue_broadcast_event(q, ev, atomic, hop);
q 718 sound/core/seq/seq_queue.c struct snd_seq_queue *q;
q 722 sound/core/seq/seq_queue.c q = queueptr(ev->data.queue.queue);
q 724 sound/core/seq/seq_queue.c if (q == NULL)
q 727 sound/core/seq/seq_queue.c if (! queue_access_lock(q, ev->source.client)) {
q 728 sound/core/seq/seq_queue.c queuefree(q);
q 732 sound/core/seq/seq_queue.c snd_seq_queue_process_event(q, ev, atomic, hop);
q 734 sound/core/seq/seq_queue.c queue_access_unlock(q);
q 735 sound/core/seq/seq_queue.c queuefree(q);
q 748 sound/core/seq/seq_queue.c struct snd_seq_queue *q;
q 754 sound/core/seq/seq_queue.c if ((q = queueptr(i)) == NULL)
q 757 sound/core/seq/seq_queue.c tmr = q->timer;
q 763 sound/core/seq/seq_queue.c spin_lock_irq(&q->owner_lock);
q 764 sound/core/seq/seq_queue.c locked = q->locked;
q 765 sound/core/seq/seq_queue.c owner = q->owner;
q 766 sound/core/seq/seq_queue.c spin_unlock_irq(&q->owner_lock);
q 768 sound/core/seq/seq_queue.c snd_iprintf(buffer, "queue %d: [%s]\n", q->queue, q->name);
q 771 sound/core/seq/seq_queue.c snd_iprintf(buffer, "queued time events : %d\n", snd_seq_prioq_avail(q->timeq));
q 772 sound/core/seq/seq_queue.c snd_iprintf(buffer, "queued tick events : %d\n", snd_seq_prioq_avail(q->tickq));
q 780 sound/core/seq/seq_queue.c queuefree(q);
q 78 sound/core/seq/seq_queue.h #define queuefree(q) snd_use_lock_free(&(q)->use_lock)
q 84 sound/core/seq/seq_queue.h void snd_seq_check_queue(struct snd_seq_queue *q, int atomic, int hop);
q 125 sound/core/seq/seq_timer.c struct snd_seq_queue *q = timeri->callback_data;
q 128 sound/core/seq/seq_timer.c if (q == NULL)
q 130 sound/core/seq/seq_timer.c tmr = q->timer;
q 158 sound/core/seq/seq_timer.c snd_seq_check_queue(q, 1, 0);
q 258 sound/core/seq/seq_timer.c int snd_seq_timer_open(struct snd_seq_queue *q)
q 265 sound/core/seq/seq_timer.c tmr = q->timer;
q 270 sound/core/seq/seq_timer.c sprintf(str, "sequencer queue %i", q->queue);
q 275 sound/core/seq/seq_timer.c err = snd_timer_open(&t, str, &tmr->alsa_id, q->queue);
q 285 sound/core/seq/seq_timer.c err = snd_timer_open(&t, str, &tid, q->queue);
q 293 sound/core/seq/seq_timer.c t->callback_data = q;
q 301 sound/core/seq/seq_timer.c int snd_seq_timer_close(struct snd_seq_queue *q)
q 306 sound/core/seq/seq_timer.c tmr = q->timer;
q 466 sound/core/seq/seq_timer.c struct snd_seq_queue *q;
q 472 sound/core/seq/seq_timer.c q = queueptr(idx);
q 473 sound/core/seq/seq_timer.c if (q == NULL)
q 475 sound/core/seq/seq_timer.c mutex_lock(&q->timer_mutex);
q 476 sound/core/seq/seq_timer.c tmr = q->timer;
q 482 sound/core/seq/seq_timer.c snd_iprintf(buffer, "Timer for queue %i : %s\n", q->queue, ti->timer->name);
q 487 sound/core/seq/seq_timer.c mutex_unlock(&q->timer_mutex);
q 488 sound/core/seq/seq_timer.c queuefree(q);
q 109 sound/core/seq/seq_timer.h int snd_seq_timer_open(struct snd_seq_queue *q);
q 110 sound/core/seq/seq_timer.h int snd_seq_timer_close(struct snd_seq_queue *q);
q 111 sound/core/seq/seq_timer.h int snd_seq_timer_midi_open(struct snd_seq_queue *q);
q 112 sound/core/seq/seq_timer.h int snd_seq_timer_midi_close(struct snd_seq_queue *q);
q 2859 sound/pci/ac97/ac97_codec.c struct quirk_table *q;
q 2862 sound/pci/ac97/ac97_codec.c q = &applicable_quirks[i];
q 2863 sound/pci/ac97/ac97_codec.c if (q->name && ! strcmp(typestr, q->name))
q 552 sound/pci/atiixp.c const struct snd_pci_quirk *q;
q 554 sound/pci/atiixp.c q = snd_pci_quirk_lookup(pci, atiixp_quirks);
q 555 sound/pci/atiixp.c if (q) {
q 557 sound/pci/atiixp.c snd_pci_quirk_name(q), q->value);
q 558 sound/pci/atiixp.c return q->value;
q 169 sound/pci/emu10k1/memory.c struct snd_emu10k1_memblk *q;
q 173 sound/pci/emu10k1/memory.c q = get_emu10k1_memblk(p, mapped_link);
q 174 sound/pci/emu10k1/memory.c start_page = q->mapped_page + q->pages;
q 178 sound/pci/emu10k1/memory.c q = get_emu10k1_memblk(p, mapped_link);
q 179 sound/pci/emu10k1/memory.c end_page = q->mapped_page;
q 454 sound/pci/emu10k1/memory.c struct snd_emu10k1_memblk *q;
q 458 sound/pci/emu10k1/memory.c q = get_emu10k1_memblk(p, mem.list);
q 459 sound/pci/emu10k1/memory.c if (q->last_page == first_page)
q 464 sound/pci/emu10k1/memory.c q = get_emu10k1_memblk(p, mem.list);
q 465 sound/pci/emu10k1/memory.c if (q->first_page == last_page)
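A recurring shape in the sound/pci entries around this point (atiixp above; hda_intel, patch_realtek, and nm256 below): look the board's PCI subsystem IDs up in a quirk table via snd_pci_quirk_lookup() and, on a hit, apply q->value as a per-board override. A sketch of that shape; the table contents and IDs here are made up:

    /* Illustrative table; only the lookup shape is taken from the
     * entries in this listing. */
    static const struct snd_pci_quirk my_quirks[] = {
        SND_PCI_QUIRK(0x1234, 0x5678, "Example board", 1), /* hypothetical */
        { } /* terminator */
    };

    static int decide_option(struct pci_dev *pci, int def)
    {
        const struct snd_pci_quirk *q;

        q = snd_pci_quirk_lookup(pci, my_quirks);
        if (q)
            return q->value;   /* board-specific override */
        return def;            /* otherwise keep the default */
    }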
"" : " - vendor generic"); q 1018 sound/pci/hda/hda_auto_parser.c for (q = quirk; q->subvendor || q->subdevice; q++) { q 1020 sound/pci/hda/hda_auto_parser.c q->subdevice | (q->subvendor << 16); q 1021 sound/pci/hda/hda_auto_parser.c unsigned int mask = 0xffff0000 | q->subdevice_mask; q 1023 sound/pci/hda/hda_auto_parser.c id = q->value; q 1025 sound/pci/hda/hda_auto_parser.c name = q->name; q 1120 sound/pci/hda/hda_codec.c struct hda_cvt_setup *q); q 1155 sound/pci/hda/hda_codec.c struct hda_cvt_setup *q) q 1157 sound/pci/hda/hda_codec.c hda_nid_t nid = q->nid; q 1158 sound/pci/hda/hda_codec.c if (q->stream_tag || q->channel_id) q 1160 sound/pci/hda/hda_codec.c if (q->format_id) q 1163 sound/pci/hda/hda_codec.c memset(q, 0, sizeof(*q)); q 1164 sound/pci/hda/hda_codec.c q->nid = nid; q 1474 sound/pci/hda/hda_intel.c const struct snd_pci_quirk *q; q 1487 sound/pci/hda/hda_intel.c q = snd_pci_quirk_lookup(chip->pci, position_fix_list); q 1488 sound/pci/hda/hda_intel.c if (q) { q 1491 sound/pci/hda/hda_intel.c q->value, q->subvendor, q->subdevice); q 1492 sound/pci/hda/hda_intel.c return q->value; q 1570 sound/pci/hda/hda_intel.c const struct snd_pci_quirk *q; q 1574 sound/pci/hda/hda_intel.c q = snd_pci_quirk_lookup(chip->pci, probe_mask_list); q 1575 sound/pci/hda/hda_intel.c if (q) { q 1578 sound/pci/hda/hda_intel.c q->value, q->subvendor, q->subdevice); q 1579 sound/pci/hda/hda_intel.c chip->codec_probe_mask = q->value; q 1611 sound/pci/hda/hda_intel.c const struct snd_pci_quirk *q; q 1618 sound/pci/hda/hda_intel.c q = snd_pci_quirk_lookup(chip->pci, msi_black_list); q 1619 sound/pci/hda/hda_intel.c if (q) { q 1622 sound/pci/hda/hda_intel.c q->subvendor, q->subdevice, q->value); q 1623 sound/pci/hda/hda_intel.c chip->msi = q->value; q 2202 sound/pci/hda/hda_intel.c const struct snd_pci_quirk *q; q 2204 sound/pci/hda/hda_intel.c q = snd_pci_quirk_lookup(chip->pci, power_save_blacklist); q 2205 sound/pci/hda/hda_intel.c if (q && val) { q 2207 sound/pci/hda/hda_intel.c q->subvendor, q->subdevice); q 1017 sound/pci/hda/patch_realtek.c const struct alc_codec_rename_pci_table *q; q 1028 sound/pci/hda/patch_realtek.c for (q = rename_pci_tbl; q->codec_vendor_id; q++) { q 1029 sound/pci/hda/patch_realtek.c if (q->codec_vendor_id != codec->core.vendor_id) q 1031 sound/pci/hda/patch_realtek.c if (q->pci_subvendor != codec->bus->pci->subsystem_vendor) q 1033 sound/pci/hda/patch_realtek.c if (!q->pci_subdevice || q 1034 sound/pci/hda/patch_realtek.c q->pci_subdevice == codec->bus->pci->subsystem_device) q 1035 sound/pci/hda/patch_realtek.c return alc_codec_rename(codec, q->name); q 1090 sound/pci/hda/patch_realtek.c const struct snd_pci_quirk *q; q 1091 sound/pci/hda/patch_realtek.c q = snd_pci_quirk_lookup(codec->bus->pci, beep_white_list); q 1092 sound/pci/hda/patch_realtek.c if (q) q 1093 sound/pci/hda/patch_realtek.c return q->value; q 1654 sound/pci/nm256/nm256.c const struct snd_pci_quirk *q; q 1656 sound/pci/nm256/nm256.c q = snd_pci_quirk_lookup(pci, nm256_quirks); q 1657 sound/pci/nm256/nm256.c if (q) { q 1659 sound/pci/nm256/nm256.c snd_pci_quirk_name(q)); q 1660 sound/pci/nm256/nm256.c switch (q->value) { q 218 sound/sh/aica.c int q, err, period_offset; q 227 sound/sh/aica.c for (q = 0; q < channels; q++) { q 231 sound/sh/aica.c (AICA_BUFFER_SIZE * q) / q 235 sound/sh/aica.c AICA_CHANNEL0_OFFSET + q * CHANNEL_OFFSET + q 34 sound/soc/codecs/rt5645.c #define QUIRK_INV_JD1_1(q) ((q) & 1) q 35 sound/soc/codecs/rt5645.c #define QUIRK_LEVEL_IRQ(q) (((q) >> 1) & 1) q 36 sound/soc/codecs/rt5645.c 
q 34 sound/soc/codecs/rt5645.c #define QUIRK_INV_JD1_1(q) ((q) & 1)
q 35 sound/soc/codecs/rt5645.c #define QUIRK_LEVEL_IRQ(q) (((q) >> 1) & 1)
q 36 sound/soc/codecs/rt5645.c #define QUIRK_IN2_DIFF(q) (((q) >> 2) & 1)
q 37 sound/soc/codecs/rt5645.c #define QUIRK_JD_MODE(q) (((q) >> 4) & 7)
q 38 sound/soc/codecs/rt5645.c #define QUIRK_DMIC1_DATA_PIN(q) (((q) >> 8) & 3)
q 39 sound/soc/codecs/rt5645.c #define QUIRK_DMIC2_DATA_PIN(q) (((q) >> 12) & 3)
q 187 sound/soc/codecs/tas2552.c unsigned int d, q, t;
q 199 sound/soc/codecs/tas2552.c q = d / (t + 1);
q 200 sound/soc/codecs/tas2552.c d = q + ((9999 - pll_clkin % 10000) * (d / t - q)) / 10000;
q 31 tools/hv/hv_fcopy_daemon.c char *q, *p;
q 43 tools/hv/hv_fcopy_daemon.c while ((q = strchr(p, '/')) != NULL) {
q 44 tools/hv/hv_fcopy_daemon.c if (q == p) {
q 48 tools/hv/hv_fcopy_daemon.c *q = '\0';
q 62 tools/hv/hv_fcopy_daemon.c p = q + 1;
q 63 tools/hv/hv_fcopy_daemon.c *q = '/';
q 445 tools/hv/hv_kvp_daemon.c char *value, *q;
q 459 tools/hv/hv_kvp_daemon.c q = p;
q 465 tools/hv/hv_kvp_daemon.c *q++ = *p++;
q 470 tools/hv/hv_kvp_daemon.c *q++ = *p++;
q 473 tools/hv/hv_kvp_daemon.c *q = 0;
q 127 tools/perf/pmu-events/jevents.c char *p, *q;
q 145 tools/perf/pmu-events/jevents.c q = fixed;
q 148 tools/perf/pmu-events/jevents.c *q = '\\';
q 149 tools/perf/pmu-events/jevents.c ++q;
q 151 tools/perf/pmu-events/jevents.c *q = *p;
q 152 tools/perf/pmu-events/jevents.c ++q;
q 154 tools/perf/pmu-events/jevents.c *q = '\0';
q 50 tools/perf/util/demangle-java.c const char *q;
q 55 tools/perf/util/demangle-java.c for (q = str; q != end; q++) {
q 60 tools/perf/util/demangle-java.c switch (*q) {
q 72 tools/perf/util/demangle-java.c buf[rlen++] = *q;
q 85 tools/perf/util/demangle-java.c rlen += scnprintf(buf + rlen, maxlen - rlen, "%s", base_types[*q - 'A']);
q 91 tools/perf/util/demangle-java.c buf[rlen++] = *q;
q 100 tools/perf/util/demangle-java.c buf[rlen++] = *q;
q 110 tools/perf/util/demangle-java.c buf[rlen++] = *q;
q 116 tools/perf/util/demangle-java.c buf[rlen++] = *q;
q 123 tools/perf/util/demangle-java.c if (isalpha(*(q + 1)))
q 136 tools/perf/util/demangle-java.c buf[rlen++] = *q;
q 415 tools/perf/util/header.c char *q = skip_spaces(r);
q 417 tools/perf/util/header.c if (q != (p+1))
q 418 tools/perf/util/header.c while ((*r++ = *q++));
q 2824 tools/perf/util/intel-pt-decoder/intel-pt-decoder.c unsigned char *q = buf_a + len_a - MAX_PADDING;
q 2827 tools/perf/util/intel-pt-decoder/intel-pt-decoder.c for (i = MAX_PADDING; i; i--, p++, q++) {
q 2828 tools/perf/util/intel-pt-decoder/intel-pt-decoder.c if (*p != *q)
q 199 tools/perf/util/s390-cpumsf.c struct auxtrace_queue *q;
q 205 tools/perf/util/s390-cpumsf.c q = &sf->queues.queue_array[sample->cpu];
q 206 tools/perf/util/s390-cpumsf.c sfq = q->priv;
q 105 tools/power/cpupower/utils/helpers/bitmask.c static const char *nexttoken(const char *q, int sep)
q 107 tools/power/cpupower/utils/helpers/bitmask.c if (q)
q 108 tools/power/cpupower/utils/helpers/bitmask.c q = strchr(q, sep);
q 109 tools/power/cpupower/utils/helpers/bitmask.c if (q)
q 110 tools/power/cpupower/utils/helpers/bitmask.c q++;
q 111 tools/power/cpupower/utils/helpers/bitmask.c return q;
q 194 tools/power/cpupower/utils/helpers/bitmask.c const char *p, *q;
q 198 tools/power/cpupower/utils/helpers/bitmask.c q = buf;
q 199 tools/power/cpupower/utils/helpers/bitmask.c while (p = q, q = nexttoken(q, ','), p) {
q 202 tools/testing/selftests/bpf/progs/btf_dump_test_case_syntax.c void (*q)(int);
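The bitmask.c entries above show a compact comma-list scanner: nexttoken() returns the character after the next separator (or NULL at the end), and the caller's comma-expression loop keeps p on the current token while q already points past it. A self-contained illustration, with the driver in main() added for demonstration:

    #include <stdio.h>
    #include <string.h>

    static const char *nexttoken(const char *q, int sep)
    {
        if (q)
            q = strchr(q, sep);
        if (q)
            q++;                 /* step past the separator itself */
        return q;
    }

    int main(void)
    {
        const char *p, *q = "0-3,5,7-9";

        /* each pass: p = start of one token, q = start of the next */
        while (p = q, q = nexttoken(q, ','), p)
            printf("token: %.*s\n",
                   (int)(q ? (size_t)(q - p - 1) : strlen(p)), p);
        return 0;
    }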
q 39 tools/testing/selftests/bpf/progs/test_map_lock.c struct array_elem *q;
q 51 tools/testing/selftests/bpf/progs/test_map_lock.c q = bpf_map_lookup_elem(&array_map, &key);
q 52 tools/testing/selftests/bpf/progs/test_map_lock.c if (!q)
q 54 tools/testing/selftests/bpf/progs/test_map_lock.c bpf_spin_lock(&q->lock);
q 56 tools/testing/selftests/bpf/progs/test_map_lock.c q->var[i] = rnd;
q 57 tools/testing/selftests/bpf/progs/test_map_lock.c bpf_spin_unlock(&q->lock);
q 54 tools/testing/selftests/bpf/progs/test_spin_lock.c struct bpf_vqueue *q;
q 79 tools/testing/selftests/bpf/progs/test_spin_lock.c q = bpf_map_lookup_elem(&vqueue, &key);
q 80 tools/testing/selftests/bpf/progs/test_spin_lock.c if (!q)
q 83 tools/testing/selftests/bpf/progs/test_spin_lock.c bpf_spin_lock(&q->lock);
q 84 tools/testing/selftests/bpf/progs/test_spin_lock.c q->credit += CREDIT_PER_NS(curtime - q->lasttime, q->rate);
q 85 tools/testing/selftests/bpf/progs/test_spin_lock.c q->lasttime = curtime;
q 86 tools/testing/selftests/bpf/progs/test_spin_lock.c if (q->credit > max_credit)
q 87 tools/testing/selftests/bpf/progs/test_spin_lock.c q->credit = max_credit;
q 88 tools/testing/selftests/bpf/progs/test_spin_lock.c q->credit -= pkt_len;
q 89 tools/testing/selftests/bpf/progs/test_spin_lock.c credit = q->credit;
q 90 tools/testing/selftests/bpf/progs/test_spin_lock.c bpf_spin_unlock(&q->lock);
q 867 tools/testing/selftests/bpf/test_verifier.c const char *p, *q;
q 882 tools/testing/selftests/bpf/test_verifier.c q = strstr(log, needle);
q 883 tools/testing/selftests/bpf/test_verifier.c if (!q) {
q 888 tools/testing/selftests/bpf/test_verifier.c log = q + len;
q 82 tools/testing/selftests/powerpc/copyloops/exc_validate.c static char *p, *q;
q 103 tools/testing/selftests/powerpc/copyloops/exc_validate.c q = p + page_size - MAX_LEN;
q 109 tools/testing/selftests/powerpc/copyloops/exc_validate.c do_one_test(q+dst, q+src, len);
q 61 tools/testing/selftests/timers/mqueue-lat.c mqd_t q;
q 66 tools/testing/selftests/timers/mqueue-lat.c q = mq_open("/foo", O_CREAT | O_RDONLY, 0666, NULL);
q 67 tools/testing/selftests/timers/mqueue-lat.c if (q < 0) {
q 71 tools/testing/selftests/timers/mqueue-lat.c mq_getattr(q, &attr);
q 84 tools/testing/selftests/timers/mqueue-lat.c ret = mq_timedreceive(q, buf, sizeof(buf), NULL, &target);
q 92 tools/testing/selftests/timers/mqueue-lat.c mq_close(q);
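The test_spin_lock.c entries above sketch a token-bucket rate limiter: under bpf_spin_lock(&q->lock) the credit is refilled from the elapsed time, clamped to max_credit, then charged for the packet. A plain-C restatement of the arithmetic; the types are illustrative and CREDIT_PER_NS is simplified to a plain rate multiply, which differs from the selftest's exact conversion:

    struct vqueue {                       /* illustrative, not the BPF map value */
        long long credit;                 /* available budget */
        unsigned long long lasttime;      /* ns of the last update */
        long long rate;                   /* budget gained per ns, pre-scaled */
    };

    static long long refill_and_charge(struct vqueue *q,
                                       unsigned long long now,
                                       long long max_credit,
                                       long long pkt_len)
    {
        q->credit += (long long)(now - q->lasttime) * q->rate;
        q->lasttime = now;
        if (q->credit > max_credit)
            q->credit = max_credit;       /* the bucket is full */
        q->credit -= pkt_len;             /* negative result means drop */
        return q->credit;
    }

In the selftest this whole update sits between bpf_spin_lock(&q->lock) and bpf_spin_unlock(&q->lock), so concurrent runs of the BPF program never observe a half-updated bucket.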