pp: identifier cross-reference (one entry per line: identifier, source line number, file, matching source line)

pp 40 arch/arm/mach-dove/pcie.c struct pcie_port *pp;
pp 45 arch/arm/mach-dove/pcie.c pp = &pcie_port[nr];
pp 46 arch/arm/mach-dove/pcie.c sys->private_data = pp;
pp 47 arch/arm/mach-dove/pcie.c pp->root_bus_nr = sys->busnr;
pp 52 arch/arm/mach-dove/pcie.c orion_pcie_set_local_bus_nr(pp->base, sys->busnr);
pp 54 arch/arm/mach-dove/pcie.c orion_pcie_setup(pp->base);
pp 56 arch/arm/mach-dove/pcie.c if (pp->index == 0)
pp 64 arch/arm/mach-dove/pcie.c snprintf(pp->mem_space_name, sizeof(pp->mem_space_name),
pp 65 arch/arm/mach-dove/pcie.c "PCIe %d MEM", pp->index);
pp 66 arch/arm/mach-dove/pcie.c pp->mem_space_name[sizeof(pp->mem_space_name) - 1] = 0;
pp 67 arch/arm/mach-dove/pcie.c pp->res.name = pp->mem_space_name;
pp 68 arch/arm/mach-dove/pcie.c if (pp->index == 0) {
pp 69 arch/arm/mach-dove/pcie.c pp->res.start = DOVE_PCIE0_MEM_PHYS_BASE;
pp 70 arch/arm/mach-dove/pcie.c pp->res.end = pp->res.start + DOVE_PCIE0_MEM_SIZE - 1;
pp 72 arch/arm/mach-dove/pcie.c pp->res.start = DOVE_PCIE1_MEM_PHYS_BASE;
pp 73 arch/arm/mach-dove/pcie.c pp->res.end = pp->res.start + DOVE_PCIE1_MEM_SIZE - 1;
pp 75 arch/arm/mach-dove/pcie.c pp->res.flags = IORESOURCE_MEM;
pp 76 arch/arm/mach-dove/pcie.c if (request_resource(&iomem_resource, &pp->res))
pp 78 arch/arm/mach-dove/pcie.c pci_add_resource_offset(&sys->resources, &pp->res, sys->mem_offset);
pp 83 arch/arm/mach-dove/pcie.c static int pcie_valid_config(struct pcie_port *pp, int bus, int dev)
pp 89 arch/arm/mach-dove/pcie.c if (bus == pp->root_bus_nr && dev > 1)
pp 99 arch/arm/mach-dove/pcie.c struct pcie_port *pp = sys->private_data;
pp 103 arch/arm/mach-dove/pcie.c if (pcie_valid_config(pp, bus->number, PCI_SLOT(devfn)) == 0) {
pp 108 arch/arm/mach-dove/pcie.c spin_lock_irqsave(&pp->conf_lock, flags);
pp 109 arch/arm/mach-dove/pcie.c ret = orion_pcie_rd_conf(pp->base, bus, devfn, where, size, val);
pp 110 arch/arm/mach-dove/pcie.c spin_unlock_irqrestore(&pp->conf_lock, flags);
pp 119 arch/arm/mach-dove/pcie.c struct pcie_port *pp = sys->private_data;
pp 123 arch/arm/mach-dove/pcie.c if (pcie_valid_config(pp, bus->number, PCI_SLOT(devfn)) == 0)
pp 126 arch/arm/mach-dove/pcie.c spin_lock_irqsave(&pp->conf_lock, flags);
pp 127 arch/arm/mach-dove/pcie.c ret = orion_pcie_wr_conf(pp->base, bus, devfn, where, size, val);
pp 128 arch/arm/mach-dove/pcie.c spin_unlock_irqrestore(&pp->conf_lock, flags);
pp 177 arch/arm/mach-dove/pcie.c struct pcie_port *pp = sys->private_data;
pp 179 arch/arm/mach-dove/pcie.c return pp->index ? IRQ_DOVE_PCIE1 : IRQ_DOVE_PCIE0;
pp 194 arch/arm/mach-dove/pcie.c struct pcie_port *pp = &pcie_port[num_pcie_ports++];
pp 202 arch/arm/mach-dove/pcie.c pp->index = index;
pp 203 arch/arm/mach-dove/pcie.c pp->root_bus_nr = -1;
pp 204 arch/arm/mach-dove/pcie.c pp->base = base;
pp 205 arch/arm/mach-dove/pcie.c spin_lock_init(&pp->conf_lock);
pp 206 arch/arm/mach-dove/pcie.c memset(&pp->res, 0, sizeof(pp->res));
pp 78 arch/arm/mach-mv78xx0/pcie.c struct pcie_port *pp = pcie_port + i;
pp 80 arch/arm/mach-mv78xx0/pcie.c snprintf(pp->mem_space_name, sizeof(pp->mem_space_name),
pp 81 arch/arm/mach-mv78xx0/pcie.c "PCIe %d.%d MEM", pp->maj, pp->min);
pp 82 arch/arm/mach-mv78xx0/pcie.c pp->mem_space_name[sizeof(pp->mem_space_name) - 1] = 0;
pp 83 arch/arm/mach-mv78xx0/pcie.c pp->res.name = pp->mem_space_name;
pp 84 arch/arm/mach-mv78xx0/pcie.c pp->res.flags = IORESOURCE_MEM;
pp 85 arch/arm/mach-mv78xx0/pcie.c pp->res.start = start;
pp 86 arch/arm/mach-mv78xx0/pcie.c pp->res.end = start + size_each - 1;
pp 89 arch/arm/mach-mv78xx0/pcie.c if (request_resource(&iomem_resource, &pp->res))
pp 92 arch/arm/mach-mv78xx0/pcie.c mvebu_mbus_add_window_by_id(MV78XX0_MBUS_PCIE_MEM_TARGET(pp->maj, pp->min),
pp 93 arch/arm/mach-mv78xx0/pcie.c MV78XX0_MBUS_PCIE_MEM_ATTR(pp->maj, pp->min),
pp 94 arch/arm/mach-mv78xx0/pcie.c pp->res.start, resource_size(&pp->res));
pp 95 arch/arm/mach-mv78xx0/pcie.c mvebu_mbus_add_window_remap_by_id(MV78XX0_MBUS_PCIE_IO_TARGET(pp->maj, pp->min),
pp 96 arch/arm/mach-mv78xx0/pcie.c MV78XX0_MBUS_PCIE_IO_ATTR(pp->maj, pp->min),
pp 103 arch/arm/mach-mv78xx0/pcie.c struct pcie_port *pp;
pp 108 arch/arm/mach-mv78xx0/pcie.c pp = &pcie_port[nr];
pp 109 arch/arm/mach-mv78xx0/pcie.c sys->private_data = pp;
pp 110 arch/arm/mach-mv78xx0/pcie.c pp->root_bus_nr = sys->busnr;
pp 115 arch/arm/mach-mv78xx0/pcie.c orion_pcie_set_local_bus_nr(pp->base, sys->busnr);
pp 116 arch/arm/mach-mv78xx0/pcie.c orion_pcie_setup(pp->base);
pp 120 arch/arm/mach-mv78xx0/pcie.c pci_add_resource_offset(&sys->resources, &pp->res, sys->mem_offset);
pp 125 arch/arm/mach-mv78xx0/pcie.c static int pcie_valid_config(struct pcie_port *pp, int bus, int dev)
pp 131 arch/arm/mach-mv78xx0/pcie.c if (bus == pp->root_bus_nr && dev > 1)
pp 141 arch/arm/mach-mv78xx0/pcie.c struct pcie_port *pp = sys->private_data;
pp 145 arch/arm/mach-mv78xx0/pcie.c if (pcie_valid_config(pp, bus->number, PCI_SLOT(devfn)) == 0) {
pp 150 arch/arm/mach-mv78xx0/pcie.c spin_lock_irqsave(&pp->conf_lock, flags);
pp 151 arch/arm/mach-mv78xx0/pcie.c ret = orion_pcie_rd_conf(pp->base, bus, devfn, where, size, val);
pp 152 arch/arm/mach-mv78xx0/pcie.c spin_unlock_irqrestore(&pp->conf_lock, flags);
pp 161 arch/arm/mach-mv78xx0/pcie.c struct pcie_port *pp = sys->private_data;
pp 165 arch/arm/mach-mv78xx0/pcie.c if (pcie_valid_config(pp, bus->number, PCI_SLOT(devfn)) == 0)
pp 168 arch/arm/mach-mv78xx0/pcie.c spin_lock_irqsave(&pp->conf_lock, flags);
pp 169 arch/arm/mach-mv78xx0/pcie.c ret = orion_pcie_wr_conf(pp->base, bus, devfn, where, size, val);
pp 170 arch/arm/mach-mv78xx0/pcie.c spin_unlock_irqrestore(&pp->conf_lock, flags);
pp 219 arch/arm/mach-mv78xx0/pcie.c struct pcie_port *pp = sys->private_data;
pp 221 arch/arm/mach-mv78xx0/pcie.c return IRQ_MV78XX0_PCIE_00 + (pp->maj << 2) + pp->min;
pp 237 arch/arm/mach-mv78xx0/pcie.c struct pcie_port *pp = &pcie_port[num_pcie_ports++];
pp 241 arch/arm/mach-mv78xx0/pcie.c pp->maj = maj;
pp 242 arch/arm/mach-mv78xx0/pcie.c pp->min = min;
pp 243 arch/arm/mach-mv78xx0/pcie.c pp->root_bus_nr = -1;
pp 244 arch/arm/mach-mv78xx0/pcie.c pp->base = base;
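The mach-dove and mach-mv78xx0 entries above show the same two-step pattern: reject config cycles for device slots that cannot exist directly behind the root port, then serialize the actual config-space access through a per-port lock. A minimal user-space sketch of that pattern (fake_port, cfg_read and the pthread mutex are illustrative stand-ins, not kernel API):

    #include <pthread.h>
    #include <stdint.h>
    #include <stdio.h>

    struct fake_port {
        int root_bus_nr;
        pthread_mutex_t conf_lock;
        uint32_t cfg[256];              /* stand-in for real config space */
    };

    static int valid_config(struct fake_port *pp, int bus, int dev)
    {
        /* only slots 0 and 1 sit directly on the root bus */
        if (bus == pp->root_bus_nr && dev > 1)
            return 0;
        return 1;
    }

    static int cfg_read(struct fake_port *pp, int bus, int dev, int where,
                        uint32_t *val)
    {
        if (!valid_config(pp, bus, dev)) {
            *val = 0xffffffff;          /* master-abort convention */
            return -1;
        }
        pthread_mutex_lock(&pp->conf_lock);   /* spin_lock_irqsave() in the driver */
        *val = pp->cfg[where & 0xff];
        pthread_mutex_unlock(&pp->conf_lock);
        return 0;
    }

    int main(void)
    {
        struct fake_port p = { .root_bus_nr = 0,
                               .conf_lock = PTHREAD_MUTEX_INITIALIZER };
        uint32_t v;
        printf("dev 2 on root bus -> %d\n", cfg_read(&p, 0, 2, 0, &v));
        printf("dev 0 on root bus -> %d (val 0x%x)\n",
               cfg_read(&p, 0, 0, 0, &v), v);
        return 0;
    }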
pp 245 arch/arm/mach-mv78xx0/pcie.c spin_lock_init(&pp->conf_lock);
pp 246 arch/arm/mach-mv78xx0/pcie.c memset(&pp->res, 0, sizeof(pp->res));
pp 194 arch/arm64/kernel/insn.c struct aarch64_insn_patch *pp = arg;
pp 197 arch/arm64/kernel/insn.c if (atomic_inc_return(&pp->cpu_count) == 1) {
pp 198 arch/arm64/kernel/insn.c for (i = 0; ret == 0 && i < pp->insn_cnt; i++)
pp 199 arch/arm64/kernel/insn.c ret = aarch64_insn_patch_text_nosync(pp->text_addrs[i],
pp 200 arch/arm64/kernel/insn.c pp->new_insns[i]);
pp 202 arch/arm64/kernel/insn.c atomic_inc(&pp->cpu_count);
pp 204 arch/arm64/kernel/insn.c while (atomic_read(&pp->cpu_count) <= num_online_cpus())
pp 101 arch/ia64/include/asm/processor.h __u64 pp : 1;
pp 3940 arch/ia64/kernel/perfmon.c ia64_psr(regs)->pp = 0;
pp 4005 arch/ia64/kernel/perfmon.c ia64_psr(regs)->pp = 1;
pp 4318 arch/ia64/kernel/perfmon.c ia64_psr(regs)->up = ia64_psr(regs)->pp = 0;
pp 4545 arch/ia64/kernel/perfmon.c BUG_ON(ia64_psr(regs)->pp);
pp 5683 arch/ia64/kernel/perfmon.c ia64_psr(regs)->pp = is_ctxswin ? dcr_pp : 0;
pp 6336 arch/ia64/kernel/perfmon.c ia64_psr(regs)->pp = 0;
pp 6362 arch/ia64/kernel/perfmon.c ia64_psr(regs)->pp = 0;
pp 6621 arch/ia64/kernel/perfmon.c if (info == 0 && ia64_psr(regs)->pp == 0 && (dcr & IA64_DCR_PP) == 0) {
pp 6648 arch/ia64/kernel/perfmon.c ia64_psr(regs)->pp);
pp 6651 arch/ia64/kernel/perfmon.c ia64_psr(regs)->pp = 0;
pp 52 arch/m68k/include/asm/nettel.h volatile unsigned short *pp;
pp 53 arch/m68k/include/asm/nettel.h pp = (volatile unsigned short *) MCFSIM_PADAT;
pp 54 arch/m68k/include/asm/nettel.h return((unsigned int) *pp);
pp 59 arch/m68k/include/asm/nettel.h volatile unsigned short *pp;
pp 60 arch/m68k/include/asm/nettel.h pp = (volatile unsigned short *) MCFSIM_PADAT;
pp 62 arch/m68k/include/asm/nettel.h *pp = ppdata;
pp 38 arch/microblaze/include/asm/mmu.h unsigned long pp:2; /* Page protection */
pp 611 arch/microblaze/pci/pci-common.c struct resource *p, **pp;
pp 614 arch/microblaze/pci/pci-common.c for (pp = &parent->child; (p = *pp) != NULL; pp = &p->sibling) {
pp 622 arch/microblaze/pci/pci-common.c firstpp = pp;
pp 628 arch/microblaze/pci/pci-common.c res->sibling = *pp;
pp 630 arch/microblaze/pci/pci-common.c *pp = NULL;
pp 183 arch/mips/include/asm/octeon/cvmx-ciu3-defs.h uint64_t pp : 48;
pp 185 arch/mips/include/asm/octeon/cvmx-ciu3-defs.h uint64_t pp : 48;
pp 3464 arch/mips/include/asm/octeon/cvmx-npei-defs.h uint64_t pp:64;
pp 3466 arch/mips/include/asm/octeon/cvmx-npei-defs.h uint64_t pp:64;
pp 3608 arch/mips/include/asm/octeon/cvmx-npei-defs.h uint64_t pp:64;
pp 3610 arch/mips/include/asm/octeon/cvmx-npei-defs.h uint64_t pp:64;
pp 66 arch/mips/include/asm/octeon/cvmx-pow-defs.h uint64_t pp:16;
pp 70 arch/mips/include/asm/octeon/cvmx-pow-defs.h uint64_t pp:16;
pp 77 arch/mips/include/asm/octeon/cvmx-pow-defs.h uint64_t pp:1;
pp 99 arch/mips/include/asm/octeon/cvmx-pow-defs.h uint64_t pp:1;
pp 106 arch/mips/include/asm/octeon/cvmx-pow-defs.h uint64_t pp:2;
pp 128 arch/mips/include/asm/octeon/cvmx-pow-defs.h uint64_t pp:2;
pp 135 arch/mips/include/asm/octeon/cvmx-pow-defs.h uint64_t pp:16;
pp 159 arch/mips/include/asm/octeon/cvmx-pow-defs.h uint64_t pp:16;
pp 166 arch/mips/include/asm/octeon/cvmx-pow-defs.h uint64_t pp:4;
pp 188 arch/mips/include/asm/octeon/cvmx-pow-defs.h uint64_t pp:4;
pp 195 arch/mips/include/asm/octeon/cvmx-pow-defs.h uint64_t pp:12;
pp 219 arch/mips/include/asm/octeon/cvmx-pow-defs.h uint64_t pp:12;
pp 226 arch/mips/include/asm/octeon/cvmx-pow-defs.h uint64_t pp:4;
pp 244 arch/mips/include/asm/octeon/cvmx-pow-defs.h uint64_t pp:4;
pp 251 arch/mips/include/asm/octeon/cvmx-pow-defs.h uint64_t pp:6;
pp 269 arch/mips/include/asm/octeon/cvmx-pow-defs.h uint64_t pp:6;
pp 276 arch/mips/include/asm/octeon/cvmx-pow-defs.h uint64_t pp:10;
pp 294 arch/mips/include/asm/octeon/cvmx-pow-defs.h uint64_t pp:10;
pp 192 arch/mips/kernel/pm-cps.c static void cps_gen_cache_routine(u32 **pp, struct uasm_label **pl,
pp 206 arch/mips/kernel/pm-cps.c UASM_i_LA(pp, t0, (long)CKSEG0);
pp 210 arch/mips/kernel/pm-cps.c uasm_i_addiu(pp, t1, t0, cache_size);
pp 212 arch/mips/kernel/pm-cps.c UASM_i_LA(pp, t1, (long)(CKSEG0 + cache_size));
pp 215 arch/mips/kernel/pm-cps.c uasm_build_label(pl, *pp, lbl);
pp 220 arch/mips/kernel/pm-cps.c uasm_i_cache(pp, op, 0, t0);
pp 221 arch/mips/kernel/pm-cps.c uasm_i_addiu(pp, t0, t0, cache->linesz);
pp 223 arch/mips/kernel/pm-cps.c uasm_i_cache(pp, op, i * cache->linesz, t0);
pp 229 arch/mips/kernel/pm-cps.c uasm_i_addiu(pp, t0, t0, unroll_lines * cache->linesz);
pp 232 arch/mips/kernel/pm-cps.c uasm_il_bne(pp, pr, t0, t1, lbl);
pp 233 arch/mips/kernel/pm-cps.c uasm_i_nop(pp);
pp 236 arch/mips/kernel/pm-cps.c static int cps_gen_flush_fsb(u32 **pp, struct uasm_label **pl,
pp 278 arch/mips/kernel/pm-cps.c uasm_i_mfc0(pp, t2, 25, (perf_counter * 2) + 0); /* PerfCtlN */
pp 279 arch/mips/kernel/pm-cps.c uasm_i_mfc0(pp, t3, 25, (perf_counter * 2) + 1); /* PerfCntN */
pp 282 arch/mips/kernel/pm-cps.c uasm_i_addiu(pp, t0, zero, (perf_event << 5) | 0xf);
pp 283 arch/mips/kernel/pm-cps.c uasm_i_mtc0(pp, t0, 25, (perf_counter * 2) + 0); /* PerfCtlN */
pp 284 arch/mips/kernel/pm-cps.c uasm_i_ehb(pp);
pp 285 arch/mips/kernel/pm-cps.c uasm_i_mtc0(pp, zero, 25, (perf_counter * 2) + 1); /* PerfCntN */
pp 286 arch/mips/kernel/pm-cps.c uasm_i_ehb(pp);
pp 289 arch/mips/kernel/pm-cps.c UASM_i_LA(pp, t0, (long)CKSEG0);
pp 292 arch/mips/kernel/pm-cps.c uasm_build_label(pl, *pp, lbl);
pp 296 arch/mips/kernel/pm-cps.c uasm_i_lw(pp, zero, i * line_size * line_stride, t0);
pp 303 arch/mips/kernel/pm-cps.c uasm_i_cache(pp, Hit_Invalidate_D,
pp 305 arch/mips/kernel/pm-cps.c uasm_i_cache(pp, Hit_Writeback_Inv_SD,
pp 310 arch/mips/kernel/pm-cps.c uasm_i_sync(pp, STYPE_SYNC);
pp 311 arch/mips/kernel/pm-cps.c uasm_i_ehb(pp);
pp 314 arch/mips/kernel/pm-cps.c uasm_i_mfc0(pp, t1, 25, (perf_counter * 2) + 1); /* PerfCntN */
pp 317 arch/mips/kernel/pm-cps.c uasm_il_beqz(pp, pr, t1, lbl);
pp 318 arch/mips/kernel/pm-cps.c uasm_i_nop(pp);
pp 321 arch/mips/kernel/pm-cps.c uasm_i_mtc0(pp, t2, 25, (perf_counter * 2) + 0); /* PerfCtlN */
pp 322 arch/mips/kernel/pm-cps.c uasm_i_ehb(pp);
pp 323 arch/mips/kernel/pm-cps.c uasm_i_mtc0(pp, t3, 25, (perf_counter * 2) + 1); /* PerfCntN */
pp 324 arch/mips/kernel/pm-cps.c uasm_i_ehb(pp);
pp 329 arch/mips/kernel/pm-cps.c static void cps_gen_set_top_bit(u32 **pp, struct uasm_label **pl,
pp 333 arch/mips/kernel/pm-cps.c uasm_i_lui(pp, t0, uasm_rel_hi(0x80000000));
pp 334 arch/mips/kernel/pm-cps.c uasm_build_label(pl, *pp, lbl);
pp 335 arch/mips/kernel/pm-cps.c uasm_i_ll(pp, t1, 0, r_addr);
pp 336 arch/mips/kernel/pm-cps.c uasm_i_or(pp, t1, t1, t0);
pp 337 arch/mips/kernel/pm-cps.c uasm_i_sc(pp, t1, 0, r_addr);
pp 338 arch/mips/kernel/pm-cps.c uasm_il_beqz(pp, pr, t1, lbl);
pp 339 arch/mips/kernel/pm-cps.c uasm_i_nop(pp);
pp 116 arch/powerpc/include/asm/asm-prototypes.h unsigned long pp,
pp 88 arch/powerpc/include/asm/book3s/32/mmu-hash.h unsigned long pp:2; /* Page protection */
pp 463 arch/powerpc/include/asm/book3s/64/mmu-hash.h unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap);
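The arch/arm64/kernel/insn.c lines above are a compact rendezvous: the first CPU into the callback applies all the patches, bumps the counter a second time, and every other CPU spins until the count exceeds the number of online CPUs. A runnable C11 model of that handshake (the thread count and names are illustrative, not the kernel's):

    #include <stdatomic.h>
    #include <pthread.h>
    #include <stdio.h>

    #define NCPUS 4

    static atomic_int cpu_count;
    static int patched;

    /* one "CPU" does the work, the rest spin until it is visible */
    static void *patch_cb(void *arg)
    {
        (void)arg;
        if (atomic_fetch_add(&cpu_count, 1) + 1 == 1) {
            patched = 1;                     /* "apply the instructions" */
            atomic_fetch_add(&cpu_count, 1); /* release the waiters */
        } else {
            while (atomic_load(&cpu_count) <= NCPUS)
                ;                            /* cpu_relax() in the kernel */
        }
        return NULL;
    }

    int main(void)
    {
        pthread_t t[NCPUS];
        for (int i = 0; i < NCPUS; i++)
            pthread_create(&t[i], NULL, patch_cb, NULL);
        for (int i = 0; i < NCPUS; i++)
            pthread_join(t[i], NULL);
        printf("patched=%d count=%d\n", patched, atomic_load(&cpu_count));
        return 0;
    }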
pp 22 arch/powerpc/include/asm/kvm_book3s.h u8 pp;
pp 382 arch/powerpc/include/asm/kvm_book3s_64.h unsigned long pp = ptel & (HPTE_R_PP0 | HPTE_R_PP);
pp 384 arch/powerpc/include/asm/kvm_book3s_64.h return pp != PP_RXRX && pp != PP_RXXX;
pp 450 arch/powerpc/include/asm/kvm_book3s_64.h static inline bool hpte_read_permission(unsigned long pp, unsigned long key)
pp 453 arch/powerpc/include/asm/kvm_book3s_64.h return PP_RWRX <= pp && pp <= PP_RXRX;
pp 457 arch/powerpc/include/asm/kvm_book3s_64.h static inline bool hpte_write_permission(unsigned long pp, unsigned long key)
pp 460 arch/powerpc/include/asm/kvm_book3s_64.h return pp == PP_RWRW;
pp 461 arch/powerpc/include/asm/kvm_book3s_64.h return pp <= PP_RWRW;
pp 1098 arch/powerpc/kernel/pci-common.c struct resource *p, **pp;
pp 1101 arch/powerpc/kernel/pci-common.c for (pp = &parent->child; (p = *pp) != NULL; pp = &p->sibling) {
pp 1109 arch/powerpc/kernel/pci-common.c firstpp = pp;
pp 1115 arch/powerpc/kernel/pci-common.c res->sibling = *pp;
pp 1117 arch/powerpc/kernel/pci-common.c *pp = NULL;
pp 2156 arch/powerpc/kernel/prom_init.c static void __init prom_init_client_services(unsigned long pp)
pp 2159 arch/powerpc/kernel/prom_init.c prom_entry = pp;
pp 3286 arch/powerpc/kernel/prom_init.c unsigned long pp,
pp 3308 arch/powerpc/kernel/prom_init.c prom_init_client_services(pp);
pp 168 arch/powerpc/kvm/book3s_32_mmu.c pte->may_read = bat->pp;
pp 169 arch/powerpc/kvm/book3s_32_mmu.c pte->may_write = bat->pp > 1;
pp 224 arch/powerpc/kvm/book3s_32_mmu.c u8 pp;
pp 227 arch/powerpc/kvm/book3s_32_mmu.c pp = pte1 & 3;
pp 231 arch/powerpc/kvm/book3s_32_mmu.c pp |= 4;
pp 236 arch/powerpc/kvm/book3s_32_mmu.c switch (pp) {
pp 251 arch/powerpc/kvm/book3s_32_mmu.c pte0, pte1, pp);
pp 217 arch/powerpc/kvm/book3s_64_mmu.c u8 pp, key = 0;
pp 304 arch/powerpc/kvm/book3s_64_mmu.c pp = (r & HPTE_R_PP) | key;
pp 306 arch/powerpc/kvm/book3s_64_mmu.c pp |= 8;
pp 322 arch/powerpc/kvm/book3s_64_mmu.c switch (pp) {
pp 346 arch/powerpc/kvm/book3s_64_mmu_hv.c unsigned long pp, key;
pp 387 arch/powerpc/kvm/book3s_64_mmu_hv.c pp = gr & (HPTE_R_PP0 | HPTE_R_PP);
pp 392 arch/powerpc/kvm/book3s_64_mmu_hv.c gpte->may_read = hpte_read_permission(pp, key);
pp 393 arch/powerpc/kvm/book3s_64_mmu_hv.c gpte->may_write = hpte_write_permission(pp, key);
pp 652 arch/powerpc/kvm/book3s_emulate.c bat->pp = val & 3;
pp 1217 arch/powerpc/kvm/book3s_hv_rm_mmu.c unsigned long pp, key;
pp 1258 arch/powerpc/kvm/book3s_hv_rm_mmu.c pp = gr & (HPTE_R_PP0 | HPTE_R_PP);
pp 1264 arch/powerpc/kvm/book3s_hv_rm_mmu.c if (!hpte_read_permission(pp, slb_v & key))
pp 1268 arch/powerpc/kvm/book3s_hv_rm_mmu.c if (!hpte_write_permission(pp, slb_v & key))
pp 1271 arch/powerpc/kvm/book3s_hv_rm_mmu.c if (!hpte_read_permission(pp, slb_v & key))
pp 33 arch/powerpc/lib/rheap.c unsigned long *pp;
pp 35 arch/powerpc/lib/rheap.c pp = (unsigned long *)&l->next;
pp 36 arch/powerpc/lib/rheap.c if (*pp >= s && *pp < e)
pp 37 arch/powerpc/lib/rheap.c *pp += d;
pp 39 arch/powerpc/lib/rheap.c pp = (unsigned long *)&l->prev;
pp 40 arch/powerpc/lib/rheap.c if (*pp >= s && *pp < e)
pp 41 arch/powerpc/lib/rheap.c *pp += d;
pp 452 arch/powerpc/mm/book3s64/hash_pgtable.c unsigned long start, end, pp;
pp 457 arch/powerpc/mm/book3s64/hash_pgtable.c pp = htab_convert_pte_flags(pgprot_val(PAGE_KERNEL));
pp 459 arch/powerpc/mm/book3s64/hash_pgtable.c WARN_ON(!hash__change_memory_range(start, end, pp));
pp 1093 arch/powerpc/mm/book3s64/hash_utils.c unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap)
pp 1098 arch/powerpc/mm/book3s64/hash_utils.c return pp;
pp 1108 arch/powerpc/mm/book3s64/hash_utils.c pp |= HPTE_R_N;
pp 1110 arch/powerpc/mm/book3s64/hash_utils.c return pp;
pp 13 arch/powerpc/mm/ptdump/bats.c static char *pp_601(int k, int pp)
pp 15 arch/powerpc/mm/ptdump/bats.c if (pp == 0)
pp 17 arch/powerpc/mm/ptdump/bats.c if (pp == 1)
pp 19 arch/powerpc/mm/ptdump/bats.c if (pp == 2)
pp 28 arch/powerpc/mm/ptdump/bats.c u32 pp = upper & 3;
pp 46 arch/powerpc/mm/ptdump/bats.c seq_printf(m, "Kernel %s User %s", pp_601(k & 2, pp), pp_601(k & 1, pp));
pp 243 arch/powerpc/perf/power5+-pmu.c int pmc, altpmc, pp, j;
pp 249 arch/powerpc/perf/power5+-pmu.c pp = event & PM_PMCSEL_MSK;
pp 251 arch/powerpc/perf/power5+-pmu.c if (bytedecode_alternatives[pmc - 1][j] == pp) {
pp 259 arch/powerpc/perf/power5+-pmu.c if (pmc == 1 && (pp == 0x0d || pp == 0x0e))
pp 261 arch/powerpc/perf/power5+-pmu.c if (pmc == 3 && (pp == 0x2e || pp == 0x2f))
pp 265 arch/powerpc/perf/power5+-pmu.c if (pp == 0x10 || pp == 0x28)
pp 251 arch/powerpc/perf/power5-pmu.c int pmc, altpmc, pp, j;
pp 257 arch/powerpc/perf/power5-pmu.c pp = event & PM_PMCSEL_MSK;
pp 259 arch/powerpc/perf/power5-pmu.c if (bytedecode_alternatives[pmc - 1][j] == pp) {
pp 97 arch/powerpc/platforms/powermac/bootx_init.c struct bootx_dt_prop *pp =
pp 100 arch/powerpc/platforms/powermac/bootx_init.c if (strcmp((char *)((unsigned long)pp->name + base),
pp 102 arch/powerpc/platforms/powermac/bootx_init.c return (void *)((unsigned long)pp->value + base);
pp 104 arch/powerpc/platforms/powermac/bootx_init.c ppp = &pp->next;
pp 251 arch/powerpc/platforms/powermac/bootx_init.c struct bootx_dt_prop *pp =
pp 254 arch/powerpc/platforms/powermac/bootx_init.c namep = pp->name ? (char *)(base + pp->name) : NULL;
pp 262 arch/powerpc/platforms/powermac/bootx_init.c ppp = &pp->next;
pp 310 arch/powerpc/platforms/powermac/bootx_init.c struct bootx_dt_prop *pp =
pp 313 arch/powerpc/platforms/powermac/bootx_init.c namep = pp->name ? (char *)(base + pp->name) : NULL;
pp 323 arch/powerpc/platforms/powermac/bootx_init.c pp->value ? (void *)(base + pp->value): NULL,
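The arch/powerpc/lib/rheap.c lines above relocate a linked list in place: any stored next/prev pointer that falls inside the moved region [s, e) is slid by the displacement d. A standalone sketch of the same fixup (the node layout and buffer sizes are illustrative):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct node { struct node *next; struct node *prev; };

    /* if a stored pointer points into the moved region [s, e), slide it by d */
    static void fixup_ptr(void **slot, uintptr_t s, uintptr_t e, intptr_t d)
    {
        uintptr_t v = (uintptr_t)*slot;
        if (v >= s && v < e)
            *slot = (void *)(v + d);
    }

    int main(void)
    {
        char oldbuf[2 * sizeof(struct node)], newbuf[2 * sizeof(struct node)];
        struct node *a = (struct node *)oldbuf, *b = a + 1;
        a->next = b; a->prev = NULL; b->next = NULL; b->prev = a;

        memcpy(newbuf, oldbuf, sizeof(oldbuf));          /* "move" the heap */
        uintptr_t s = (uintptr_t)oldbuf, e = s + sizeof(oldbuf);
        intptr_t d = (intptr_t)((uintptr_t)newbuf - s);

        struct node *na = (struct node *)newbuf, *nb = na + 1;
        fixup_ptr((void **)&na->next, s, e, d);
        fixup_ptr((void **)&nb->prev, s, e, d);
        printf("links ok: %d %d\n", na->next == nb, nb->prev == na);
        return 0;
    }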
pp 324 arch/powerpc/platforms/powermac/bootx_init.c pp->length, mem_end);
pp 326 arch/powerpc/platforms/powermac/bootx_init.c ppp = &pp->next;
pp 683 arch/powerpc/platforms/powermac/pfunc_core.c struct property *pp;
pp 688 arch/powerpc/platforms/powermac/pfunc_core.c for (pp = dev->node->properties; pp != 0; pp = pp->next) {
pp 690 arch/powerpc/platforms/powermac/pfunc_core.c if (strncmp(pp->name, PP_PREFIX, plen) != 0)
pp 692 arch/powerpc/platforms/powermac/pfunc_core.c name = pp->name + plen;
pp 693 arch/powerpc/platforms/powermac/pfunc_core.c if (strlen(name) && pp->length >= 12)
pp 695 arch/powerpc/platforms/powermac/pfunc_core.c pp->value, pp->length);
pp 97 arch/powerpc/platforms/powermac/setup.c const char *pp;
pp 115 arch/powerpc/platforms/powermac/setup.c pp = of_get_property(np, "model", NULL);
pp 116 arch/powerpc/platforms/powermac/setup.c if (pp != NULL)
pp 117 arch/powerpc/platforms/powermac/setup.c seq_printf(m, "%s\n", pp);
pp 120 arch/powerpc/platforms/powermac/setup.c pp = of_get_property(np, "compatible", &plen);
pp 121 arch/powerpc/platforms/powermac/setup.c if (pp != NULL) {
pp 124 arch/powerpc/platforms/powermac/setup.c int l = strlen(pp) + 1;
pp 125 arch/powerpc/platforms/powermac/setup.c seq_printf(m, " %s", pp);
pp 127 arch/powerpc/platforms/powermac/setup.c pp += l;
pp 159 arch/powerpc/platforms/powermac/setup.c pp = of_get_property(np, "ram-type", NULL);
pp 160 arch/powerpc/platforms/powermac/setup.c if (pp)
pp 161 arch/powerpc/platforms/powermac/setup.c seq_printf(m, " %s", pp);
pp 84 arch/powerpc/platforms/pseries/lpar.c struct paca_struct *pp;
pp 88 arch/powerpc/platforms/pseries/lpar.c pp = paca_ptrs[cpu];
pp 89 arch/powerpc/platforms/pseries/lpar.c if (pp->dispatch_log)
pp 101 arch/powerpc/platforms/pseries/lpar.c pp->dtl_ridx = 0;
pp 102 arch/powerpc/platforms/pseries/lpar.c pp->dispatch_log = dtl;
pp 103 arch/powerpc/platforms/pseries/lpar.c pp->dispatch_log_end = dtl + N_DISPATCH_LOG;
pp 104 arch/powerpc/platforms/pseries/lpar.c pp->dtl_curr = dtl;
pp 116 arch/powerpc/platforms/pseries/lpar.c struct paca_struct *pp;
pp 120 arch/powerpc/platforms/pseries/lpar.c pp = paca_ptrs[cpu];
pp 121 arch/powerpc/platforms/pseries/lpar.c dtl = pp->dispatch_log;
pp 123 arch/powerpc/platforms/pseries/lpar.c pp->dtl_ridx = 0;
pp 124 arch/powerpc/platforms/pseries/lpar.c pp->dtl_curr = dtl;
pp 183 arch/powerpc/platforms/pseries/lpar.c struct paca_struct *pp;
pp 186 arch/powerpc/platforms/pseries/lpar.c pp = paca_ptrs[cpu];
pp 187 arch/powerpc/platforms/pseries/lpar.c if (!pp->dispatch_log)
pp 189 arch/powerpc/platforms/pseries/lpar.c kmem_cache_free(dtl_cache, pp->dispatch_log);
pp 190 arch/powerpc/platforms/pseries/lpar.c pp->dtl_ridx = 0;
pp 191 arch/powerpc/platforms/pseries/lpar.c pp->dispatch_log = 0;
pp 192 arch/powerpc/platforms/pseries/lpar.c pp->dispatch_log_end = 0;
pp 193 arch/powerpc/platforms/pseries/lpar.c pp->dtl_curr = 0;
pp 158 arch/s390/include/asm/cpu_mf.h static inline void lpp(void *pp)
pp 160 arch/s390/include/asm/cpu_mf.h asm volatile(".insn s,0xb2800000,0(%0)\n":: "a" (pp) : "memory");
pp 285 arch/s390/include/asm/kvm_host.h __u64 pp; /* 0x01de */
pp 159 arch/s390/include/asm/sysinfo.h unsigned char pp:2;
pp 252 arch/s390/include/uapi/asm/kvm.h __u64 pp; /* program parameter [ARCH0] */
pp 549 arch/s390/kernel/smp.c struct ec_creg_mask_parms *pp = info;
pp 553 arch/s390/kernel/smp.c cregs[pp->cr] = (cregs[pp->cr] & pp->andval) | pp->orval;
pp 142 arch/s390/kernel/topology.c smp_cpu_set_polarization(lcpu + i, tl_core->pp);
pp 2871 arch/s390/kvm/kvm-s390.c vcpu->arch.sie_block->pp = 0;
pp 3227 arch/s390/kvm/kvm-s390.c r = put_user(vcpu->arch.sie_block->pp,
pp 3280 arch/s390/kvm/kvm-s390.c r = get_user(vcpu->arch.sie_block->pp,
pp 3887 arch/s390/kvm/kvm-s390.c vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
pp 3968 arch/s390/kvm/kvm-s390.c kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
pp 403 arch/s390/kvm/vsie.c scb_o->pp = scb_s->pp;
pp 474 arch/s390/kvm/vsie.c scb_s->pp = scb_o->pp;
pp 309 arch/sparc/kernel/leon_kernel.c struct property *pp;
pp 335 arch/sparc/kernel/leon_kernel.c pp = of_find_property(rootnp, "systemid", &len);
pp 336 arch/sparc/kernel/leon_kernel.c if (pp)
pp 337 arch/sparc/kernel/leon_kernel.c amba_system_id = *(unsigned long *)pp->value;
pp 346 arch/sparc/kernel/leon_kernel.c pp = of_find_property(np, "reg", &len);
pp 347 arch/sparc/kernel/leon_kernel.c if (!pp)
pp 349 arch/sparc/kernel/leon_kernel.c leon3_irqctrl_regs = *(struct leon3_irqctrl_regs_map **)pp->value;
pp 363 arch/sparc/kernel/leon_kernel.c pp = of_find_property(np, "ampopts", &len);
pp 364 arch/sparc/kernel/leon_kernel.c if (pp) {
pp 365 arch/sparc/kernel/leon_kernel.c ampopts = *(int *)pp->value;
pp 377 arch/sparc/kernel/leon_kernel.c pp = of_find_property(np, "reg", &len);
pp 378 arch/sparc/kernel/leon_kernel.c if (pp)
pp 380 arch/sparc/kernel/leon_kernel.c pp->value;
pp 381 arch/sparc/kernel/leon_kernel.c pp = of_find_property(np, "interrupts", &len);
pp 382 arch/sparc/kernel/leon_kernel.c if (pp)
pp 383 arch/sparc/kernel/leon_kernel.c leon3_gptimer_irq = *(unsigned int *)pp->value;
pp 278 arch/sparc/kernel/leon_smp.c struct property *pp;
pp 286 arch/sparc/kernel/leon_smp.c pp = of_find_property(rootnp, "ipi_num", &len);
pp 287 arch/sparc/kernel/leon_smp.c if (pp && (*(int *)pp->value))
pp 288 arch/sparc/kernel/leon_smp.c leon_ipi_irq = *(int *)pp->value;
pp 223 arch/sparc/kernel/of_device_32.c static int __init use_1to1_mapping(struct device_node *pp)
pp 226 arch/sparc/kernel/of_device_32.c if (of_find_property(pp, "ranges", NULL) != NULL)
pp 235 arch/sparc/kernel/of_device_32.c if (of_node_name_eq(pp, "dma") ||
pp 236 arch/sparc/kernel/of_device_32.c of_node_name_eq(pp, "espdma") ||
pp 237 arch/sparc/kernel/of_device_32.c of_node_name_eq(pp, "ledma") ||
pp 238 arch/sparc/kernel/of_device_32.c of_node_name_eq(pp, "lebuffer"))
pp 279 arch/sparc/kernel/of_device_32.c struct device_node *pp = p_op->dev.of_node;
pp 292 arch/sparc/kernel/of_device_32.c if (use_1to1_mapping(pp)) {
pp 302 arch/sparc/kernel/of_device_32.c dp = pp;
pp 303 arch/sparc/kernel/of_device_32.c pp = dp->parent;
pp 304 arch/sparc/kernel/of_device_32.c if (!pp) {
pp 309 arch/sparc/kernel/of_device_32.c pbus = of_match_bus(pp);
pp 283 arch/sparc/kernel/of_device_64.c static int __init use_1to1_mapping(struct device_node *pp)
pp 286 arch/sparc/kernel/of_device_64.c if (of_find_property(pp, "ranges", NULL) != NULL)
pp 298 arch/sparc/kernel/of_device_64.c if (of_node_name_eq(pp, "dma") ||
pp 299 arch/sparc/kernel/of_device_64.c of_node_name_eq(pp, "espdma") ||
pp 300 arch/sparc/kernel/of_device_64.c of_node_name_eq(pp, "ledma") ||
pp 301 arch/sparc/kernel/of_device_64.c of_node_name_eq(pp, "lebuffer"))
pp 308 arch/sparc/kernel/of_device_64.c if (of_node_name_eq(pp, "pci"))
pp 357 arch/sparc/kernel/of_device_64.c struct device_node *pp = p_op->dev.of_node;
pp 369 arch/sparc/kernel/of_device_64.c if (use_1to1_mapping(pp)) {
pp 379 arch/sparc/kernel/of_device_64.c dp = pp;
pp 380 arch/sparc/kernel/of_device_64.c pp = dp->parent;
pp 381 arch/sparc/kernel/of_device_64.c if (!pp) {
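The pfunc_core.c lines above scan a device node's property list and pick out entries whose names carry a platform-function prefix. A self-contained model of that walk (the property struct is simplified and the PP_PREFIX string here is an assumption, not copied from the driver):

    #include <stdio.h>
    #include <string.h>

    #define PP_PREFIX "platform-do-"   /* assumed value of the driver's macro */

    struct property {
        const char *name;
        int length;
        const void *value;
        struct property *next;
    };

    /* walk the list; anything matching the prefix is a platform function */
    static void scan_props(struct property *head)
    {
        size_t plen = strlen(PP_PREFIX);
        for (struct property *pp = head; pp != NULL; pp = pp->next) {
            if (strncmp(pp->name, PP_PREFIX, plen) != 0)
                continue;
            const char *name = pp->name + plen;
            if (strlen(name) && pp->length >= 12)
                printf("found function %s (%d bytes)\n", name, pp->length);
        }
    }

    int main(void)
    {
        static char blob[16];
        struct property p2 = { "platform-do-spin-fan", 16, blob, NULL };
        struct property p1 = { "compatible", 4, "mac", &p2 };
        scan_props(&p1);
        return 0;
    }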
pp 386 arch/sparc/kernel/of_device_64.c pbus = of_match_bus(pp);
pp 421 arch/sparc/kernel/of_device_64.c apply_interrupt_map(struct device_node *dp, struct device_node *pp,
pp 432 arch/sparc/kernel/of_device_64.c bus = of_match_bus(pp);
pp 468 arch/sparc/kernel/of_device_64.c if (pp->irq_trans)
pp 469 arch/sparc/kernel/of_device_64.c return pp;
pp 481 arch/sparc/kernel/of_device_64.c struct device_node *pp,
pp 498 arch/sparc/kernel/of_device_64.c if (pp->irq_trans) {
pp 539 arch/sparc/kernel/of_device_64.c struct device_node *pp, *ip;
pp 565 arch/sparc/kernel/of_device_64.c pp = dp->parent;
pp 567 arch/sparc/kernel/of_device_64.c while (pp) {
pp 571 arch/sparc/kernel/of_device_64.c imap = of_get_property(pp, "interrupt-map", &imlen);
pp 572 arch/sparc/kernel/of_device_64.c imsk = of_get_property(pp, "interrupt-map-mask", NULL);
pp 577 arch/sparc/kernel/of_device_64.c iret = apply_interrupt_map(dp, pp,
pp 584 arch/sparc/kernel/of_device_64.c pp, this_orig_irq, iret, irq);
pp 594 arch/sparc/kernel/of_device_64.c if (of_node_name_eq(pp, "pci")) {
pp 597 arch/sparc/kernel/of_device_64.c irq = pci_irq_swizzle(dp, pp, irq);
pp 602 arch/sparc/kernel/of_device_64.c pp, this_orig_irq,
pp 607 arch/sparc/kernel/of_device_64.c if (pp->irq_trans) {
pp 608 arch/sparc/kernel/of_device_64.c ip = pp;
pp 612 arch/sparc/kernel/of_device_64.c dp = pp;
pp 613 arch/sparc/kernel/of_device_64.c pp = pp->parent;
pp 324 arch/sparc/kernel/process_64.c struct global_pmu_snapshot *pp;
pp 330 arch/sparc/kernel/process_64.c pp = &global_cpu_snapshot[this_cpu].pmu;
pp 338 arch/sparc/kernel/process_64.c pp->pcr[i] = pcr_ops->read_pcr(i);
pp 339 arch/sparc/kernel/process_64.c pp->pic[i] = pcr_ops->read_pic(i);
pp 343 arch/sparc/kernel/process_64.c static void __global_pmu_poll(struct global_pmu_snapshot *pp)
pp 347 arch/sparc/kernel/process_64.c while (!pp->pcr[0] && ++limit < 100) {
pp 369 arch/sparc/kernel/process_64.c struct global_pmu_snapshot *pp = &global_cpu_snapshot[cpu].pmu;
pp 371 arch/sparc/kernel/process_64.c __global_pmu_poll(pp);
pp 375 arch/sparc/kernel/process_64.c pp->pcr[0], pp->pcr[1], pp->pcr[2], pp->pcr[3],
pp 376 arch/sparc/kernel/process_64.c pp->pic[0], pp->pic[1], pp->pic[2], pp->pic[3]);
pp 1721 arch/x86/xen/mmu_pv.c struct xen_platform_parameters pp;
pp 1723 arch/x86/xen/mmu_pv.c if (HYPERVISOR_xen_version(XENVER_platform_parameters, &pp) == 0)
pp 1724 arch/x86/xen/mmu_pv.c top = pp.virt_start;
pp 570 block/partitions/ldm.c static bool ldm_create_data_partitions (struct parsed_partitions *pp,
pp 579 block/partitions/ldm.c BUG_ON (!pp || !ldb);
pp 587 block/partitions/ldm.c strlcat(pp->pp_buf, " [LDM]", PAGE_SIZE);
pp 597 block/partitions/ldm.c put_partition (pp, part_num, ldb->ph.logical_disk_start +
pp 602 block/partitions/ldm.c strlcat(pp->pp_buf, "\n", PAGE_SIZE);
pp 216 drivers/ata/acard-ahci.c struct ahci_port_priv *pp = ap->private_data;
pp 227 drivers/ata/acard-ahci.c cmd_tbl = pp->cmd_tbl + qc->hw_tag * AHCI_CMD_TBL_SZ;
pp 250 drivers/ata/acard-ahci.c ahci_fill_cmd_slot(pp, qc->hw_tag, opts);
pp 255 drivers/ata/acard-ahci.c struct ahci_port_priv *pp = qc->ap->private_data;
pp 256 drivers/ata/acard-ahci.c u8 *rx_fis = pp->rx_fis;
pp 258 drivers/ata/acard-ahci.c if (pp->fbs_enabled)
pp 281 drivers/ata/acard-ahci.c struct ahci_port_priv *pp;
pp 286 drivers/ata/acard-ahci.c pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
pp 287 drivers/ata/acard-ahci.c if (!pp)
pp 295 drivers/ata/acard-ahci.c pp->fbs_supported = true;
pp 299 drivers/ata/acard-ahci.c pp->fbs_supported = true;
pp 305 drivers/ata/acard-ahci.c if (pp->fbs_supported) {
pp 321 drivers/ata/acard-ahci.c pp->cmd_slot = mem;
pp 322 drivers/ata/acard-ahci.c pp->cmd_slot_dma = mem_dma;
pp 330 drivers/ata/acard-ahci.c pp->rx_fis = mem;
pp 331 drivers/ata/acard-ahci.c pp->rx_fis_dma = mem_dma;
pp 340 drivers/ata/acard-ahci.c pp->cmd_tbl = mem;
pp 341 drivers/ata/acard-ahci.c pp->cmd_tbl_dma = mem_dma;
pp 347 drivers/ata/acard-ahci.c pp->intr_mask = DEF_PORT_IRQ;
pp 349 drivers/ata/acard-ahci.c ap->private_data = pp;
pp 697 drivers/ata/ahci.c struct ahci_port_priv *pp = ap->private_data;
pp 699 drivers/ata/ahci.c u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
pp 758 drivers/ata/ahci.c struct ahci_port_priv *pp = ap->private_data;
pp 760 drivers/ata/ahci.c u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
pp 395 drivers/ata/ahci.h void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
pp 90 drivers/ata/ahci_qoriq.c struct ahci_port_priv *pp = ap->private_data;
pp 93 drivers/ata/ahci_qoriq.c u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
pp 83 drivers/ata/ahci_seattle.c struct ahci_port_priv *pp = ap->private_data;
pp 94 drivers/ata/ahci_seattle.c emp = &pp->em_priv[pmp];
pp 141 drivers/ata/ahci_xgene.c struct ahci_port_priv *pp = ap->private_data;
pp 162 drivers/ata/ahci_xgene.c if (pp->fbs_supported) {
pp 357 drivers/ata/ahci_xgene.c struct ahci_port_priv *pp = ap->private_data;
pp 358 drivers/ata/ahci_xgene.c u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
pp 683 drivers/ata/libahci.c struct ahci_port_priv *pp = ap->private_data;
pp 688 drivers/ata/libahci.c writel((pp->cmd_slot_dma >> 16) >> 16,
pp 690 drivers/ata/libahci.c writel(pp->cmd_slot_dma & 0xffffffff, port_mmio + PORT_LST_ADDR);
pp 693 drivers/ata/libahci.c writel((pp->rx_fis_dma >> 16) >> 16,
pp 695 drivers/ata/libahci.c writel(pp->rx_fis_dma & 0xffffffff, port_mmio + PORT_FIS_ADDR);
pp 749 drivers/ata/libahci.c struct ahci_port_priv *pp = ap->private_data;
pp 761 drivers/ata/libahci.c pp->intr_mask &= ~PORT_IRQ_PHYRDY;
pp 762 drivers/ata/libahci.c writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
pp 810 drivers/ata/libahci.c pp->intr_mask |= PORT_IRQ_PHYRDY;
pp 811 drivers/ata/libahci.c writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
pp 842 drivers/ata/libahci.c struct ahci_port_priv *pp = ap->private_data;
pp 858 drivers/ata/libahci.c emp = &pp->em_priv[link->pmp];
pp 961 drivers/ata/libahci.c struct ahci_port_priv *pp = ap->private_data;
pp 962 drivers/ata/libahci.c struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
pp 1019 drivers/ata/libahci.c struct ahci_port_priv *pp = ap->private_data;
pp 1020 drivers/ata/libahci.c struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
pp 1051 drivers/ata/libahci.c struct ahci_port_priv *pp = ap->private_data;
pp 1062 drivers/ata/libahci.c emp = &pp->em_priv[pmp];
pp 1111 drivers/ata/libahci.c struct ahci_port_priv *pp = ap->private_data;
pp 1117 drivers/ata/libahci.c emp = &pp->em_priv[link->pmp];
pp 1128 drivers/ata/libahci.c struct ahci_port_priv *pp = ap->private_data;
pp 1138 drivers/ata/libahci.c emp = &pp->em_priv[pmp];
pp 1157 drivers/ata/libahci.c struct ahci_port_priv *pp = ap->private_data;
pp 1158 drivers/ata/libahci.c struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
pp 1188 drivers/ata/libahci.c struct ahci_port_priv *pp = ap->private_data;
pp 1189 drivers/ata/libahci.c struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
pp 1283 drivers/ata/libahci.c void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
pp 1288 drivers/ata/libahci.c cmd_tbl_dma = pp->cmd_tbl_dma + tag * AHCI_CMD_TBL_SZ;
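A detail worth noting in the AHCI entries: 64-bit DMA addresses are split into two 32-bit register writes using (addr >> 16) >> 16 rather than addr >> 32, which remains well-defined even when dma_addr_t is a 32-bit type (a single shift by 32 of a 32-bit value is undefined behaviour in C). A small sketch of the split:

    #include <stdint.h>
    #include <stdio.h>

    /* split a DMA address into the hi/lo halves written to the hardware */
    static void split_dma(uint64_t addr, uint32_t *lo, uint32_t *hi)
    {
        *lo = (uint32_t)(addr & 0xffffffff);
        *hi = (uint32_t)((addr >> 16) >> 16);   /* safe for 32-bit dma_addr_t */
    }

    int main(void)
    {
        uint32_t lo, hi;
        split_dma(0x123456789abcdef0ULL, &lo, &hi);
        printf("hi=0x%08x lo=0x%08x\n", hi, lo); /* hi=0x12345678 lo=0x9abcdef0 */
        return 0;
    }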
pp 1290 drivers/ata/libahci.c pp->cmd_slot[tag].opts = cpu_to_le32(opts);
pp 1291 drivers/ata/libahci.c pp->cmd_slot[tag].status = 0;
pp 1292 drivers/ata/libahci.c pp->cmd_slot[tag].tbl_addr = cpu_to_le32(cmd_tbl_dma & 0xffffffff);
pp 1293 drivers/ata/libahci.c pp->cmd_slot[tag].tbl_addr_hi = cpu_to_le32((cmd_tbl_dma >> 16) >> 16);
pp 1347 drivers/ata/libahci.c struct ahci_port_priv *pp = ap->private_data;
pp 1349 drivers/ata/libahci.c u8 *fis = pp->cmd_tbl;
pp 1354 drivers/ata/libahci.c ahci_fill_cmd_slot(pp, 0, cmd_fis_len | flags | (pmp << 12));
pp 1357 drivers/ata/libahci.c if (pp->fbs_enabled && pp->fbs_last_dev != pmp) {
pp 1362 drivers/ata/libahci.c pp->fbs_last_dev = pmp;
pp 1387 drivers/ata/libahci.c struct ahci_port_priv *pp = ap->private_data;
pp 1406 drivers/ata/libahci.c if (!ata_is_host_link(link) && pp->fbs_enabled) {
pp 1537 drivers/ata/libahci.c struct ahci_port_priv *pp = ap->private_data;
pp 1539 drivers/ata/libahci.c u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
pp 1619 drivers/ata/libahci.c struct ahci_port_priv *pp = ap->private_data;
pp 1621 drivers/ata/libahci.c if (!sata_pmp_attached(ap) || pp->fbs_enabled)
pp 1630 drivers/ata/libahci.c struct ahci_port_priv *pp = ap->private_data;
pp 1641 drivers/ata/libahci.c cmd_tbl = pp->cmd_tbl + qc->hw_tag * AHCI_CMD_TBL_SZ;
pp 1662 drivers/ata/libahci.c ahci_fill_cmd_slot(pp, qc->hw_tag, opts);
pp 1667 drivers/ata/libahci.c struct ahci_port_priv *pp = ap->private_data;
pp 1673 drivers/ata/libahci.c BUG_ON(!pp->fbs_enabled);
pp 1692 drivers/ata/libahci.c struct ahci_port_priv *pp = ap->private_data;
pp 1701 drivers/ata/libahci.c if (pp->fbs_enabled) {
pp 1750 drivers/ata/libahci.c u32 *unk = pp->rx_fis + RX_FIS_UNK;
pp 1804 drivers/ata/libahci.c struct ahci_port_priv *pp = ap->private_data;
pp 1845 drivers/ata/libahci.c if (pp->fbs_enabled)
pp 1848 drivers/ata/libahci.c const __le32 *f = pp->rx_fis + RX_FIS_SDB;
pp 1860 drivers/ata/libahci.c if (pp->fbs_enabled) {
pp 1867 drivers/ata/libahci.c if (ap->qc_active && pp->active_link->sactive)
pp 1989 drivers/ata/libahci.c struct ahci_port_priv *pp = ap->private_data;
pp 1995 drivers/ata/libahci.c pp->active_link = qc->dev->link;
pp 2000 drivers/ata/libahci.c if (pp->fbs_enabled && pp->fbs_last_dev != qc->dev->link->pmp) {
pp 2005 drivers/ata/libahci.c pp->fbs_last_dev = qc->dev->link->pmp;
pp 2018 drivers/ata/libahci.c struct ahci_port_priv *pp = qc->ap->private_data;
pp 2019 drivers/ata/libahci.c u8 *rx_fis = pp->rx_fis;
pp 2021 drivers/ata/libahci.c if (pp->fbs_enabled)
pp 2054 drivers/ata/libahci.c struct ahci_port_priv *pp = ap->private_data;
pp 2062 drivers/ata/libahci.c writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
pp 2174 drivers/ata/libahci.c struct ahci_port_priv *pp = ap->private_data;
pp 2179 drivers/ata/libahci.c if (!pp->fbs_supported)
pp 2184 drivers/ata/libahci.c pp->fbs_enabled = true;
pp 2185 drivers/ata/libahci.c pp->fbs_last_dev = -1; /* initialization */
pp 2197 drivers/ata/libahci.c pp->fbs_enabled = true;
pp 2198 drivers/ata/libahci.c pp->fbs_last_dev = -1; /* initialization */
pp 2208 drivers/ata/libahci.c struct ahci_port_priv *pp = ap->private_data;
pp 2213 drivers/ata/libahci.c if (!pp->fbs_supported)
pp 2218 drivers/ata/libahci.c pp->fbs_enabled = false;
pp 2232 drivers/ata/libahci.c pp->fbs_enabled = false;
pp 2241 drivers/ata/libahci.c struct ahci_port_priv *pp = ap->private_data;
pp 2250 drivers/ata/libahci.c pp->intr_mask |= PORT_IRQ_BAD_PMP;
pp 2261 drivers/ata/libahci.c writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
pp 2267 drivers/ata/libahci.c struct ahci_port_priv *pp = ap->private_data;
pp 2276 drivers/ata/libahci.c pp->intr_mask &= ~PORT_IRQ_BAD_PMP;
pp 2280 drivers/ata/libahci.c writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
pp 2322 drivers/ata/libahci.c struct ahci_port_priv *pp;
pp 2327 drivers/ata/libahci.c pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
pp 2328 drivers/ata/libahci.c if (!pp)
pp 2332 drivers/ata/libahci.c pp->irq_desc = devm_kzalloc(dev, 8, GFP_KERNEL);
pp 2333 drivers/ata/libahci.c if (!pp->irq_desc) {
pp 2334 drivers/ata/libahci.c devm_kfree(dev, pp);
pp 2337 drivers/ata/libahci.c snprintf(pp->irq_desc, 8,
pp 2346 drivers/ata/libahci.c pp->fbs_supported = true;
pp 2350 drivers/ata/libahci.c pp->fbs_supported = true;
pp 2356 drivers/ata/libahci.c if (pp->fbs_supported) {
pp 2372 drivers/ata/libahci.c pp->cmd_slot = mem;
pp 2373 drivers/ata/libahci.c pp->cmd_slot_dma = mem_dma;
pp 2381 drivers/ata/libahci.c pp->rx_fis = mem;
pp 2382 drivers/ata/libahci.c pp->rx_fis_dma = mem_dma;
pp 2391 drivers/ata/libahci.c pp->cmd_tbl = mem;
pp 2392 drivers/ata/libahci.c pp->cmd_tbl_dma = mem_dma;
pp 2398 drivers/ata/libahci.c pp->intr_mask = DEF_PORT_IRQ;
pp 2404 drivers/ata/libahci.c spin_lock_init(&pp->lock);
pp 2405 drivers/ata/libahci.c ap->lock = &pp->lock;
pp 2408 drivers/ata/libahci.c ap->private_data = pp;
pp 2546 drivers/ata/libahci.c struct ahci_port_priv *pp = host->ports[i]->private_data;
pp 2550 drivers/ata/libahci.c if (!pp) {
pp 2556 drivers/ata/libahci.c 0, pp->irq_desc, host->ports[i]);
pp 6331 drivers/ata/libata-core.c void **pp;
pp 6341 drivers/ata/libata-core.c for (pp = begin; pp < end; pp++, inherit++)
pp 6342 drivers/ata/libata-core.c if (!*pp)
pp 6343 drivers/ata/libata-core.c *pp = *inherit;
pp 6346 drivers/ata/libata-core.c for (pp = begin; pp < end; pp++)
pp 6347 drivers/ata/libata-core.c if (IS_ERR(*pp))
pp 6348 drivers/ata/libata-core.c *pp = NULL;
pp 190 drivers/ata/pdc_adma.c struct adma_port_priv *pp = ap->private_data;
pp 204 drivers/ata/pdc_adma.c writel((u32)pp->pkt_dma, chan + ADMA_CPB_NEXT);
pp 247 drivers/ata/pdc_adma.c struct adma_port_priv *pp = ap->private_data;
pp 249 drivers/ata/pdc_adma.c if (pp->state != adma_state_idle) /* healthy paranoia */
pp 250 drivers/ata/pdc_adma.c pp->state = adma_state_mmio;
pp 260 drivers/ata/pdc_adma.c struct adma_port_priv *pp = ap->private_data;
pp 261 drivers/ata/pdc_adma.c u8 *buf = pp->pkt, *last_buf = NULL;
pp 285 drivers/ata/pdc_adma.c (pFLAGS & pEND) ? 0 : cpu_to_le32(pp->pkt_dma + i + 4);
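ahci_fill_cmd_slot() above points each command slot at its own fixed-size slice of one large DMA-coherent command-table allocation, offset by tag. A simplified model (the header layout and the AHCI_CMD_TBL_SZ value here are illustrative, and the real driver byte-swaps the fields with cpu_to_le32()):

    #include <stdint.h>
    #include <stdio.h>

    #define AHCI_CMD_TBL_SZ 1024    /* illustrative stride; see drivers/ata/ahci.h */

    struct cmd_hdr {                /* simplified AHCI command header */
        uint32_t opts;
        uint32_t status;
        uint32_t tbl_addr;
        uint32_t tbl_addr_hi;
    };

    /* point slot 'tag' at its slice of the shared command-table block */
    static void fill_cmd_slot(struct cmd_hdr *slots, uint64_t cmd_tbl_dma,
                              unsigned int tag, uint32_t opts)
    {
        uint64_t tbl = cmd_tbl_dma + (uint64_t)tag * AHCI_CMD_TBL_SZ;

        slots[tag].opts = opts;
        slots[tag].status = 0;
        slots[tag].tbl_addr = (uint32_t)(tbl & 0xffffffff);
        slots[tag].tbl_addr_hi = (uint32_t)((tbl >> 16) >> 16);
    }

    int main(void)
    {
        struct cmd_hdr slots[32] = { 0 };
        fill_cmd_slot(slots, 0x100000000ULL, 5, 0x8005);
        printf("tag 5 -> hi=0x%x lo=0x%x\n",
               slots[5].tbl_addr_hi, slots[5].tbl_addr);
        return 0;
    }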
pp 300 drivers/ata/pdc_adma.c struct adma_port_priv *pp = qc->ap->private_data;
pp 301 drivers/ata/pdc_adma.c u8 *buf = pp->pkt;
pp 302 drivers/ata/pdc_adma.c u32 pkt_dma = (u32)pp->pkt_dma;
pp 389 drivers/ata/pdc_adma.c struct adma_port_priv *pp = qc->ap->private_data;
pp 393 drivers/ata/pdc_adma.c pp->state = adma_state_pkt;
pp 405 drivers/ata/pdc_adma.c pp->state = adma_state_mmio;
pp 415 drivers/ata/pdc_adma.c struct adma_port_priv *pp;
pp 424 drivers/ata/pdc_adma.c pp = ap->private_data;
pp 425 drivers/ata/pdc_adma.c if (!pp || pp->state != adma_state_pkt)
pp 434 drivers/ata/pdc_adma.c if (pp->pkt[0] & cATERR)
pp 436 drivers/ata/pdc_adma.c else if (pp->pkt[0] != cDONE)
pp 447 drivers/ata/pdc_adma.c "pkt[0] 0x%02X", pp->pkt[0]);
pp 465 drivers/ata/pdc_adma.c struct adma_port_priv *pp = ap->private_data;
pp 468 drivers/ata/pdc_adma.c if (!pp || pp->state != adma_state_mmio)
pp 481 drivers/ata/pdc_adma.c pp->state = adma_state_idle;
pp 537 drivers/ata/pdc_adma.c struct adma_port_priv *pp;
pp 540 drivers/ata/pdc_adma.c pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
pp 541 drivers/ata/pdc_adma.c if (!pp)
pp 543 drivers/ata/pdc_adma.c pp->pkt = dmam_alloc_coherent(dev, ADMA_PKT_BYTES, &pp->pkt_dma,
pp 545 drivers/ata/pdc_adma.c if (!pp->pkt)
pp 548 drivers/ata/pdc_adma.c if ((pp->pkt_dma & 7) != 0) {
pp 550 drivers/ata/pdc_adma.c (u32)pp->pkt_dma);
pp 553 drivers/ata/pdc_adma.c ap->private_data = pp;
pp 406 drivers/ata/sata_fsl.c static void sata_fsl_setup_cmd_hdr_entry(struct sata_fsl_port_priv *pp,
pp 413 drivers/ata/sata_fsl.c cmd_descriptor_address = pp->cmdentry_paddr +
pp 418 drivers/ata/sata_fsl.c pp->cmdslot[tag].cda = cpu_to_le32(cmd_descriptor_address);
pp 419 drivers/ata/sata_fsl.c pp->cmdslot[tag].prde_fis_len =
pp 421 drivers/ata/sata_fsl.c pp->cmdslot[tag].ttl = cpu_to_le32(data_xfer_len & ~0x03);
pp 422 drivers/ata/sata_fsl.c pp->cmdslot[tag].desc_info = cpu_to_le32(desc_info | (tag & 0x1F));
pp 425 drivers/ata/sata_fsl.c pp->cmdslot[tag].cda,
pp 426 drivers/ata/sata_fsl.c pp->cmdslot[tag].prde_fis_len,
pp 427 drivers/ata/sata_fsl.c pp->cmdslot[tag].ttl, pp->cmdslot[tag].desc_info);
pp 508 drivers/ata/sata_fsl.c struct sata_fsl_port_priv *pp = ap->private_data;
pp 518 drivers/ata/sata_fsl.c cd = (struct command_desc *)pp->cmdentry + tag;
pp 519 drivers/ata/sata_fsl.c cd_paddr = pp->cmdentry_paddr + tag * SATA_FSL_CMD_DESC_SIZE;
pp 546 drivers/ata/sata_fsl.c sata_fsl_setup_cmd_hdr_entry(pp, tag, desc_info, ttl_dwords,
pp 584 drivers/ata/sata_fsl.c struct sata_fsl_port_priv *pp = qc->ap->private_data;
pp 590 drivers/ata/sata_fsl.c cd = pp->cmdentry + tag;
pp 716 drivers/ata/sata_fsl.c struct sata_fsl_port_priv *pp;
pp 723 drivers/ata/sata_fsl.c pp = kzalloc(sizeof(*pp), GFP_KERNEL);
pp 724 drivers/ata/sata_fsl.c if (!pp)
pp 730 drivers/ata/sata_fsl.c kfree(pp);
pp 734 drivers/ata/sata_fsl.c pp->cmdslot = mem;
pp 735 drivers/ata/sata_fsl.c pp->cmdslot_paddr = mem_dma;
pp 740 drivers/ata/sata_fsl.c pp->cmdentry = mem;
pp 741 drivers/ata/sata_fsl.c pp->cmdentry_paddr = mem_dma;
pp 743 drivers/ata/sata_fsl.c ap->private_data = pp;
pp 746 drivers/ata/sata_fsl.c pp->cmdslot_paddr, pp->cmdentry_paddr);
pp 749 drivers/ata/sata_fsl.c iowrite32(pp->cmdslot_paddr & 0xffffffff, hcr_base + CHBA);
pp 772 drivers/ata/sata_fsl.c struct sata_fsl_port_priv *pp = ap->private_data;
pp 790 drivers/ata/sata_fsl.c pp->cmdslot, pp->cmdslot_paddr);
pp 792 drivers/ata/sata_fsl.c kfree(pp);
pp 933 drivers/ata/sata_fsl.c struct sata_fsl_port_priv *pp = ap->private_data;
pp 961 drivers/ata/sata_fsl.c cfis = (u8 *) &pp->cmdentry->cfis;
pp 964 drivers/ata/sata_fsl.c sata_fsl_setup_cmd_hdr_entry(pp, 0,
pp 1013 drivers/ata/sata_fsl.c sata_fsl_setup_cmd_hdr_entry(pp, 0, CMD_DESC_RES | CMD_DESC_SNOOP_ENABLE,
pp 1579 drivers/ata/sata_fsl.c struct sata_fsl_port_priv *pp = ap->private_data;
pp 1588 drivers/ata/sata_fsl.c iowrite32(pp->cmdslot_paddr & 0xffffffff, hcr_base + CHBA);
pp 133 drivers/ata/sata_highbank.c struct ahci_port_priv *pp = ap->private_data;
pp 142 drivers/ata/sata_highbank.c emp = &pp->em_priv[pmp];
pp 390 drivers/ata/sata_highbank.c struct ahci_port_priv *pp = ap->private_data;
pp 392 drivers/ata/sata_highbank.c u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
pp 329 drivers/ata/sata_inic162x.c struct inic_port_priv *pp = ap->private_data;
pp 330 drivers/ata/sata_inic162x.c struct inic_cpb *cpb = &pp->pkt->cpb;
pp 483 drivers/ata/sata_inic162x.c struct inic_port_priv *pp = qc->ap->private_data;
pp 484 drivers/ata/sata_inic162x.c struct inic_pkt *pkt = pp->pkt;
pp 504 drivers/ata/sata_inic162x.c cpb->prd = cpu_to_le32(pp->pkt_dma + offsetof(struct inic_pkt, prd));
pp 527 drivers/ata/sata_inic162x.c prd->mad = cpu_to_le32(pp->pkt_dma +
pp 540 drivers/ata/sata_inic162x.c pp->cpb_tbl[0] = pp->pkt_dma;
pp 682 drivers/ata/sata_inic162x.c struct inic_port_priv *pp = ap->private_data;
pp 685 drivers/ata/sata_inic162x.c memset(pp->pkt, 0, sizeof(struct inic_pkt));
pp 686 drivers/ata/sata_inic162x.c memset(pp->cpb_tbl, 0, IDMA_CPB_TBL_SIZE);
pp 689 drivers/ata/sata_inic162x.c writel(pp->cpb_tbl_dma, port_base + PORT_CPB_CPBLAR);
pp 701 drivers/ata/sata_inic162x.c struct inic_port_priv *pp;
pp 704 drivers/ata/sata_inic162x.c pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
pp 705 drivers/ata/sata_inic162x.c if (!pp)
pp 707 drivers/ata/sata_inic162x.c ap->private_data = pp;
pp 710 drivers/ata/sata_inic162x.c pp->pkt = dmam_alloc_coherent(dev, sizeof(struct inic_pkt),
pp 711 drivers/ata/sata_inic162x.c &pp->pkt_dma, GFP_KERNEL);
pp 712 drivers/ata/sata_inic162x.c if (!pp->pkt)
pp 715 drivers/ata/sata_inic162x.c pp->cpb_tbl = dmam_alloc_coherent(dev, IDMA_CPB_TBL_SIZE,
pp 716 drivers/ata/sata_inic162x.c &pp->cpb_tbl_dma, GFP_KERNEL);
pp 717 drivers/ata/sata_inic162x.c if (!pp->cpb_tbl)
pp 647 drivers/ata/sata_mv.c struct mv_port_priv *pp);
pp 945 drivers/ata/sata_mv.c struct mv_port_priv *pp = ap->private_data;
pp 947 drivers/ata/sata_mv.c pp->cached.fiscfg = readl(port_mmio + FISCFG);
pp 948 drivers/ata/sata_mv.c pp->cached.ltmode = readl(port_mmio + LTMODE);
pp 949 drivers/ata/sata_mv.c pp->cached.haltcond = readl(port_mmio + EDMA_HALTCOND);
pp 950 drivers/ata/sata_mv.c pp->cached.unknown_rsvd = readl(port_mmio + EDMA_UNKNOWN_RSVD);
pp 990 drivers/ata/sata_mv.c struct mv_port_priv *pp)
pp 997 drivers/ata/sata_mv.c pp->req_idx &= MV_MAX_Q_DEPTH_MASK; /* paranoia */
pp 998 drivers/ata/sata_mv.c index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT;
pp 1000 drivers/ata/sata_mv.c WARN_ON(pp->crqb_dma & 0x3ff);
pp 1001 drivers/ata/sata_mv.c writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI);
pp 1002 drivers/ata/sata_mv.c writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
pp 1009 drivers/ata/sata_mv.c pp->resp_idx &= MV_MAX_Q_DEPTH_MASK; /* paranoia */
pp 1010 drivers/ata/sata_mv.c index = pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT;
pp 1012 drivers/ata/sata_mv.c WARN_ON(pp->crpb_dma & 0xff);
pp 1013 drivers/ata/sata_mv.c writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI);
pp 1015 drivers/ata/sata_mv.c writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
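sata_fsl_setup_cmd_hdr_entry() above follows the same per-tag carving idea: the descriptor address is base + tag * descriptor size, the transfer length is forced to a multiple of 4, and the tag is folded into the low bits of desc_info. A sketch with illustrative sizes:

    #include <stdint.h>
    #include <stdio.h>

    #define CMD_DESC_SIZE 512   /* illustrative; the driver defines its own size */

    struct cmd_hdr_entry {      /* simplified mirror of pp->cmdslot[tag] */
        uint32_t cda;           /* command descriptor address */
        uint32_t ttl;           /* total transfer length */
        uint32_t desc_info;
    };

    static void setup_cmd_hdr_entry(struct cmd_hdr_entry *slot,
                                    uint32_t cmdentry_paddr, unsigned int tag,
                                    uint32_t desc_info, uint32_t xfer_len)
    {
        /* each tag owns one fixed-size descriptor in the same DMA block */
        slot->cda = cmdentry_paddr + tag * CMD_DESC_SIZE;
        slot->ttl = xfer_len & ~0x03u;          /* keep length word-aligned */
        slot->desc_info = desc_info | (tag & 0x1F);
    }

    int main(void)
    {
        struct cmd_hdr_entry e = { 0 };
        setup_cmd_hdr_entry(&e, 0x80000000u, 3, 0x40, 8191);
        printf("cda=0x%x ttl=%u desc_info=0x%x\n", e.cda, e.ttl, e.desc_info);
        return 0;
    }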
pp 1161 drivers/ata/sata_mv.c struct mv_port_priv *pp, u8 protocol)
pp 1165 drivers/ata/sata_mv.c if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
pp 1166 drivers/ata/sata_mv.c int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
pp 1170 drivers/ata/sata_mv.c if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
pp 1175 drivers/ata/sata_mv.c mv_set_edma_ptrs(port_mmio, hpriv, pp);
pp 1179 drivers/ata/sata_mv.c pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
pp 1233 drivers/ata/sata_mv.c struct mv_port_priv *pp = ap->private_data;
pp 1236 drivers/ata/sata_mv.c if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
pp 1238 drivers/ata/sata_mv.c pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
pp 1426 drivers/ata/sata_mv.c struct mv_port_priv *pp = ap->private_data;
pp 1432 drivers/ata/sata_mv.c if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH)
pp 1465 drivers/ata/sata_mv.c if ((pp->pp_flags & MV_PP_FLAG_EDMA_EN) &&
pp 1466 drivers/ata/sata_mv.c (pp->pp_flags & MV_PP_FLAG_NCQ_EN)) {
pp 1480 drivers/ata/sata_mv.c struct mv_port_priv *pp = ap->private_data;
pp 1483 drivers/ata/sata_mv.c u32 fiscfg, *old_fiscfg = &pp->cached.fiscfg;
pp 1484 drivers/ata/sata_mv.c u32 ltmode, *old_ltmode = &pp->cached.ltmode;
pp 1485 drivers/ata/sata_mv.c u32 haltcond, *old_haltcond = &pp->cached.haltcond;
pp 1536 drivers/ata/sata_mv.c struct mv_port_priv *pp = ap->private_data;
pp 1537 drivers/ata/sata_mv.c u32 new, *old = &pp->cached.unknown_rsvd;
pp 1589 drivers/ata/sata_mv.c struct mv_port_priv *pp = this_ap->private_data;
pp 1591 drivers/ata/sata_mv.c if (pp->pp_flags & MV_PP_FLAG_NCQ_EN)
pp 1604 drivers/ata/sata_mv.c struct mv_port_priv *pp = ap->private_data;
pp 1610 drivers/ata/sata_mv.c pp->pp_flags &=
pp 1635 drivers/ata/sata_mv.c pp->pp_flags |= MV_PP_FLAG_FBS_EN;
pp 1659 drivers/ata/sata_mv.c pp->pp_flags |= MV_PP_FLAG_NCQ_EN;
pp 1668 drivers/ata/sata_mv.c struct mv_port_priv *pp = ap->private_data;
pp 1671 drivers/ata/sata_mv.c if (pp->crqb) {
pp 1672 drivers/ata/sata_mv.c dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
pp 1673 drivers/ata/sata_mv.c pp->crqb = NULL;
pp 1675 drivers/ata/sata_mv.c if (pp->crpb) {
pp 1676 drivers/ata/sata_mv.c dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
pp 1677 drivers/ata/sata_mv.c pp->crpb = NULL;
pp 1684 drivers/ata/sata_mv.c if (pp->sg_tbl[tag]) {
pp 1687 drivers/ata/sata_mv.c pp->sg_tbl[tag],
pp 1688 drivers/ata/sata_mv.c pp->sg_tbl_dma[tag]);
pp 1689 drivers/ata/sata_mv.c pp->sg_tbl[tag] = NULL;
pp 1708 drivers/ata/sata_mv.c struct mv_port_priv *pp;
pp 1712 drivers/ata/sata_mv.c pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
pp 1713 drivers/ata/sata_mv.c if (!pp)
pp 1715 drivers/ata/sata_mv.c ap->private_data = pp;
pp 1717 drivers/ata/sata_mv.c pp->crqb = dma_pool_zalloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
pp 1718 drivers/ata/sata_mv.c if (!pp->crqb)
pp 1721 drivers/ata/sata_mv.c pp->crpb = dma_pool_zalloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
pp 1722 drivers/ata/sata_mv.c if (!pp->crpb)
pp 1734 drivers/ata/sata_mv.c pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
pp 1735 drivers/ata/sata_mv.c GFP_KERNEL, &pp->sg_tbl_dma[tag]);
pp 1736 drivers/ata/sata_mv.c if (!pp->sg_tbl[tag])
pp 1739 drivers/ata/sata_mv.c pp->sg_tbl[tag] = pp->sg_tbl[0];
pp 1740 drivers/ata/sata_mv.c pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
pp 1787 drivers/ata/sata_mv.c struct mv_port_priv *pp = qc->ap->private_data;
pp 1792 drivers/ata/sata_mv.c mv_sg = pp->sg_tbl[qc->hw_tag];
pp 1885 drivers/ata/sata_mv.c struct mv_port_priv *pp = ap->private_data;
pp 1893 drivers/ata/sata_mv.c writel((pp->sg_tbl_dma[qc->hw_tag] >> 16) >> 16,
pp 1895 drivers/ata/sata_mv.c writelfl(pp->sg_tbl_dma[qc->hw_tag],
pp 2037 drivers/ata/sata_mv.c struct mv_port_priv *pp = ap->private_data;
pp 2066 drivers/ata/sata_mv.c in_index = pp->req_idx;
pp 2068 drivers/ata/sata_mv.c pp->crqb[in_index].sg_addr =
pp 2069 drivers/ata/sata_mv.c cpu_to_le32(pp->sg_tbl_dma[qc->hw_tag] & 0xffffffff);
pp 2070 drivers/ata/sata_mv.c pp->crqb[in_index].sg_addr_hi =
pp 2071 drivers/ata/sata_mv.c cpu_to_le32((pp->sg_tbl_dma[qc->hw_tag] >> 16) >> 16);
pp 2072 drivers/ata/sata_mv.c pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);
pp 2074 drivers/ata/sata_mv.c cw = &pp->crqb[in_index].ata_cmd[0];
pp 2138 drivers/ata/sata_mv.c struct mv_port_priv *pp = ap->private_data;
pp 2160 drivers/ata/sata_mv.c in_index = pp->req_idx;
pp 2162 drivers/ata/sata_mv.c crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
pp 2163 drivers/ata/sata_mv.c crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->hw_tag] & 0xffffffff);
pp 2164 drivers/ata/sata_mv.c crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->hw_tag] >> 16) >> 16);
pp 2209 drivers/ata/sata_mv.c struct mv_port_priv *pp = ap->private_data;
pp 2211 drivers/ata/sata_mv.c if (pp->pp_flags & MV_PP_FLAG_FAKE_ATA_BUSY) {
pp 2213 drivers/ata/sata_mv.c pp->pp_flags &= ~MV_PP_FLAG_FAKE_ATA_BUSY;
pp 2284 drivers/ata/sata_mv.c struct mv_port_priv *pp = ap->private_data;
pp 2296 drivers/ata/sata_mv.c pp->pp_flags |= MV_PP_FLAG_FAKE_ATA_BUSY;
pp 2302 drivers/ata/sata_mv.c pp->pp_flags |= MV_PP_FLAG_FAKE_ATA_BUSY;
pp 2335 drivers/ata/sata_mv.c struct mv_port_priv *pp = ap->private_data;
pp 2339 drivers/ata/sata_mv.c pp->pp_flags &= ~MV_PP_FLAG_FAKE_ATA_BUSY; /* paranoia */
pp 2350 drivers/ata/sata_mv.c mv_start_edma(ap, port_mmio, pp, qc->tf.protocol);
pp 2351 drivers/ata/sata_mv.c pp->req_idx = (pp->req_idx + 1) & MV_MAX_Q_DEPTH_MASK;
pp 2352 drivers/ata/sata_mv.c in_index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT;
pp 2355 drivers/ata/sata_mv.c writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
pp 2421 drivers/ata/sata_mv.c struct mv_port_priv *pp = ap->private_data;
pp 2424 drivers/ata/sata_mv.c if (pp->pp_flags & MV_PP_FLAG_NCQ_EN)
pp 2435 drivers/ata/sata_mv.c struct mv_port_priv *pp = ap->private_data;
pp 2437 drivers/ata/sata_mv.c if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH) {
pp 2444 drivers/ata/sata_mv.c pmp_map = pp->delayed_eh_pmp_map;
pp 2445 drivers/ata/sata_mv.c pp->pp_flags &= ~MV_PP_FLAG_DELAYED_EH;
pp 2503 drivers/ata/sata_mv.c struct mv_port_priv *pp = ap->private_data;
pp 2515 drivers/ata/sata_mv.c if (!(pp->pp_flags & MV_PP_FLAG_DELAYED_EH)) {
pp 2516 drivers/ata/sata_mv.c pp->pp_flags |= MV_PP_FLAG_DELAYED_EH;
pp 2517 drivers/ata/sata_mv.c pp->delayed_eh_pmp_map = 0;
pp 2519 drivers/ata/sata_mv.c old_map = pp->delayed_eh_pmp_map;
pp 2523 drivers/ata/sata_mv.c pp->delayed_eh_pmp_map = new_map;
pp 2530 drivers/ata/sata_mv.c __func__, pp->delayed_eh_pmp_map,
pp 2535 drivers/ata/sata_mv.c mv_process_crpb_entries(ap, pp);
pp 2563 drivers/ata/sata_mv.c struct mv_port_priv *pp = ap->private_data;
pp 2565 drivers/ata/sata_mv.c if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
pp 2567 drivers/ata/sata_mv.c if (!(pp->pp_flags & MV_PP_FLAG_FBS_EN))
pp 2576 drivers/ata/sata_mv.c if (pp->pp_flags & MV_PP_FLAG_NCQ_EN) {
pp 2584 drivers/ata/sata_mv.c __func__, edma_err_cause, pp->pp_flags);
pp 2596 drivers/ata/sata_mv.c __func__, edma_err_cause, pp->pp_flags);
pp 2639 drivers/ata/sata_mv.c struct mv_port_priv *pp = ap->private_data;
pp 2673 drivers/ata/sata_mv.c edma_err_cause, pp->pp_flags);
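The sata_mv EDMA queue lines above keep a software ring index that is masked to the queue depth and merged into the queue-pointer register underneath the ring base address (the WARN_ON(pp->crqb_dma & 0x3ff) confirms the base is 1 KB aligned). A model of the pointer update (the shift and mask values here are assumptions inferred from those entries):

    #include <stdint.h>
    #include <stdio.h>

    #define MV_MAX_Q_DEPTH          32
    #define MV_MAX_Q_DEPTH_MASK     (MV_MAX_Q_DEPTH - 1)
    #define EDMA_REQ_Q_PTR_SHIFT    5               /* index field position */
    #define EDMA_REQ_Q_BASE_LO_MASK 0xfffffc00u     /* ring base, 1 KB aligned */

    /* advance the software producer index and rebuild the IN-pointer register */
    static uint32_t advance_req_ptr(uint32_t crqb_dma_lo, unsigned int *req_idx)
    {
        *req_idx = (*req_idx + 1) & MV_MAX_Q_DEPTH_MASK;
        return (crqb_dma_lo & EDMA_REQ_Q_BASE_LO_MASK) |
               (*req_idx << EDMA_REQ_Q_PTR_SHIFT);
    }

    int main(void)
    {
        unsigned int idx = MV_MAX_Q_DEPTH_MASK;     /* wraps back to 0 */
        uint32_t reg = advance_req_ptr(0x1f000400u, &idx);
        printf("idx=%u reg=0x%08x\n", idx, reg);
        return 0;
    }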
pp 2715 drivers/ata/sata_mv.c pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
pp 2721 drivers/ata/sata_mv.c pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
pp 2797 drivers/ata/sata_mv.c static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp)
pp 2804 drivers/ata/sata_mv.c int ncq_enabled = (pp->pp_flags & MV_PP_FLAG_NCQ_EN);
pp 2811 drivers/ata/sata_mv.c while (in_index != pp->resp_idx) {
pp 2813 drivers/ata/sata_mv.c struct mv_crpb *response = &pp->crpb[pp->resp_idx];
pp 2815 drivers/ata/sata_mv.c pp->resp_idx = (pp->resp_idx + 1) & MV_MAX_Q_DEPTH_MASK;
pp 2833 drivers/ata/sata_mv.c writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
pp 2834 drivers/ata/sata_mv.c (pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT),
pp 2841 drivers/ata/sata_mv.c struct mv_port_priv *pp;
pp 2849 drivers/ata/sata_mv.c pp = ap->private_data;
pp 2850 drivers/ata/sata_mv.c edma_was_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
pp 2855 drivers/ata/sata_mv.c mv_process_crpb_entries(ap, pp);
pp 2856 drivers/ata/sata_mv.c if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH)
pp 3622 drivers/ata/sata_mv.c struct mv_port_priv *pp = ap->private_data;
pp 3629 drivers/ata/sata_mv.c pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
pp 3630 drivers/ata/sata_mv.c pp->pp_flags &=
pp 591 drivers/ata/sata_nv.c struct nv_adma_port_priv *pp = ap->private_data;
pp 592 drivers/ata/sata_nv.c void __iomem *mmio = pp->ctl_block;
pp 596 drivers/ata/sata_nv.c if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
pp 624 drivers/ata/sata_nv.c pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
pp 629 drivers/ata/sata_nv.c struct nv_adma_port_priv *pp = ap->private_data;
pp 630 drivers/ata/sata_nv.c void __iomem *mmio = pp->ctl_block;
pp 634 drivers/ata/sata_nv.c if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
pp 637 drivers/ata/sata_nv.c WARN_ON(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
pp 654 drivers/ata/sata_nv.c pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
pp 660 drivers/ata/sata_nv.c struct nv_adma_port_priv *pp = ap->private_data;
pp 711 drivers/ata/sata_nv.c pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE;
pp 714 drivers/ata/sata_nv.c pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE;
pp 733 drivers/ata/sata_nv.c rc = dma_set_mask(&pdev->dev, pp->adma_dma_mask);
pp 750 drivers/ata/sata_nv.c struct nv_adma_port_priv *pp = qc->ap->private_data;
pp 751 drivers/ata/sata_nv.c return !(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
pp 802 drivers/ata/sata_nv.c struct nv_adma_port_priv *pp = ap->private_data;
pp 803 drivers/ata/sata_nv.c u8 flags = pp->cpb[cpb_num].resp_flags;
pp 879 drivers/ata/sata_nv.c struct nv_adma_port_priv *pp = ap->private_data;
pp 880 drivers/ata/sata_nv.c void __iomem *mmio = pp->ctl_block;
pp 888 drivers/ata/sata_nv.c if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
pp 896 drivers/ata/sata_nv.c if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
pp 912 drivers/ata/sata_nv.c gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
pp 994 drivers/ata/sata_nv.c struct nv_adma_port_priv *pp = host->ports[0]->private_data;
pp 995 drivers/ata/sata_nv.c writel(notifier_clears[0], pp->notifier_clear_block);
pp 996 drivers/ata/sata_nv.c pp = host->ports[1]->private_data;
pp 997 drivers/ata/sata_nv.c writel(notifier_clears[1], pp->notifier_clear_block);
pp 1007 drivers/ata/sata_nv.c struct nv_adma_port_priv *pp = ap->private_data;
pp 1008 drivers/ata/sata_nv.c void __iomem *mmio = pp->ctl_block;
pp 1013 drivers/ata/sata_nv.c if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
pp 1029 drivers/ata/sata_nv.c struct nv_adma_port_priv *pp = ap->private_data;
pp 1030 drivers/ata/sata_nv.c void __iomem *mmio = pp->ctl_block;
pp 1035 drivers/ata/sata_nv.c if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
pp 1047 drivers/ata/sata_nv.c struct nv_adma_port_priv *pp = ap->private_data;
pp 1048 drivers/ata/sata_nv.c void __iomem *mmio = pp->ctl_block;
pp 1051 drivers/ata/sata_nv.c if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
pp 1072 drivers/ata/sata_nv.c pp = ap->host->ports[0]->private_data;
pp 1073 drivers/ata/sata_nv.c writel(notifier_clears[0], pp->notifier_clear_block);
pp 1074 drivers/ata/sata_nv.c pp = ap->host->ports[1]->private_data;
pp 1075 drivers/ata/sata_nv.c writel(notifier_clears[1], pp->notifier_clear_block);
pp 1080 drivers/ata/sata_nv.c struct nv_adma_port_priv *pp = qc->ap->private_data;
pp 1082 drivers/ata/sata_nv.c if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
pp 1089 drivers/ata/sata_nv.c struct nv_adma_port_priv *pp;
pp 1112 drivers/ata/sata_nv.c pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
pp 1113 drivers/ata/sata_nv.c if (!pp)
pp 1118 drivers/ata/sata_nv.c pp->ctl_block = mmio;
pp 1119 drivers/ata/sata_nv.c pp->gen_block = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_GEN;
pp 1120 drivers/ata/sata_nv.c pp->notifier_clear_block = pp->gen_block +
pp 1129 drivers/ata/sata_nv.c pp->adma_dma_mask = *dev->dma_mask;
pp 1141 drivers/ata/sata_nv.c pp->cpb = mem;
pp 1142 drivers/ata/sata_nv.c pp->cpb_dma = mem_dma;
pp 1153 drivers/ata/sata_nv.c pp->aprd = mem;
pp 1154 drivers/ata/sata_nv.c pp->aprd_dma = mem_dma;
pp 1156 drivers/ata/sata_nv.c ap->private_data = pp;
pp 1162 drivers/ata/sata_nv.c pp->flags = NV_ADMA_PORT_REGISTER_MODE;
pp 1184 drivers/ata/sata_nv.c struct nv_adma_port_priv *pp = ap->private_data;
pp 1185 drivers/ata/sata_nv.c void __iomem *mmio = pp->ctl_block;
pp 1194 drivers/ata/sata_nv.c struct nv_adma_port_priv *pp = ap->private_data;
pp 1195 drivers/ata/sata_nv.c void __iomem *mmio = pp->ctl_block;
pp 1211 drivers/ata/sata_nv.c struct nv_adma_port_priv *pp = ap->private_data;
pp 1212 drivers/ata/sata_nv.c void __iomem *mmio = pp->ctl_block;
pp 1216 drivers/ata/sata_nv.c writel(pp->cpb_dma & 0xFFFFFFFF, mmio + NV_ADMA_CPB_BASE_LOW);
pp 1217 drivers/ata/sata_nv.c writel((pp->cpb_dma >> 16) >> 16, mmio + NV_ADMA_CPB_BASE_HIGH);
pp 1223 drivers/ata/sata_nv.c pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
pp 1312 drivers/ata/sata_nv.c struct nv_adma_port_priv *pp = qc->ap->private_data;
pp 1321 drivers/ata/sata_nv.c &pp->aprd[NV_ADMA_SGTBL_LEN * qc->hw_tag + (si-5)];
pp 1325 drivers/ata/sata_nv.c cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->hw_tag)));
pp 1332 drivers/ata/sata_nv.c struct nv_adma_port_priv *pp = qc->ap->private_data;
pp 1336 drivers/ata/sata_nv.c if ((pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
pp 1349 drivers/ata/sata_nv.c struct nv_adma_port_priv *pp = qc->ap->private_data;
pp 1350 drivers/ata/sata_nv.c struct nv_adma_cpb *cpb = &pp->cpb[qc->hw_tag];
pp 1355 drivers/ata/sata_nv.c BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
pp 1395 drivers/ata/sata_nv.c struct nv_adma_port_priv *pp = qc->ap->private_data;
pp 1396 drivers/ata/sata_nv.c void __iomem *mmio = pp->ctl_block;
pp 1413 drivers/ata/sata_nv.c BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
pp 1424 drivers/ata/sata_nv.c if (curr_ncq != pp->last_issue_ncq) {
pp 1428 drivers/ata/sata_nv.c pp->last_issue_ncq = curr_ncq;
pp 1634 drivers/ata/sata_nv.c struct nv_adma_port_priv *pp = ap->private_data;
pp 1635 drivers/ata/sata_nv.c if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
pp 1636 drivers/ata/sata_nv.c void __iomem *mmio = pp->ctl_block;
pp 1643 drivers/ata/sata_nv.c u32 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
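The sata_nv ADMA entries above carve one coherent allocation into per-tag scatter/gather tables: entry si of a command maps to NV_ADMA_SGTBL_LEN * tag + (si - 5) because the first five entries sit inline in the CPB itself, and next_aprd points at aprd_dma + NV_ADMA_SGTBL_SZ * tag. A sketch of that index arithmetic (the table length and entry size below are assumed, not the driver's real values):

    #include <stdint.h>
    #include <stdio.h>

    #define SGTBL_LEN 28        /* assumed entries per tag */
    #define APRD_SZ   16        /* assumed bytes per entry */
    #define SGTBL_SZ  (SGTBL_LEN * APRD_SZ)

    /* entry si of command 'tag'; the first 5 entries live in the CPB */
    static unsigned int aprd_index(unsigned int tag, unsigned int si)
    {
        return SGTBL_LEN * tag + (si - 5);
    }

    /* DMA address of the per-tag table, as stored in cpb->next_aprd */
    static uint64_t aprd_tbl_dma(uint64_t aprd_dma, unsigned int tag)
    {
        return aprd_dma + (uint64_t)SGTBL_SZ * tag;
    }

    int main(void)
    {
        printf("tag 2, sg 7 -> slot %u\n", aprd_index(2, 7));
        printf("tag 2 table at 0x%llx\n",
               (unsigned long long)aprd_tbl_dma(0x40000000ULL, 2));
        return 0;
    }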
drivers/ata/sata_nv.c u32 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL); pp 1656 drivers/ata/sata_nv.c struct nv_adma_cpb *cpb = &pp->cpb[i]; pp 1671 drivers/ata/sata_nv.c pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID; pp 1690 drivers/ata/sata_nv.c struct nv_swncq_port_priv *pp = ap->private_data; pp 1691 drivers/ata/sata_nv.c struct defer_queue *dq = &pp->defer_queue; pp 1701 drivers/ata/sata_nv.c struct nv_swncq_port_priv *pp = ap->private_data; pp 1702 drivers/ata/sata_nv.c struct defer_queue *dq = &pp->defer_queue; pp 1718 drivers/ata/sata_nv.c struct nv_swncq_port_priv *pp = ap->private_data; pp 1720 drivers/ata/sata_nv.c pp->dhfis_bits = 0; pp 1721 drivers/ata/sata_nv.c pp->dmafis_bits = 0; pp 1722 drivers/ata/sata_nv.c pp->sdbfis_bits = 0; pp 1723 drivers/ata/sata_nv.c pp->ncq_flags = 0; pp 1728 drivers/ata/sata_nv.c struct nv_swncq_port_priv *pp = ap->private_data; pp 1729 drivers/ata/sata_nv.c struct defer_queue *dq = &pp->defer_queue; pp 1734 drivers/ata/sata_nv.c pp->qc_active = 0; pp 1735 drivers/ata/sata_nv.c pp->last_issue_tag = ATA_TAG_POISON; pp 1741 drivers/ata/sata_nv.c struct nv_swncq_port_priv *pp = ap->private_data; pp 1743 drivers/ata/sata_nv.c writew(fis, pp->irq_block); pp 1756 drivers/ata/sata_nv.c struct nv_swncq_port_priv *pp = ap->private_data; pp 1766 drivers/ata/sata_nv.c pp->qc_active, pp->defer_queue.defer_bits, pp->last_issue_tag, pp 1767 drivers/ata/sata_nv.c pp->dhfis_bits, pp->dmafis_bits, pp->sdbfis_bits); pp 1773 drivers/ata/sata_nv.c sactive = readl(pp->sactive_block); pp 1774 drivers/ata/sata_nv.c done_mask = pp->qc_active ^ sactive; pp 1779 drivers/ata/sata_nv.c if (pp->qc_active & (1 << i)) pp 1788 drivers/ata/sata_nv.c (pp->dhfis_bits >> i) & 0x1, pp 1789 drivers/ata/sata_nv.c (pp->dmafis_bits >> i) & 0x1, pp 1790 drivers/ata/sata_nv.c (pp->sdbfis_bits >> i) & 0x1, pp 1928 drivers/ata/sata_nv.c struct nv_swncq_port_priv *pp; pp 1936 drivers/ata/sata_nv.c pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL); pp 1937 drivers/ata/sata_nv.c if (!pp) pp 1940 drivers/ata/sata_nv.c pp->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE, pp 1941 drivers/ata/sata_nv.c &pp->prd_dma, GFP_KERNEL); pp 1942 drivers/ata/sata_nv.c if (!pp->prd) pp 1945 drivers/ata/sata_nv.c ap->private_data = pp; pp 1946 drivers/ata/sata_nv.c pp->sactive_block = ap->ioaddr.scr_addr + 4 * SCR_ACTIVE; pp 1947 drivers/ata/sata_nv.c pp->irq_block = mmio + NV_INT_STATUS_MCP55 + ap->port_no * 2; pp 1948 drivers/ata/sata_nv.c pp->tag_block = mmio + NV_NCQ_REG_MCP55 + ap->port_no * 2; pp 1970 drivers/ata/sata_nv.c struct nv_swncq_port_priv *pp = ap->private_data; pp 1974 drivers/ata/sata_nv.c prd = pp->prd + ATA_MAX_PRD * qc->hw_tag; pp 2005 drivers/ata/sata_nv.c struct nv_swncq_port_priv *pp = ap->private_data; pp 2012 drivers/ata/sata_nv.c writel((1 << qc->hw_tag), pp->sactive_block); pp 2013 drivers/ata/sata_nv.c pp->last_issue_tag = qc->hw_tag; pp 2014 drivers/ata/sata_nv.c pp->dhfis_bits &= ~(1 << qc->hw_tag); pp 2015 drivers/ata/sata_nv.c pp->dmafis_bits &= ~(1 << qc->hw_tag); pp 2016 drivers/ata/sata_nv.c pp->qc_active |= (0x1 << qc->hw_tag); pp 2029 drivers/ata/sata_nv.c struct nv_swncq_port_priv *pp = ap->private_data; pp 2036 drivers/ata/sata_nv.c if (!pp->qc_active) pp 2072 drivers/ata/sata_nv.c struct nv_swncq_port_priv *pp = ap->private_data; pp 2092 drivers/ata/sata_nv.c sactive = readl(pp->sactive_block); pp 2093 drivers/ata/sata_nv.c done_mask = pp->qc_active ^ sactive; pp 2095 drivers/ata/sata_nv.c pp->qc_active &= ~done_mask; pp 2096 drivers/ata/sata_nv.c 
pp->dhfis_bits &= ~done_mask; pp 2097 drivers/ata/sata_nv.c pp->dmafis_bits &= ~done_mask; pp 2098 drivers/ata/sata_nv.c pp->sdbfis_bits |= done_mask; pp 2107 drivers/ata/sata_nv.c if (pp->qc_active & pp->dhfis_bits) pp 2110 drivers/ata/sata_nv.c if ((pp->ncq_flags & ncq_saw_backout) || pp 2111 drivers/ata/sata_nv.c (pp->qc_active ^ pp->dhfis_bits)) pp 2120 drivers/ata/sata_nv.c ap->print_id, ap->qc_active, pp->qc_active, pp 2121 drivers/ata/sata_nv.c pp->defer_queue.defer_bits, pp->dhfis_bits, pp 2122 drivers/ata/sata_nv.c pp->dmafis_bits, pp->last_issue_tag); pp 2127 drivers/ata/sata_nv.c qc = ata_qc_from_tag(ap, pp->last_issue_tag); pp 2132 drivers/ata/sata_nv.c if (pp->defer_queue.defer_bits) { pp 2144 drivers/ata/sata_nv.c struct nv_swncq_port_priv *pp = ap->private_data; pp 2147 drivers/ata/sata_nv.c tag = readb(pp->tag_block) >> 2; pp 2157 drivers/ata/sata_nv.c struct nv_swncq_port_priv *pp = ap->private_data; pp 2171 drivers/ata/sata_nv.c iowrite32(pp->prd_dma + ATA_PRD_TBL_SZ * qc->hw_tag, pp 2185 drivers/ata/sata_nv.c struct nv_swncq_port_priv *pp = ap->private_data; pp 2204 drivers/ata/sata_nv.c if (!pp->qc_active) pp 2225 drivers/ata/sata_nv.c pp->ncq_flags |= ncq_saw_backout; pp 2229 drivers/ata/sata_nv.c pp->ncq_flags |= ncq_saw_sdb; pp 2232 drivers/ata/sata_nv.c ap->print_id, pp->qc_active, pp->dhfis_bits, pp 2233 drivers/ata/sata_nv.c pp->dmafis_bits, readl(pp->sactive_block)); pp 2242 drivers/ata/sata_nv.c pp->dhfis_bits |= (0x1 << pp->last_issue_tag); pp 2243 drivers/ata/sata_nv.c pp->ncq_flags |= ncq_saw_d2h; pp 2244 drivers/ata/sata_nv.c if (pp->ncq_flags & (ncq_saw_sdb | ncq_saw_backout)) { pp 2252 drivers/ata/sata_nv.c !(pp->ncq_flags & ncq_saw_dmas)) { pp 2257 drivers/ata/sata_nv.c if (pp->defer_queue.defer_bits) { pp 2269 drivers/ata/sata_nv.c pp->dmafis_bits |= (0x1 << nv_swncq_tag(ap)); pp 2270 drivers/ata/sata_nv.c pp->ncq_flags |= ncq_saw_dmas; pp 2416 drivers/ata/sata_nv.c struct nv_adma_port_priv *pp; pp 2420 drivers/ata/sata_nv.c pp = host->ports[0]->private_data; pp 2421 drivers/ata/sata_nv.c if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) pp 2427 drivers/ata/sata_nv.c pp = host->ports[1]->private_data; pp 2428 drivers/ata/sata_nv.c if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) pp 318 drivers/ata/sata_promise.c struct pdc_port_priv *pp; pp 326 drivers/ata/sata_promise.c pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL); pp 327 drivers/ata/sata_promise.c if (!pp) pp 330 drivers/ata/sata_promise.c pp->pkt = dmam_alloc_coherent(dev, 128, &pp->pkt_dma, GFP_KERNEL); pp 331 drivers/ata/sata_promise.c if (!pp->pkt) pp 334 drivers/ata/sata_promise.c ap->private_data = pp; pp 491 drivers/ata/sata_promise.c struct pdc_port_priv *pp = ap->private_data; pp 492 drivers/ata/sata_promise.c u8 *buf = pp->pkt; pp 638 drivers/ata/sata_promise.c struct pdc_port_priv *pp = qc->ap->private_data; pp 649 drivers/ata/sata_promise.c qc->dev->devno, pp->pkt); pp 651 drivers/ata/sata_promise.c i = pdc_prep_lba48(&qc->tf, pp->pkt, i); pp 653 drivers/ata/sata_promise.c i = pdc_prep_lba28(&qc->tf, pp->pkt, i); pp 654 drivers/ata/sata_promise.c pdc_pkt_footer(&qc->tf, pp->pkt, i); pp 1006 drivers/ata/sata_promise.c struct pdc_port_priv *pp = ap->private_data; pp 1017 drivers/ata/sata_promise.c pp->pkt[2] = seq; pp 1019 drivers/ata/sata_promise.c writel(pp->pkt_dma, ata_mmio + PDC_PKT_SUBMIT); pp 174 drivers/ata/sata_qstor.c struct qs_port_priv *pp = ap->private_data; pp 176 drivers/ata/sata_qstor.c pp->state = qs_state_mmio; pp 240 drivers/ata/sata_qstor.c struct qs_port_priv *pp = 
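
The sata_nv software-NCQ entries above keep a shadow of the issued-tag set in pp->qc_active and XOR it against the hardware SActive register to find tags that completed since the last check. The field names below follow the listing; the helper itself is illustrative:

	static u32 my_swncq_done_mask(struct nv_swncq_port_priv *pp)
	{
		u32 sactive = readl(pp->sactive_block);	 /* tags still in flight */
		u32 done_mask = pp->qc_active ^ sactive; /* issued, now inactive */

		pp->qc_active	&= ~done_mask;	/* retire completed tags ... */
		pp->dhfis_bits	&= ~done_mask;	/* ... and their per-FIS state */
		pp->dmafis_bits &= ~done_mask;
		pp->sdbfis_bits |=  done_mask;	/* remember the SDB completion */

		return done_mask;
	}
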
ap->private_data; pp 241 drivers/ata/sata_qstor.c u8 *prd = pp->pkt + QS_CPB_BYTES; pp 265 drivers/ata/sata_qstor.c struct qs_port_priv *pp = qc->ap->private_data; pp 266 drivers/ata/sata_qstor.c u8 dflags = QS_DF_PORD, *buf = pp->pkt; pp 289 drivers/ata/sata_qstor.c addr = ((u64)pp->pkt_dma) + QS_CPB_BYTES; pp 315 drivers/ata/sata_qstor.c struct qs_port_priv *pp = qc->ap->private_data; pp 319 drivers/ata/sata_qstor.c pp->state = qs_state_pkt; pp 331 drivers/ata/sata_qstor.c pp->state = qs_state_mmio; pp 372 drivers/ata/sata_qstor.c struct qs_port_priv *pp = ap->private_data; pp 378 drivers/ata/sata_qstor.c if (!pp || pp->state != qs_state_pkt) pp 403 drivers/ata/sata_qstor.c struct qs_port_priv *pp = ap->private_data; pp 422 drivers/ata/sata_qstor.c if (!pp || pp->state != qs_state_mmio) pp 468 drivers/ata/sata_qstor.c struct qs_port_priv *pp; pp 473 drivers/ata/sata_qstor.c pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL); pp 474 drivers/ata/sata_qstor.c if (!pp) pp 476 drivers/ata/sata_qstor.c pp->pkt = dmam_alloc_coherent(dev, QS_PKT_BYTES, &pp->pkt_dma, pp 478 drivers/ata/sata_qstor.c if (!pp->pkt) pp 480 drivers/ata/sata_qstor.c ap->private_data = pp; pp 483 drivers/ata/sata_qstor.c addr = (u64)pp->pkt_dma; pp 572 drivers/ata/sata_sil24.c struct sil24_port_priv *pp = ap->private_data; pp 586 drivers/ata/sata_sil24.c pp->do_port_rst = 1; pp 600 drivers/ata/sata_sil24.c struct sil24_port_priv *pp = ap->private_data; pp 601 drivers/ata/sata_sil24.c struct sil24_prb *prb = &pp->cmd_block[0].ata.prb; pp 602 drivers/ata/sata_sil24.c dma_addr_t paddr = pp->cmd_block_dma; pp 695 drivers/ata/sata_sil24.c struct sil24_port_priv *pp = ap->private_data; pp 705 drivers/ata/sata_sil24.c if (pp->do_port_rst) { pp 719 drivers/ata/sata_sil24.c pp->do_port_rst = 0; pp 761 drivers/ata/sata_sil24.c pp->do_port_rst = 1; pp 836 drivers/ata/sata_sil24.c struct sil24_port_priv *pp = ap->private_data; pp 842 drivers/ata/sata_sil24.c cb = &pp->cmd_block[sil24_tag(qc->hw_tag)]; pp 882 drivers/ata/sata_sil24.c struct sil24_port_priv *pp = ap->private_data; pp 888 drivers/ata/sata_sil24.c paddr = pp->cmd_block_dma + tag * sizeof(*pp->cmd_block); pp 971 drivers/ata/sata_sil24.c struct sil24_port_priv *pp = ap->private_data; pp 1027 drivers/ata/sata_sil24.c pp->do_port_rst = 1; pp 1171 drivers/ata/sata_sil24.c struct sil24_port_priv *pp = ap->private_data; pp 1178 drivers/ata/sata_sil24.c pp->do_port_rst = 0; pp 1193 drivers/ata/sata_sil24.c struct sil24_port_priv *pp; pp 1198 drivers/ata/sata_sil24.c pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL); pp 1199 drivers/ata/sata_sil24.c if (!pp) pp 1206 drivers/ata/sata_sil24.c pp->cmd_block = cb; pp 1207 drivers/ata/sata_sil24.c pp->cmd_block_dma = cb_dma; pp 1209 drivers/ata/sata_sil24.c ap->private_data = pp; pp 286 drivers/ata/sata_sx4.c struct pdc_port_priv *pp; pp 288 drivers/ata/sata_sx4.c pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL); pp 289 drivers/ata/sata_sx4.c if (!pp) pp 292 drivers/ata/sata_sx4.c pp->pkt = dmam_alloc_coherent(dev, 128, &pp->pkt_dma, GFP_KERNEL); pp 293 drivers/ata/sata_sx4.c if (!pp->pkt) pp 296 drivers/ata/sata_sx4.c ap->private_data = pp; pp 441 drivers/ata/sata_sx4.c struct pdc_port_priv *pp = ap->private_data; pp 446 drivers/ata/sata_sx4.c __le32 *buf = (__le32 *) &pp->dimm_buf[PDC_DIMM_HEADER_SZ]; pp 470 drivers/ata/sata_sx4.c pdc20621_host_sg(&pp->dimm_buf[0], portno, total_len); pp 471 drivers/ata/sata_sx4.c pdc20621_host_pkt(&qc->tf, &pp->dimm_buf[0], portno); pp 473 drivers/ata/sata_sx4.c pdc20621_ata_sg(&pp->dimm_buf[0], 
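
The sil24 entries above show the usual per-tag descriptor layout: one contiguous coherent allocation holds a fixed-size command block per NCQ tag, so the bus address of tag N is simply base + N * block size. The address arithmetic in isolation (names are illustrative):

	struct my_cmd_block {
		u8 raw[256];			/* fixed-size per-tag slot */
	};

	static dma_addr_t my_cmd_block_addr(dma_addr_t cmd_block_dma,
					    unsigned int tag)
	{
		return cmd_block_dma + tag * sizeof(struct my_cmd_block);
	}
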
portno, total_len); pp 474 drivers/ata/sata_sx4.c i = pdc20621_ata_pkt(&qc->tf, qc->dev->devno, &pp->dimm_buf[0], portno); pp 477 drivers/ata/sata_sx4.c i = pdc_prep_lba48(&qc->tf, &pp->dimm_buf[0], i); pp 479 drivers/ata/sata_sx4.c i = pdc_prep_lba28(&qc->tf, &pp->dimm_buf[0], i); pp 481 drivers/ata/sata_sx4.c pdc_pkt_footer(&qc->tf, &pp->dimm_buf[0], i); pp 485 drivers/ata/sata_sx4.c &pp->dimm_buf, PDC_DIMM_HEADER_SZ); pp 488 drivers/ata/sata_sx4.c &pp->dimm_buf[PDC_DIMM_HEADER_SZ], sgt_len); pp 501 drivers/ata/sata_sx4.c struct pdc_port_priv *pp = ap->private_data; pp 512 drivers/ata/sata_sx4.c i = pdc20621_ata_pkt(&qc->tf, qc->dev->devno, &pp->dimm_buf[0], portno); pp 515 drivers/ata/sata_sx4.c i = pdc_prep_lba48(&qc->tf, &pp->dimm_buf[0], i); pp 517 drivers/ata/sata_sx4.c i = pdc_prep_lba28(&qc->tf, &pp->dimm_buf[0], i); pp 519 drivers/ata/sata_sx4.c pdc_pkt_footer(&qc->tf, &pp->dimm_buf[0], i); pp 523 drivers/ata/sata_sx4.c &pp->dimm_buf, PDC_DIMM_HEADER_SZ); pp 570 drivers/ata/sata_sx4.c struct pdc_host_priv *pp = ap->host->private_data; pp 571 drivers/ata/sata_sx4.c unsigned int idx = pp->hdma_prod & PDC_HDMA_Q_MASK; pp 573 drivers/ata/sata_sx4.c if (!pp->doing_hdma) { pp 575 drivers/ata/sata_sx4.c pp->doing_hdma = 1; pp 579 drivers/ata/sata_sx4.c pp->hdma[idx].qc = qc; pp 580 drivers/ata/sata_sx4.c pp->hdma[idx].seq = seq; pp 581 drivers/ata/sata_sx4.c pp->hdma[idx].pkt_ofs = pkt_ofs; pp 582 drivers/ata/sata_sx4.c pp->hdma_prod++; pp 588 drivers/ata/sata_sx4.c struct pdc_host_priv *pp = ap->host->private_data; pp 589 drivers/ata/sata_sx4.c unsigned int idx = pp->hdma_cons & PDC_HDMA_Q_MASK; pp 592 drivers/ata/sata_sx4.c if (pp->hdma_prod == pp->hdma_cons) { pp 593 drivers/ata/sata_sx4.c pp->doing_hdma = 0; pp 597 drivers/ata/sata_sx4.c __pdc20621_push_hdma(pp->hdma[idx].qc, pp->hdma[idx].seq, pp 598 drivers/ata/sata_sx4.c pp->hdma[idx].pkt_ofs); pp 599 drivers/ata/sata_sx4.c pp->hdma_cons++; pp 108 drivers/char/ppdev.c static inline void pp_enable_irq(struct pp_struct *pp) pp 110 drivers/char/ppdev.c struct parport *port = pp->pdev->port; pp 119 drivers/char/ppdev.c struct pp_struct *pp = file->private_data; pp 125 drivers/char/ppdev.c if (!(pp->flags & PP_CLAIMED)) { pp 138 drivers/char/ppdev.c pport = pp->pdev->port; pp 141 drivers/char/ppdev.c parport_set_timeout(pp->pdev, pp 144 drivers/char/ppdev.c pp->default_inactivity); pp 154 drivers/char/ppdev.c if (pp->flags & PP_W91284PIC) pp 156 drivers/char/ppdev.c if (pp->flags & PP_FASTREAD) pp 183 drivers/char/ppdev.c parport_set_timeout(pp->pdev, pp->default_inactivity); pp 189 drivers/char/ppdev.c pp_enable_irq(pp); pp 197 drivers/char/ppdev.c struct pp_struct *pp = file->private_data; pp 204 drivers/char/ppdev.c if (!(pp->flags & PP_CLAIMED)) { pp 214 drivers/char/ppdev.c pport = pp->pdev->port; pp 217 drivers/char/ppdev.c parport_set_timeout(pp->pdev, pp 220 drivers/char/ppdev.c pp->default_inactivity); pp 230 drivers/char/ppdev.c if ((pp->flags & PP_FASTWRITE) && (mode == IEEE1284_MODE_EPP)) { pp 240 drivers/char/ppdev.c wrote = parport_write(pp->pdev->port, kbuffer, n); pp 263 drivers/char/ppdev.c parport_set_timeout(pp->pdev, pp->default_inactivity); pp 266 drivers/char/ppdev.c pp_enable_irq(pp); pp 272 drivers/char/ppdev.c struct pp_struct *pp = private; pp 274 drivers/char/ppdev.c if (pp->irqresponse) { pp 275 drivers/char/ppdev.c parport_write_control(pp->pdev->port, pp->irqctl); pp 276 drivers/char/ppdev.c pp->irqresponse = 0; pp 279 drivers/char/ppdev.c atomic_inc(&pp->irqc); pp 280 drivers/char/ppdev.c 
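
The sata_sx4 hdma entries above implement a tiny deferred-work ring with free-running producer/consumer counters and a power-of-two mask to derive the slot index; an empty queue is detected when the counters meet. A condensed sketch, assuming the caller serializes access as the driver does under the host lock (all names hypothetical):

	#define MY_Q_DEPTH	4		/* must be a power of two */
	#define MY_Q_MASK	(MY_Q_DEPTH - 1)

	struct my_ring {
		unsigned int	prod, cons;	/* free-running counters */
		int		slot[MY_Q_DEPTH];
	};

	static void my_ring_push(struct my_ring *r, int item)
	{
		r->slot[r->prod & MY_Q_MASK] = item;
		r->prod++;
	}

	static bool my_ring_pop(struct my_ring *r, int *item)
	{
		if (r->prod == r->cons)		/* consumer caught up: empty */
			return false;
		*item = r->slot[r->cons & MY_Q_MASK];
		r->cons++;
		return true;
	}
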
wake_up_interruptible(&pp->irq_wait); pp 283 drivers/char/ppdev.c static int register_device(int minor, struct pp_struct *pp) pp 305 drivers/char/ppdev.c ppdev_cb.flags = (pp->flags & PP_EXCL) ? PARPORT_FLAG_EXCL : 0; pp 306 drivers/char/ppdev.c ppdev_cb.private = pp; pp 317 drivers/char/ppdev.c pp->pdev = pdev; pp 318 drivers/char/ppdev.c pp->index = index; pp 355 drivers/char/ppdev.c struct pp_struct *pp = file->private_data; pp 366 drivers/char/ppdev.c if (pp->flags & PP_CLAIMED) { pp 367 drivers/char/ppdev.c dev_dbg(&pp->pdev->dev, "you've already got it!\n"); pp 372 drivers/char/ppdev.c if (!pp->pdev) { pp 373 drivers/char/ppdev.c int err = register_device(minor, pp); pp 379 drivers/char/ppdev.c ret = parport_claim_or_block(pp->pdev); pp 383 drivers/char/ppdev.c pp->flags |= PP_CLAIMED; pp 387 drivers/char/ppdev.c pp_enable_irq(pp); pp 390 drivers/char/ppdev.c info = &pp->pdev->port->ieee1284; pp 391 drivers/char/ppdev.c pp->saved_state.mode = info->mode; pp 392 drivers/char/ppdev.c pp->saved_state.phase = info->phase; pp 393 drivers/char/ppdev.c info->mode = pp->state.mode; pp 394 drivers/char/ppdev.c info->phase = pp->state.phase; pp 395 drivers/char/ppdev.c pp->default_inactivity = parport_set_timeout(pp->pdev, 0); pp 396 drivers/char/ppdev.c parport_set_timeout(pp->pdev, pp->default_inactivity); pp 401 drivers/char/ppdev.c if (pp->pdev) { pp 402 drivers/char/ppdev.c dev_dbg(&pp->pdev->dev, pp 404 drivers/char/ppdev.c if (pp->flags & PP_EXCL) pp 413 drivers/char/ppdev.c pp->flags |= PP_EXCL; pp 422 drivers/char/ppdev.c pp->state.mode = mode; pp 423 drivers/char/ppdev.c pp->state.phase = init_phase(mode); pp 425 drivers/char/ppdev.c if (pp->flags & PP_CLAIMED) { pp 426 drivers/char/ppdev.c pp->pdev->port->ieee1284.mode = mode; pp 427 drivers/char/ppdev.c pp->pdev->port->ieee1284.phase = pp->state.phase; pp 436 drivers/char/ppdev.c if (pp->flags & PP_CLAIMED) pp 437 drivers/char/ppdev.c mode = pp->pdev->port->ieee1284.mode; pp 439 drivers/char/ppdev.c mode = pp->state.mode; pp 453 drivers/char/ppdev.c pp->state.phase = phase; pp 455 drivers/char/ppdev.c if (pp->flags & PP_CLAIMED) pp 456 drivers/char/ppdev.c pp->pdev->port->ieee1284.phase = phase; pp 464 drivers/char/ppdev.c if (pp->flags & PP_CLAIMED) pp 465 drivers/char/ppdev.c phase = pp->pdev->port->ieee1284.phase; pp 467 drivers/char/ppdev.c phase = pp->state.phase; pp 492 drivers/char/ppdev.c pp->flags &= ~PP_FLAGMASK; pp 493 drivers/char/ppdev.c pp->flags |= (uflags & PP_FLAGMASK); pp 500 drivers/char/ppdev.c uflags = pp->flags & PP_FLAGMASK; pp 509 drivers/char/ppdev.c if ((pp->flags & PP_CLAIMED) == 0) { pp 514 drivers/char/ppdev.c port = pp->pdev->port; pp 541 drivers/char/ppdev.c parport_yield_blocking(pp->pdev); pp 546 drivers/char/ppdev.c info = &pp->pdev->port->ieee1284; pp 547 drivers/char/ppdev.c pp->state.mode = info->mode; pp 548 drivers/char/ppdev.c pp->state.phase = info->phase; pp 549 drivers/char/ppdev.c info->mode = pp->saved_state.mode; pp 550 drivers/char/ppdev.c info->phase = pp->saved_state.phase; pp 551 drivers/char/ppdev.c parport_release(pp->pdev); pp 552 drivers/char/ppdev.c pp->flags &= ~PP_CLAIMED; pp 598 drivers/char/ppdev.c pp_enable_irq(pp); pp 607 drivers/char/ppdev.c pp->irqctl = reg; pp 608 drivers/char/ppdev.c pp->irqresponse = 1; pp 612 drivers/char/ppdev.c ret = atomic_read(&pp->irqc); pp 615 drivers/char/ppdev.c atomic_sub(ret, &pp->irqc); pp 625 drivers/char/ppdev.c return pp_set_timeout(pp->pdev, time32[0], time32[1]); pp 637 drivers/char/ppdev.c return pp_set_timeout(pp->pdev, time64[0], 
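
The ppdev entries above follow a strict claim/release discipline: on claim, the port's current IEEE 1284 mode and phase are saved and the file's own settings installed; release restores the saved state before handing the port back. A condensed sketch of the claim half, with error handling and locking elided:

	static int my_pp_claim(struct pp_struct *pp)
	{
		struct ieee1284_info *info;
		int ret;

		ret = parport_claim_or_block(pp->pdev);
		if (ret < 0)
			return ret;
		pp->flags |= PP_CLAIMED;

		info = &pp->pdev->port->ieee1284;
		pp->saved_state.mode  = info->mode;	/* remember port state */
		pp->saved_state.phase = info->phase;
		info->mode  = pp->state.mode;		/* install our own */
		info->phase = pp->state.phase;
		return 0;
	}
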
time64[1]); pp 640 drivers/char/ppdev.c jiffies_to_timespec64(pp->pdev->timeout, &ts); pp 650 drivers/char/ppdev.c jiffies_to_timespec64(pp->pdev->timeout, &ts); pp 663 drivers/char/ppdev.c dev_dbg(&pp->pdev->dev, "What? (cmd=0x%x)\n", cmd); pp 692 drivers/char/ppdev.c struct pp_struct *pp; pp 697 drivers/char/ppdev.c pp = kmalloc(sizeof(struct pp_struct), GFP_KERNEL); pp 698 drivers/char/ppdev.c if (!pp) pp 701 drivers/char/ppdev.c pp->state.mode = IEEE1284_MODE_COMPAT; pp 702 drivers/char/ppdev.c pp->state.phase = init_phase(pp->state.mode); pp 703 drivers/char/ppdev.c pp->flags = 0; pp 704 drivers/char/ppdev.c pp->irqresponse = 0; pp 705 drivers/char/ppdev.c atomic_set(&pp->irqc, 0); pp 706 drivers/char/ppdev.c init_waitqueue_head(&pp->irq_wait); pp 712 drivers/char/ppdev.c pp->pdev = NULL; pp 713 drivers/char/ppdev.c file->private_data = pp; pp 721 drivers/char/ppdev.c struct pp_struct *pp = file->private_data; pp 725 drivers/char/ppdev.c if (!(pp->flags & PP_CLAIMED) && pp->pdev && pp 726 drivers/char/ppdev.c (pp->state.mode != IEEE1284_MODE_COMPAT)) { pp 730 drivers/char/ppdev.c parport_claim_or_block(pp->pdev); pp 731 drivers/char/ppdev.c pp->flags |= PP_CLAIMED; pp 732 drivers/char/ppdev.c info = &pp->pdev->port->ieee1284; pp 733 drivers/char/ppdev.c pp->saved_state.mode = info->mode; pp 734 drivers/char/ppdev.c pp->saved_state.phase = info->phase; pp 735 drivers/char/ppdev.c info->mode = pp->state.mode; pp 736 drivers/char/ppdev.c info->phase = pp->state.phase; pp 738 drivers/char/ppdev.c } else if ((pp->flags & PP_CLAIMED) && pp->pdev && pp 739 drivers/char/ppdev.c (pp->pdev->port->ieee1284.mode != IEEE1284_MODE_COMPAT)) { pp 743 drivers/char/ppdev.c parport_negotiate(pp->pdev->port, IEEE1284_MODE_COMPAT); pp 744 drivers/char/ppdev.c dev_dbg(&pp->pdev->dev, pp 748 drivers/char/ppdev.c if ((pp->flags & PP_CLAIMED) && pp->pdev) { pp 751 drivers/char/ppdev.c info = &pp->pdev->port->ieee1284; pp 752 drivers/char/ppdev.c pp->state.mode = info->mode; pp 753 drivers/char/ppdev.c pp->state.phase = info->phase; pp 754 drivers/char/ppdev.c info->mode = pp->saved_state.mode; pp 755 drivers/char/ppdev.c info->phase = pp->saved_state.phase; pp 756 drivers/char/ppdev.c parport_release(pp->pdev); pp 763 drivers/char/ppdev.c if (pp->pdev) { pp 764 drivers/char/ppdev.c parport_unregister_device(pp->pdev); pp 765 drivers/char/ppdev.c ida_simple_remove(&ida_index, pp->index); pp 766 drivers/char/ppdev.c pp->pdev = NULL; pp 770 drivers/char/ppdev.c kfree(pp); pp 778 drivers/char/ppdev.c struct pp_struct *pp = file->private_data; pp 781 drivers/char/ppdev.c poll_wait(file, &pp->irq_wait, wait); pp 782 drivers/char/ppdev.c if (atomic_read(&pp->irqc)) pp 146 drivers/char/tpm/st33zp24/i2c.c struct device_node *pp; pp 150 drivers/char/tpm/st33zp24/i2c.c pp = client->dev.of_node; pp 151 drivers/char/tpm/st33zp24/i2c.c if (!pp) { pp 157 drivers/char/tpm/st33zp24/i2c.c gpio = of_get_named_gpio(pp, "lpcpd-gpios", 0); pp 264 drivers/char/tpm/st33zp24/spi.c struct device_node *pp; pp 268 drivers/char/tpm/st33zp24/spi.c pp = spi_dev->dev.of_node; pp 269 drivers/char/tpm/st33zp24/spi.c if (!pp) { pp 275 drivers/char/tpm/st33zp24/spi.c gpio = of_get_named_gpio(pp, "lpcpd-gpios", 0); pp 52 drivers/clk/clk-tango4.c struct clk **pp = clk_data.clks; pp 73 drivers/clk/clk-tango4.c pp[0] = clk_register_divider(NULL, "cpu_clk", "pll0", 0, pp 75 drivers/clk/clk-tango4.c pp[1] = clk_register_fixed_factor(NULL, "sys_clk", "pll1", 0, 1, 4); pp 76 drivers/clk/clk-tango4.c pp[2] = clk_register_fixed_factor(NULL, "usb_clk", 
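
The pp_poll entries just above have the standard character-device poll shape: register on the wait queue first, then test the readiness condition, never blocking in poll itself. A minimal sketch of that shape (the interrupt side does atomic_inc() plus a wake_up, as the earlier entries show):

	static __poll_t my_pp_poll(struct file *file, poll_table *wait)
	{
		struct pp_struct *pp = file->private_data;
		__poll_t mask = 0;

		poll_wait(file, &pp->irq_wait, wait);	/* registers, no sleep */
		if (atomic_read(&pp->irqc))		/* interrupts pending? */
			mask |= EPOLLIN | EPOLLRDNORM;
		return mask;
	}
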
"cd2", 0, 1, 2); pp 77 drivers/clk/clk-tango4.c pp[3] = clk_register_fixed_factor(NULL, "sdio_clk", "cd6", 0, 1, 2); pp 79 drivers/clk/clk-tango4.c if (IS_ERR(pp[0]) || IS_ERR(pp[1]) || IS_ERR(pp[2]) || IS_ERR(pp[3])) pp 62 drivers/cpufreq/cpufreq-dt.c struct property *pp; pp 74 drivers/cpufreq/cpufreq-dt.c pp = of_find_property(np, "cpu0-supply", NULL); pp 75 drivers/cpufreq/cpufreq-dt.c if (pp) { pp 81 drivers/cpufreq/cpufreq-dt.c pp = of_find_property(np, "cpu-supply", NULL); pp 82 drivers/cpufreq/cpufreq-dt.c if (pp) { pp 269 drivers/dma/ipu/ipu_idmac.c struct chan_param_mem_planar pp; pp 276 drivers/dma/ipu/ipu_idmac.c params->pp.ubo_l = u_offset & 0x7ff; pp 277 drivers/dma/ipu/ipu_idmac.c params->pp.ubo_h = u_offset >> 11; pp 278 drivers/dma/ipu/ipu_idmac.c params->pp.vbo_l = v_offset & 0x1ffff; pp 279 drivers/dma/ipu/ipu_idmac.c params->pp.vbo_h = v_offset >> 17; pp 289 drivers/dma/ipu/ipu_idmac.c params->pp.fw = width - 1; pp 290 drivers/dma/ipu/ipu_idmac.c params->pp.fh_l = height - 1; pp 291 drivers/dma/ipu/ipu_idmac.c params->pp.fh_h = (height - 1) >> 8; pp 292 drivers/dma/ipu/ipu_idmac.c params->pp.sl = stride - 1; pp 297 drivers/dma/ipu/ipu_idmac.c params->pp.bpp = 3; pp 298 drivers/dma/ipu/ipu_idmac.c params->pp.pfs = 7; pp 299 drivers/dma/ipu/ipu_idmac.c params->pp.npb = 31; pp 300 drivers/dma/ipu/ipu_idmac.c params->pp.sat = 2; /* SAT = use 32-bit access */ pp 304 drivers/dma/ipu/ipu_idmac.c params->pp.bpp = 0; pp 305 drivers/dma/ipu/ipu_idmac.c params->pp.pfs = 7; pp 306 drivers/dma/ipu/ipu_idmac.c params->pp.npb = 7; pp 307 drivers/dma/ipu/ipu_idmac.c params->pp.sat = 2; /* SAT = use 32-bit access */ pp 419 drivers/dma/ipu/ipu_idmac.c params->pp.nsb = 1; pp 425 drivers/dma/ipu/ipu_idmac.c params->pp.eba0 = buf0; pp 426 drivers/dma/ipu/ipu_idmac.c params->pp.eba1 = buf1; pp 432 drivers/dma/ipu/ipu_idmac.c params->pp.bam = rotate; pp 386 drivers/gpio/gpio-dwapb.c struct dwapb_port_property *pp) pp 389 drivers/gpio/gpio-dwapb.c struct fwnode_handle *fwnode = pp->fwnode; pp 443 drivers/gpio/gpio-dwapb.c if (!pp->irq_shared) { pp 446 drivers/gpio/gpio-dwapb.c for (i = 0; i < pp->ngpio; i++) { pp 447 drivers/gpio/gpio-dwapb.c if (pp->irq[i] >= 0) pp 448 drivers/gpio/gpio-dwapb.c irq_set_chained_handler_and_data(pp->irq[i], pp 456 drivers/gpio/gpio-dwapb.c err = devm_request_irq(gpio->dev, pp->irq[0], pp 491 drivers/gpio/gpio-dwapb.c struct dwapb_port_property *pp, pp 500 drivers/gpio/gpio-dwapb.c port->idx = pp->idx; pp 508 drivers/gpio/gpio-dwapb.c dat = gpio->regs + GPIO_EXT_PORTA + (pp->idx * GPIO_EXT_PORT_STRIDE); pp 509 drivers/gpio/gpio-dwapb.c set = gpio->regs + GPIO_SWPORTA_DR + (pp->idx * GPIO_SWPORT_DR_STRIDE); pp 511 drivers/gpio/gpio-dwapb.c (pp->idx * GPIO_SWPORT_DDR_STRIDE); pp 523 drivers/gpio/gpio-dwapb.c port->gc.of_node = to_of_node(pp->fwnode); pp 525 drivers/gpio/gpio-dwapb.c port->gc.ngpio = pp->ngpio; pp 526 drivers/gpio/gpio-dwapb.c port->gc.base = pp->gpio_base; pp 529 drivers/gpio/gpio-dwapb.c if (pp->idx == 0) pp 532 drivers/gpio/gpio-dwapb.c if (pp->has_irq) pp 533 drivers/gpio/gpio-dwapb.c dwapb_configure_irqs(gpio, port, pp); pp 543 drivers/gpio/gpio-dwapb.c if (pp->has_irq) pp 563 drivers/gpio/gpio-dwapb.c struct dwapb_port_property *pp; pp 575 drivers/gpio/gpio-dwapb.c pdata->properties = devm_kcalloc(dev, nports, sizeof(*pp), GFP_KERNEL); pp 585 drivers/gpio/gpio-dwapb.c pp = &pdata->properties[i++]; pp 586 drivers/gpio/gpio-dwapb.c pp->fwnode = fwnode; pp 588 drivers/gpio/gpio-dwapb.c if (fwnode_property_read_u32(fwnode, "reg", &pp->idx) || pp 
589 drivers/gpio/gpio-dwapb.c pp->idx >= DWAPB_MAX_PORTS) { pp 597 drivers/gpio/gpio-dwapb.c &pp->ngpio)) { pp 601 drivers/gpio/gpio-dwapb.c pp->ngpio = 32; pp 604 drivers/gpio/gpio-dwapb.c pp->irq_shared = false; pp 605 drivers/gpio/gpio-dwapb.c pp->gpio_base = -1; pp 611 drivers/gpio/gpio-dwapb.c if (pp->idx != 0) pp 619 drivers/gpio/gpio-dwapb.c for (j = 0; j < pp->ngpio; j++) { pp 620 drivers/gpio/gpio-dwapb.c pp->irq[j] = -ENXIO; pp 623 drivers/gpio/gpio-dwapb.c pp->irq[j] = of_irq_get(np, j); pp 625 drivers/gpio/gpio-dwapb.c pp->irq[j] = platform_get_irq(to_platform_device(dev), j); pp 627 drivers/gpio/gpio-dwapb.c if (pp->irq[j] >= 0) pp 628 drivers/gpio/gpio-dwapb.c pp->has_irq = true; pp 631 drivers/gpio/gpio-dwapb.c if (!pp->has_irq) pp 632 drivers/gpio/gpio-dwapb.c dev_warn(dev, "no irq for port%d\n", pp->idx); pp 60 drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c uint32_t *pp = (uint32_t *) buff; pp 62 drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c pp[0] = cpu_to_le32(hdr->header); pp 63 drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c pp[1] = cpu_to_le32(hdr->version); pp 64 drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c pp[2] = cpu_to_le32(hdr->first_rec_offset); pp 65 drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c pp[3] = cpu_to_le32(hdr->tbl_size); pp 66 drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c pp[4] = cpu_to_le32(hdr->checksum); pp 72 drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c uint32_t *pp = (uint32_t *)buff; pp 74 drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c hdr->header = le32_to_cpu(pp[0]); pp 75 drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c hdr->version = le32_to_cpu(pp[1]); pp 76 drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c hdr->first_rec_offset = le32_to_cpu(pp[2]); pp 77 drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c hdr->tbl_size = le32_to_cpu(pp[3]); pp 78 drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c hdr->checksum = le32_to_cpu(pp[4]); pp 543 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c void pp_rv_set_wm_ranges(struct pp_smu *pp, pp 546 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c const struct dc_context *ctx = pp->dm; pp 599 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c void pp_rv_set_pme_wa_enable(struct pp_smu *pp) pp 601 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c const struct dc_context *ctx = pp->dm; pp 612 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c void pp_rv_set_active_display_count(struct pp_smu *pp, int count) pp 614 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c const struct dc_context *ctx = pp->dm; pp 625 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c void pp_rv_set_min_deep_sleep_dcfclk(struct pp_smu *pp, int clock) pp 627 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c const struct dc_context *ctx = pp->dm; pp 638 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c void pp_rv_set_hard_min_dcefclk_by_freq(struct pp_smu *pp, int clock) pp 640 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c const struct dc_context *ctx = pp->dm; pp 651 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c void pp_rv_set_hard_min_fclk_by_freq(struct pp_smu *pp, int mhz) pp 653 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c const struct dc_context *ctx = pp->dm; pp 664 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c enum pp_smu_status pp_nv_set_wm_ranges(struct pp_smu *pp, pp 667 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c const struct dc_context *ctx = pp->dm; pp 725 
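
The amdgpu_ras_eeprom entries above serialize a header by viewing the byte buffer as an array of 32-bit little-endian words. The same round-trip in a sketch, using __le32 so the endianness is visible in the types (struct and field names are hypothetical):

	struct my_hdr {
		u32 magic;
		u32 version;
		u32 checksum;
	};

	static void my_hdr_to_buf(const struct my_hdr *hdr, u8 *buf)
	{
		__le32 *pp = (__le32 *)buf;

		pp[0] = cpu_to_le32(hdr->magic);	/* fixed on-wire order */
		pp[1] = cpu_to_le32(hdr->version);
		pp[2] = cpu_to_le32(hdr->checksum);
	}

	static void my_buf_to_hdr(struct my_hdr *hdr, const u8 *buf)
	{
		const __le32 *pp = (const __le32 *)buf;

		hdr->magic    = le32_to_cpu(pp[0]);
		hdr->version  = le32_to_cpu(pp[1]);
		hdr->checksum = le32_to_cpu(pp[2]);
	}
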
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c enum pp_smu_status pp_nv_set_pme_wa_enable(struct pp_smu *pp) pp 727 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c const struct dc_context *ctx = pp->dm; pp 741 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c enum pp_smu_status pp_nv_set_display_count(struct pp_smu *pp, int count) pp 743 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c const struct dc_context *ctx = pp->dm; pp 757 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c enum pp_smu_status pp_nv_set_min_deep_sleep_dcfclk(struct pp_smu *pp, int mhz) pp 759 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c const struct dc_context *ctx = pp->dm; pp 774 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c struct pp_smu *pp, int mhz) pp 776 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c const struct dc_context *ctx = pp->dm; pp 796 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c enum pp_smu_status pp_nv_set_hard_min_uclk_by_freq(struct pp_smu *pp, int mhz) pp 798 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c const struct dc_context *ctx = pp->dm; pp 819 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c struct pp_smu *pp, BOOLEAN pstate_handshake_supported) pp 821 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c const struct dc_context *ctx = pp->dm; pp 831 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c enum pp_smu_status pp_nv_set_voltage_by_freq(struct pp_smu *pp, pp 834 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c const struct dc_context *ctx = pp->dm; pp 867 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c struct pp_smu *pp, struct pp_smu_nv_clock_table *max_clocks) pp 869 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c const struct dc_context *ctx = pp->dm; pp 885 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c enum pp_smu_status pp_nv_get_uclk_dpm_states(struct pp_smu *pp, pp 888 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c const struct dc_context *ctx = pp->dm; pp 1501 drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c struct pp_smu_funcs_rv *pp = NULL; pp 1507 drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c pp = &dc->res_pool->pp_smu->rv_funcs; pp 1508 drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c if (!pp || !pp->set_wm_ranges) pp 1561 drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c pp->set_wm_ranges(&pp->pp_smu, &ranges); pp 1322 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c enum pp_smu_status dummy_set_wm_ranges(struct pp_smu *pp, pp 1328 drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c enum pp_smu_status dummy_get_dpm_clock_table(struct pp_smu *pp, pp 56 drivers/gpu/drm/amd/display/dc/dm_pp_smu.h const void *pp; pp 109 drivers/gpu/drm/amd/display/dc/dm_pp_smu.h void (*set_display_count)(struct pp_smu *pp, int count); pp 118 drivers/gpu/drm/amd/display/dc/dm_pp_smu.h void (*set_wm_ranges)(struct pp_smu *pp, pp 124 drivers/gpu/drm/amd/display/dc/dm_pp_smu.h void (*set_hard_min_dcfclk_by_freq)(struct pp_smu *pp, int mhz); pp 130 drivers/gpu/drm/amd/display/dc/dm_pp_smu.h void (*set_min_deep_sleep_dcfclk)(struct pp_smu *pp, int mhz); pp 135 drivers/gpu/drm/amd/display/dc/dm_pp_smu.h void (*set_hard_min_fclk_by_freq)(struct pp_smu *pp, int mhz); pp 140 drivers/gpu/drm/amd/display/dc/dm_pp_smu.h void (*set_hard_min_socclk_by_freq)(struct pp_smu *pp, int mhz); pp 143 drivers/gpu/drm/amd/display/dc/dm_pp_smu.h void (*set_pme_wa_enable)(struct pp_smu *pp); pp 180 drivers/gpu/drm/amd/display/dc/dm_pp_smu.h enum 
pp_smu_status (*set_display_count)(struct pp_smu *pp, int count); pp 185 drivers/gpu/drm/amd/display/dc/dm_pp_smu.h enum pp_smu_status (*set_hard_min_dcfclk_by_freq)(struct pp_smu *pp, int Mhz); pp 191 drivers/gpu/drm/amd/display/dc/dm_pp_smu.h enum pp_smu_status (*set_min_deep_sleep_dcfclk)(struct pp_smu *pp, int Mhz); pp 196 drivers/gpu/drm/amd/display/dc/dm_pp_smu.h enum pp_smu_status (*set_hard_min_uclk_by_freq)(struct pp_smu *pp, int Mhz); pp 201 drivers/gpu/drm/amd/display/dc/dm_pp_smu.h enum pp_smu_status (*set_hard_min_socclk_by_freq)(struct pp_smu *pp, int Mhz); pp 204 drivers/gpu/drm/amd/display/dc/dm_pp_smu.h enum pp_smu_status (*set_pme_wa_enable)(struct pp_smu *pp); pp 209 drivers/gpu/drm/amd/display/dc/dm_pp_smu.h enum pp_smu_status (*set_voltage_by_freq)(struct pp_smu *pp, pp 224 drivers/gpu/drm/amd/display/dc/dm_pp_smu.h enum pp_smu_status (*set_wm_ranges)(struct pp_smu *pp, pp 230 drivers/gpu/drm/amd/display/dc/dm_pp_smu.h enum pp_smu_status (*get_maximum_sustainable_clocks)(struct pp_smu *pp, pp 235 drivers/gpu/drm/amd/display/dc/dm_pp_smu.h enum pp_smu_status (*get_uclk_dpm_states)(struct pp_smu *pp, pp 247 drivers/gpu/drm/amd/display/dc/dm_pp_smu.h enum pp_smu_status (*set_pstate_handshake_support)(struct pp_smu *pp, pp 285 drivers/gpu/drm/amd/display/dc/dm_pp_smu.h enum pp_smu_status (*set_wm_ranges)(struct pp_smu *pp, pp 288 drivers/gpu/drm/amd/display/dc/dm_pp_smu.h enum pp_smu_status (*get_dpm_clock_table) (struct pp_smu *pp, pp 383 drivers/gpu/drm/gma500/cdv_intel_dp.c u32 pp; pp 391 drivers/gpu/drm/gma500/cdv_intel_dp.c pp = REG_READ(PP_CONTROL); pp 393 drivers/gpu/drm/gma500/cdv_intel_dp.c pp |= EDP_FORCE_VDD; pp 394 drivers/gpu/drm/gma500/cdv_intel_dp.c REG_WRITE(PP_CONTROL, pp); pp 402 drivers/gpu/drm/gma500/cdv_intel_dp.c u32 pp; pp 405 drivers/gpu/drm/gma500/cdv_intel_dp.c pp = REG_READ(PP_CONTROL); pp 407 drivers/gpu/drm/gma500/cdv_intel_dp.c pp &= ~EDP_FORCE_VDD; pp 408 drivers/gpu/drm/gma500/cdv_intel_dp.c REG_WRITE(PP_CONTROL, pp); pp 418 drivers/gpu/drm/gma500/cdv_intel_dp.c u32 pp, idle_on_mask = PP_ON | PP_SEQUENCE_NONE; pp 424 drivers/gpu/drm/gma500/cdv_intel_dp.c pp = REG_READ(PP_CONTROL); pp 425 drivers/gpu/drm/gma500/cdv_intel_dp.c pp &= ~PANEL_UNLOCK_MASK; pp 427 drivers/gpu/drm/gma500/cdv_intel_dp.c pp |= (PANEL_UNLOCK_REGS | POWER_TARGET_ON); pp 428 drivers/gpu/drm/gma500/cdv_intel_dp.c REG_WRITE(PP_CONTROL, pp); pp 444 drivers/gpu/drm/gma500/cdv_intel_dp.c u32 pp, idle_off_mask = PP_ON ; pp 449 drivers/gpu/drm/gma500/cdv_intel_dp.c pp = REG_READ(PP_CONTROL); pp 451 drivers/gpu/drm/gma500/cdv_intel_dp.c if ((pp & POWER_TARGET_ON) == 0) pp 455 drivers/gpu/drm/gma500/cdv_intel_dp.c pp &= ~PANEL_UNLOCK_MASK; pp 458 drivers/gpu/drm/gma500/cdv_intel_dp.c pp &= ~POWER_TARGET_ON; pp 459 drivers/gpu/drm/gma500/cdv_intel_dp.c pp &= ~EDP_FORCE_VDD; pp 460 drivers/gpu/drm/gma500/cdv_intel_dp.c pp &= ~EDP_BLC_ENABLE; pp 461 drivers/gpu/drm/gma500/cdv_intel_dp.c REG_WRITE(PP_CONTROL, pp); pp 476 drivers/gpu/drm/gma500/cdv_intel_dp.c u32 pp; pp 486 drivers/gpu/drm/gma500/cdv_intel_dp.c pp = REG_READ(PP_CONTROL); pp 488 drivers/gpu/drm/gma500/cdv_intel_dp.c pp |= EDP_BLC_ENABLE; pp 489 drivers/gpu/drm/gma500/cdv_intel_dp.c REG_WRITE(PP_CONTROL, pp); pp 497 drivers/gpu/drm/gma500/cdv_intel_dp.c u32 pp; pp 502 drivers/gpu/drm/gma500/cdv_intel_dp.c pp = REG_READ(PP_CONTROL); pp 504 drivers/gpu/drm/gma500/cdv_intel_dp.c pp &= ~EDP_BLC_ENABLE; pp 505 drivers/gpu/drm/gma500/cdv_intel_dp.c REG_WRITE(PP_CONTROL, pp); pp 2552 drivers/gpu/drm/i915/display/intel_dp.c u32 
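
The dm_pp_smu.h entries above define per-ASIC callback tables, and callers such as the dcn_calcs entry check both the table and the individual hook before invoking it. A reduced sketch of that optional-ops pattern (structure and hook names here are illustrative, not the real interface):

	struct my_smu_funcs {
		void *ctx;				/* opaque backend handle */
		int (*set_wm_ranges)(void *ctx, const void *ranges);
		int (*set_display_count)(void *ctx, int count);
	};

	static int my_apply_wm_ranges(struct my_smu_funcs *funcs,
				      const void *ranges)
	{
		/* hooks are optional: absent ones are simply skipped */
		if (!funcs || !funcs->set_wm_ranges)
			return 0;
		return funcs->set_wm_ranges(funcs->ctx, ranges);
	}
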
pp; pp 2576 drivers/gpu/drm/i915/display/intel_dp.c pp = ironlake_get_pp_control(intel_dp); pp 2577 drivers/gpu/drm/i915/display/intel_dp.c pp |= EDP_FORCE_VDD; pp 2582 drivers/gpu/drm/i915/display/intel_dp.c I915_WRITE(pp_ctrl_reg, pp); pp 2625 drivers/gpu/drm/i915/display/intel_dp.c u32 pp; pp 2638 drivers/gpu/drm/i915/display/intel_dp.c pp = ironlake_get_pp_control(intel_dp); pp 2639 drivers/gpu/drm/i915/display/intel_dp.c pp &= ~EDP_FORCE_VDD; pp 2644 drivers/gpu/drm/i915/display/intel_dp.c I915_WRITE(pp_ctrl_reg, pp); pp 2651 drivers/gpu/drm/i915/display/intel_dp.c if ((pp & PANEL_POWER_ON) == 0) pp 2712 drivers/gpu/drm/i915/display/intel_dp.c u32 pp; pp 2731 drivers/gpu/drm/i915/display/intel_dp.c pp = ironlake_get_pp_control(intel_dp); pp 2734 drivers/gpu/drm/i915/display/intel_dp.c pp &= ~PANEL_POWER_RESET; pp 2735 drivers/gpu/drm/i915/display/intel_dp.c I915_WRITE(pp_ctrl_reg, pp); pp 2739 drivers/gpu/drm/i915/display/intel_dp.c pp |= PANEL_POWER_ON; pp 2741 drivers/gpu/drm/i915/display/intel_dp.c pp |= PANEL_POWER_RESET; pp 2743 drivers/gpu/drm/i915/display/intel_dp.c I915_WRITE(pp_ctrl_reg, pp); pp 2750 drivers/gpu/drm/i915/display/intel_dp.c pp |= PANEL_POWER_RESET; /* restore panel reset bit */ pp 2751 drivers/gpu/drm/i915/display/intel_dp.c I915_WRITE(pp_ctrl_reg, pp); pp 2772 drivers/gpu/drm/i915/display/intel_dp.c u32 pp; pp 2786 drivers/gpu/drm/i915/display/intel_dp.c pp = ironlake_get_pp_control(intel_dp); pp 2789 drivers/gpu/drm/i915/display/intel_dp.c pp &= ~(PANEL_POWER_ON | PANEL_POWER_RESET | EDP_FORCE_VDD | pp 2796 drivers/gpu/drm/i915/display/intel_dp.c I915_WRITE(pp_ctrl_reg, pp); pp 2833 drivers/gpu/drm/i915/display/intel_dp.c u32 pp; pp 2835 drivers/gpu/drm/i915/display/intel_dp.c pp = ironlake_get_pp_control(intel_dp); pp 2836 drivers/gpu/drm/i915/display/intel_dp.c pp |= EDP_BLC_ENABLE; pp 2838 drivers/gpu/drm/i915/display/intel_dp.c I915_WRITE(pp_ctrl_reg, pp); pp 2869 drivers/gpu/drm/i915/display/intel_dp.c u32 pp; pp 2871 drivers/gpu/drm/i915/display/intel_dp.c pp = ironlake_get_pp_control(intel_dp); pp 2872 drivers/gpu/drm/i915/display/intel_dp.c pp &= ~EDP_BLC_ENABLE; pp 2874 drivers/gpu/drm/i915/display/intel_dp.c I915_WRITE(pp_ctrl_reg, pp); pp 6556 drivers/gpu/drm/i915/display/intel_dp.c u32 pp = ironlake_get_pp_control(intel_dp); pp 6558 drivers/gpu/drm/i915/display/intel_dp.c WARN(pp & PANEL_POWER_ON, "Panel power already on\n"); pp 6560 drivers/gpu/drm/i915/display/intel_dp.c if (pp & EDP_FORCE_VDD) pp 6563 drivers/gpu/drm/i915/display/intel_dp.c pp &= ~EDP_FORCE_VDD; pp 6565 drivers/gpu/drm/i915/display/intel_dp.c I915_WRITE(regs.pp_ctrl, pp); pp 21 drivers/gpu/drm/lima/lima_bcast.c struct lima_ip *pp = pipe->processor[i]; pp 23 drivers/gpu/drm/lima/lima_bcast.c mask |= 1 << (pp->id - lima_ip_pp0); pp 52 drivers/gpu/drm/lima/lima_device.c LIMA_IP_DESC(pp0, true, true, 0x08000, 0x08000, pp, "pp0"), pp 53 drivers/gpu/drm/lima/lima_device.c LIMA_IP_DESC(pp1, false, false, 0x0A000, 0x0A000, pp, "pp1"), pp 54 drivers/gpu/drm/lima/lima_device.c LIMA_IP_DESC(pp2, false, false, 0x0C000, 0x0C000, pp, "pp2"), pp 55 drivers/gpu/drm/lima/lima_device.c LIMA_IP_DESC(pp3, false, false, 0x0E000, 0x0E000, pp, "pp3"), pp 56 drivers/gpu/drm/lima/lima_device.c LIMA_IP_DESC(pp4, false, false, -1, 0x28000, pp, "pp4"), pp 57 drivers/gpu/drm/lima/lima_device.c LIMA_IP_DESC(pp5, false, false, -1, 0x2A000, pp, "pp5"), pp 58 drivers/gpu/drm/lima/lima_device.c LIMA_IP_DESC(pp6, false, false, -1, 0x2C000, pp, "pp6"), pp 59 drivers/gpu/drm/lima/lima_device.c LIMA_IP_DESC(pp7, 
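
Both the gma500 and i915 entries above drive eDP panel power with the same read-modify-write shape on a panel-power control register. A generic sketch of that shape; the register offset and bit below are placeholders, and the real drivers additionally honor the panel power-sequencing delays:

	#define MY_PP_CONTROL	0x61204		/* hypothetical register offset */
	#define MY_FORCE_VDD	(1 << 3)	/* hypothetical VDD-force bit */

	static void my_edp_force_vdd(void __iomem *regs, bool enable)
	{
		u32 pp = readl(regs + MY_PP_CONTROL);	/* read current state */

		if (enable)
			pp |= MY_FORCE_VDD;
		else
			pp &= ~MY_FORCE_VDD;

		writel(pp, regs + MY_PP_CONTROL);	/* write it back */
	}
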
false, false, -1, 0x2E000, pp, "pp7"), pp 251 drivers/gpu/drm/lima/lima_device.c struct lima_ip *pp = dev->ip + lima_ip_pp0 + i; pp 260 drivers/gpu/drm/lima/lima_device.c if (pp->present && ppmmu->present && l2_cache->present) { pp 262 drivers/gpu/drm/lima/lima_device.c pipe->processor[pipe->num_processor++] = pp; pp 22 drivers/gpu/drm/lima/lima_dlbu.c struct lima_ip *pp = pipe->processor[i]; pp 24 drivers/gpu/drm/lima/lima_dlbu.c mask |= 1 << (pp->id - lima_ip_pp0); pp 31 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c static struct dpu_pingpong_cfg *_pingpong_offset(enum dpu_pingpong pp, pp 39 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c if (pp == m->pingpong[i].id) { pp 52 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c static int dpu_hw_pp_setup_te_config(struct dpu_hw_pingpong *pp, pp 58 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c if (!pp || !te) pp 60 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c c = &pp->hw; pp 82 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c static int dpu_hw_pp_poll_timeout_wr_ptr(struct dpu_hw_pingpong *pp, pp 89 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c if (!pp) pp 92 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c c = &pp->hw; pp 99 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c static int dpu_hw_pp_enable_te(struct dpu_hw_pingpong *pp, bool enable) pp 103 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c if (!pp) pp 105 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c c = &pp->hw; pp 111 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c static int dpu_hw_pp_connect_external_te(struct dpu_hw_pingpong *pp, pp 114 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c struct dpu_hw_blk_reg_map *c = &pp->hw; pp 118 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c if (!pp) pp 121 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c c = &pp->hw; pp 129 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c trace_dpu_pp_connect_ext_te(pp->idx - PINGPONG_0, cfg); pp 134 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c static int dpu_hw_pp_get_vsync_info(struct dpu_hw_pingpong *pp, pp 140 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c if (!pp || !info) pp 142 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c c = &pp->hw; pp 157 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c static u32 dpu_hw_pp_get_line_count(struct dpu_hw_pingpong *pp) pp 159 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c struct dpu_hw_blk_reg_map *c = &pp->hw; pp 163 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c if (!pp) pp 165 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c c = &pp->hw; pp 222 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c void dpu_hw_pingpong_destroy(struct dpu_hw_pingpong *pp) pp 224 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c if (pp) pp 225 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c dpu_hw_blk_destroy(&pp->base); pp 226 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c kfree(pp); pp 52 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h int (*setup_tearcheck)(struct dpu_hw_pingpong *pp, pp 58 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h int (*enable_tearcheck)(struct dpu_hw_pingpong *pp, pp 65 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h int (*connect_external_te)(struct dpu_hw_pingpong *pp, pp 72 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h int (*get_vsync_info)(struct dpu_hw_pingpong *pp, pp 79 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h int (*poll_timeout_wr_ptr)(struct dpu_hw_pingpong *pp, u32 timeout_us); pp 84 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h u32 (*get_line_count)(struct dpu_hw_pingpong *pp); pp 116 drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h 
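
Each dpu_hw_pingpong hook above begins the same way: validate the block pointer, then operate on its register map. A skeletal example of one such hook, assuming the driver's DPU_REG_READ accessor; the offset and its meaning are invented here:

	#define MY_PP_LINE_COUNT	0x0B0	/* hypothetical offset */

	static u32 my_pp_get_line_count(struct dpu_hw_pingpong *pp)
	{
		struct dpu_hw_blk_reg_map *c;

		if (!pp)			/* hooks tolerate NULL blocks */
			return 0;
		c = &pp->hw;

		return DPU_REG_READ(c, MY_PP_LINE_COUNT);
	}
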
void dpu_hw_pingpong_destroy(struct dpu_hw_pingpong *pp); pp 310 drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c struct dpu_rm_hw_blk **pp, pp 316 drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c *pp = NULL; pp 342 drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c *pp = iter.blk; pp 347 drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c if (!*pp) { pp 352 drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c if (RESERVED_BY_OTHER(*pp, enc_id)) { pp 354 drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c (*pp)->id); pp 366 drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c struct dpu_rm_hw_blk *pp[MAX_BLOCKS]; pp 381 drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c memset(&pp, 0, sizeof(pp)); pp 388 drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c &pp[lm_count], NULL)) pp 403 drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c &pp[lm_count], iter_i.blk)) pp 421 drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c pp[i]->enc_id = enc_id; pp 423 drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c trace_dpu_rm_reserve_lms(lm[i]->id, enc_id, pp[i]->id); pp 544 drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h TP_PROTO(uint32_t drm_id, enum dpu_pingpong pp, bool enable, pp 546 drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h TP_ARGS(drm_id, pp, enable, refcnt), pp 549 drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h __field( enum dpu_pingpong, pp ) pp 555 drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h __entry->pp = pp; pp 560 drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h __entry->pp, __entry->enable ? "true" : "false", pp 565 drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h TP_PROTO(uint32_t drm_id, enum dpu_pingpong pp, int new_count, pp 567 drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h TP_ARGS(drm_id, pp, new_count, event), pp 570 drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h __field( enum dpu_pingpong, pp ) pp 576 drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h __entry->pp = pp; pp 581 drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h __entry->pp, __entry->new_count, __entry->event) pp 585 drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h TP_PROTO(uint32_t drm_id, enum dpu_pingpong pp, int timeout_count, pp 587 drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h TP_ARGS(drm_id, pp, timeout_count, kickoff_count, event), pp 590 drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h __field( enum dpu_pingpong, pp ) pp 597 drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h __entry->pp = pp; pp 603 drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h __entry->drm_id, __entry->pp, __entry->timeout_count, pp 871 drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h TP_PROTO(enum dpu_pingpong pp, u32 cfg), pp 872 drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h TP_ARGS(pp, cfg), pp 874 drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h __field( enum dpu_pingpong, pp ) pp 878 drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h __entry->pp = pp; pp 881 drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h TP_printk("pp:%d cfg:%u", __entry->pp, __entry->cfg) pp 1306 drivers/gpu/drm/msm/disp/mdp5/mdp5.xml.h case 0: return (mdp5_cfg->pp.base[0]); pp 1307 drivers/gpu/drm/msm/disp/mdp5/mdp5.xml.h case 1: return (mdp5_cfg->pp.base[1]); pp 1308 drivers/gpu/drm/msm/disp/mdp5/mdp5.xml.h case 2: return (mdp5_cfg->pp.base[2]); pp 1309 drivers/gpu/drm/msm/disp/mdp5/mdp5.xml.h case 3: return (mdp5_cfg->pp.base[3]); pp 66 drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c { .id = 0, .pp = 0, .dspp = 0, pp 68 drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c { .id = 1, .pp = 1, .dspp = 1, pp 70 drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c { .id = 2, .pp = 2, .dspp = 2, pp 72 drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c { .id = 3, .pp = -1, .dspp = -1, pp 74 drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c { .id = 4, .pp = -1, .dspp = -1, pp 85 drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c .pp = { pp 144 drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c { .id = 
0, .pp = 0, .dspp = 0, pp 146 drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c { .id = 1, .pp = 1, .dspp = 1, pp 148 drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c { .id = 2, .pp = 2, .dspp = 2, pp 150 drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c { .id = 3, .pp = -1, .dspp = -1, pp 152 drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c { .id = 4, .pp = -1, .dspp = -1, pp 167 drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c .pp = { pp 234 drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c { .id = 0, .pp = 0, .dspp = 0, pp 237 drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c { .id = 1, .pp = 1, .dspp = 1, pp 239 drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c { .id = 2, .pp = 2, .dspp = 2, pp 242 drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c { .id = 3, .pp = -1, .dspp = -1, pp 244 drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c { .id = 4, .pp = -1, .dspp = -1, pp 246 drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c { .id = 5, .pp = 3, .dspp = 3, pp 262 drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c .pp = { pp 321 drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c { .id = 0, .pp = 0, .dspp = 0, pp 323 drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c { .id = 3, .pp = -1, .dspp = -1, pp 396 drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c { .id = 0, .pp = 0, .dspp = 0, pp 399 drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c { .id = 1, .pp = 1, .dspp = 1, pp 401 drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c { .id = 2, .pp = 2, .dspp = 2, pp 404 drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c { .id = 3, .pp = -1, .dspp = -1, pp 406 drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c { .id = 4, .pp = -1, .dspp = -1, pp 408 drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c { .id = 5, .pp = 3, .dspp = 3, pp 424 drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c .pp = { pp 497 drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c { .id = 0, .pp = 0, .dspp = 0, pp 500 drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c { .id = 1, .pp = 1, .dspp = 1, pp 502 drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c { .id = 2, .pp = 2, .dspp = -1, pp 505 drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c { .id = 3, .pp = -1, .dspp = -1, pp 507 drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c { .id = 4, .pp = -1, .dspp = -1, pp 509 drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c { .id = 5, .pp = 3, .dspp = -1, pp 524 drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c .pp = { pp 601 drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c { .id = 0, .pp = 0, .dspp = 0, pp 603 drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c { .id = 1, .pp = -1, .dspp = -1, pp 615 drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c .pp = { pp 690 drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c { .id = 0, .pp = 0, .dspp = 0, pp 693 drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c { .id = 1, .pp = 1, .dspp = 1, pp 695 drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c { .id = 2, .pp = 2, .dspp = -1, pp 698 drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c { .id = 3, .pp = -1, .dspp = -1, pp 700 drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c { .id = 4, .pp = -1, .dspp = -1, pp 702 drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c { .id = 5, .pp = 3, .dspp = -1, pp 717 drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c .pp = { pp 36 drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.h int pp; pp 92 drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.h struct mdp5_sub_block pp; pp 47 drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c int pp_id = mixer->pp; pp 91 drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c int pp_id = mixer->pp; pp 117 drivers/gpu/drm/msm/disp/mdp5/mdp5_cmd_encoder.c int pp_id = mixer->pp; pp 243 drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.h return MDP5_IRQ_PING_PONG_0_RD_PTR << mixer->pp; pp 259 drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.h return MDP5_IRQ_PING_PONG_0_DONE << mixer->pp; pp 156 drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.c mixer->pp = lm->pp; pp 17 
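
The mdp5 entries above map each mixer to a ping-pong block index and derive that block's interrupt bit by shifting the instance-0 bit left by the index. The idea in isolation (mask name invented):

	#define MY_IRQ_PP0_DONE		(1 << 8)	/* hypothetical pp0 bit */

	static u32 my_ppdone_irqmask(int pp_idx)
	{
		/* pp1 is one bit above pp0, pp2 two bits above, and so on */
		return MY_IRQ_PP0_DONE << pp_idx;
	}
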
drivers/gpu/drm/msm/disp/mdp5/mdp5_mixer.h int pp; pp 199 drivers/gpu/drm/tiny/repaper.c static void repaper_even_pixels(struct repaper_epd *epd, u8 **pp, pp 237 drivers/gpu/drm/tiny/repaper.c *(*pp)++ = pixels; pp 239 drivers/gpu/drm/tiny/repaper.c *(*pp)++ = fixed_value; pp 245 drivers/gpu/drm/tiny/repaper.c static void repaper_odd_pixels(struct repaper_epd *epd, u8 **pp, pp 277 drivers/gpu/drm/tiny/repaper.c *(*pp)++ = pixels; pp 279 drivers/gpu/drm/tiny/repaper.c *(*pp)++ = fixed_value; pp 295 drivers/gpu/drm/tiny/repaper.c static void repaper_all_pixels(struct repaper_epd *epd, u8 **pp, pp 329 drivers/gpu/drm/tiny/repaper.c *(*pp)++ = pixels >> 8; pp 330 drivers/gpu/drm/tiny/repaper.c *(*pp)++ = pixels; pp 332 drivers/gpu/drm/tiny/repaper.c *(*pp)++ = fixed_value; pp 333 drivers/gpu/drm/tiny/repaper.c *(*pp)++ = fixed_value; pp 607 drivers/iio/dac/ad5755.c struct device_node *pp; pp 655 drivers/iio/dac/ad5755.c for_each_child_of_node(np, pp) { pp 662 drivers/iio/dac/ad5755.c if (!of_property_read_u32(pp, "adi,mode", &tmp)) pp 668 drivers/iio/dac/ad5755.c of_property_read_bool(pp, "adi,ext-current-sense-resistor"); pp 671 drivers/iio/dac/ad5755.c of_property_read_bool(pp, "adi,enable-voltage-overrange"); pp 673 drivers/iio/dac/ad5755.c if (!of_property_read_u32_array(pp, "adi,slew", tmparray, 3)) { pp 46 drivers/infiniband/core/security.c static struct pkey_index_qp_list *get_pkey_idx_qp_list(struct ib_port_pkey *pp) pp 50 drivers/infiniband/core/security.c struct ib_device *dev = pp->sec->dev; pp 52 drivers/infiniband/core/security.c spin_lock(&dev->port_data[pp->port_num].pkey_list_lock); pp 53 drivers/infiniband/core/security.c list_for_each_entry (tmp_pkey, &dev->port_data[pp->port_num].pkey_list, pp 55 drivers/infiniband/core/security.c if (tmp_pkey->pkey_index == pp->pkey_index) { pp 60 drivers/infiniband/core/security.c spin_unlock(&dev->port_data[pp->port_num].pkey_list_lock); pp 64 drivers/infiniband/core/security.c static int get_pkey_and_subnet_prefix(struct ib_port_pkey *pp, pp 68 drivers/infiniband/core/security.c struct ib_device *dev = pp->sec->dev; pp 71 drivers/infiniband/core/security.c ret = ib_get_cached_pkey(dev, pp->port_num, pp->pkey_index, pkey); pp 75 drivers/infiniband/core/security.c ret = ib_get_cached_subnet_prefix(dev, pp->port_num, subnet_prefix); pp 199 drivers/infiniband/core/security.c struct ib_port_pkey *pp, *tmp_pp; pp 209 drivers/infiniband/core/security.c list_for_each_entry(pp, &pkey->qp_list, qp_list) { pp 210 drivers/infiniband/core/security.c if (atomic_read(&pp->sec->error_list_count)) pp 215 drivers/infiniband/core/security.c pp->sec)) { pp 216 drivers/infiniband/core/security.c atomic_inc(&pp->sec->error_list_count); pp 217 drivers/infiniband/core/security.c list_add(&pp->to_error_list, pp 224 drivers/infiniband/core/security.c list_for_each_entry_safe(pp, pp 228 drivers/infiniband/core/security.c mutex_lock(&pp->sec->mutex); pp 229 drivers/infiniband/core/security.c qp_to_error(pp->sec); pp 230 drivers/infiniband/core/security.c list_del(&pp->to_error_list); pp 231 drivers/infiniband/core/security.c atomic_dec(&pp->sec->error_list_count); pp 232 drivers/infiniband/core/security.c comp = pp->sec->destroying; pp 233 drivers/infiniband/core/security.c mutex_unlock(&pp->sec->mutex); pp 236 drivers/infiniband/core/security.c complete(&pp->sec->error_complete); pp 243 drivers/infiniband/core/security.c static int port_pkey_list_insert(struct ib_port_pkey *pp) pp 248 drivers/infiniband/core/security.c u8 port_num = pp->port_num; pp 251 
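
The repaper entries above stream bytes through a u8 ** "output cursor": each helper writes via *(*pp)++ so the caller's pointer keeps advancing across successive calls. A tiny self-contained sketch of the idiom:

	static void my_emit_pair(u8 **pp, u8 hi, u8 lo)
	{
		*(*pp)++ = hi;		/* store, then advance caller's cursor */
		*(*pp)++ = lo;
	}

	/* usage: p walks across buf as the helpers are called */
	static void my_fill(u8 *buf)
	{
		u8 *p = buf;

		my_emit_pair(&p, 0xAA, 0x55);
		my_emit_pair(&p, 0x12, 0x34);	/* p == buf + 4 afterwards */
	}
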
drivers/infiniband/core/security.c if (pp->state != IB_PORT_PKEY_VALID) pp 254 drivers/infiniband/core/security.c dev = pp->sec->dev; pp 256 drivers/infiniband/core/security.c pkey = get_pkey_idx_qp_list(pp); pp 272 drivers/infiniband/core/security.c if (tmp_pkey->pkey_index == pp->pkey_index) { pp 281 drivers/infiniband/core/security.c pkey->pkey_index = pp->pkey_index; pp 291 drivers/infiniband/core/security.c list_add(&pp->qp_list, &pkey->qp_list); pp 294 drivers/infiniband/core/security.c pp->state = IB_PORT_PKEY_LISTED; pp 302 drivers/infiniband/core/security.c static void port_pkey_list_remove(struct ib_port_pkey *pp) pp 306 drivers/infiniband/core/security.c if (pp->state != IB_PORT_PKEY_LISTED) pp 309 drivers/infiniband/core/security.c pkey = get_pkey_idx_qp_list(pp); pp 312 drivers/infiniband/core/security.c list_del(&pp->qp_list); pp 318 drivers/infiniband/core/security.c pp->state = IB_PORT_PKEY_VALID; pp 116 drivers/infiniband/sw/rdmavt/mmap.c struct rvt_mmap_info *ip, *pp; pp 125 drivers/infiniband/sw/rdmavt/mmap.c list_for_each_entry_safe(ip, pp, &rdi->pending_mmaps, pp 96 drivers/infiniband/sw/rxe/rxe_mmap.c struct rxe_mmap_info *ip, *pp; pp 105 drivers/infiniband/sw/rxe/rxe_mmap.c list_for_each_entry_safe(ip, pp, &rxe->pending_mmaps, pending_mmaps) { pp 399 drivers/infiniband/sw/siw/siw_qp_tx.c static void siw_unmap_pages(struct page **pp, unsigned long kmap_mask) pp 403 drivers/infiniband/sw/siw/siw_qp_tx.c kunmap(*pp); pp 404 drivers/infiniband/sw/siw/siw_qp_tx.c pp++; pp 546 drivers/input/joystick/db9.c static void db9_attach(struct parport *pp) pp 561 drivers/input/joystick/db9.c if (db9_cfg[port_idx].args[DB9_ARG_PARPORT] == pp->number) pp 566 drivers/input/joystick/db9.c pr_debug("Not using parport%d.\n", pp->number); pp 579 drivers/input/joystick/db9.c if (db9_mode->bidirectional && !(pp->modes & PARPORT_MODE_TRISTATE)) { pp 587 drivers/input/joystick/db9.c pd = parport_register_dev_model(pp, "db9", &db9_parport_cb, port_idx); pp 600 drivers/input/joystick/db9.c db9->parportno = pp->number; pp 921 drivers/input/joystick/gamecon.c static void gc_attach(struct parport *pp) pp 934 drivers/input/joystick/gamecon.c if (gc_cfg[port_idx].args[0] == pp->number) pp 939 drivers/input/joystick/gamecon.c pr_debug("Not using parport%d.\n", pp->number); pp 948 drivers/input/joystick/gamecon.c pd = parport_register_dev_model(pp, "gamecon", &gc_parport_cb, pp 963 drivers/input/joystick/gamecon.c gc->parportno = pp->number; pp 144 drivers/input/joystick/turbografx.c static void tgfx_attach(struct parport *pp) pp 157 drivers/input/joystick/turbografx.c if (tgfx_cfg[port_idx].args[0] == pp->number) pp 162 drivers/input/joystick/turbografx.c pr_debug("Not using parport%d.\n", pp->number); pp 171 drivers/input/joystick/turbografx.c pd = parport_register_dev_model(pp, "turbografx", &tgfx_parport_cb, pp 186 drivers/input/joystick/turbografx.c tgfx->parportno = pp->number; pp 199 drivers/input/joystick/walkera0701.c static void walkera0701_attach(struct parport *pp) pp 204 drivers/input/joystick/walkera0701.c if (pp->number != walkera0701_pp_no) { pp 205 drivers/input/joystick/walkera0701.c pr_debug("Not using parport%d.\n", pp->number); pp 209 drivers/input/joystick/walkera0701.c if (pp->irq == -1) { pp 211 drivers/input/joystick/walkera0701.c pp->number); pp 215 drivers/input/joystick/walkera0701.c w->parport = pp; pp 222 drivers/input/joystick/walkera0701.c w->pardevice = parport_register_dev_model(pp, "walkera0701", pp 173 drivers/input/keyboard/sun4i-lradc-keys.c struct device_node 
*np, *pp; pp 194 drivers/input/keyboard/sun4i-lradc-keys.c for_each_child_of_node(np, pp) { pp 198 drivers/input/keyboard/sun4i-lradc-keys.c error = of_property_read_u32(pp, "channel", &channel); pp 200 drivers/input/keyboard/sun4i-lradc-keys.c dev_err(dev, "%pOFn: Inval channel prop\n", pp); pp 201 drivers/input/keyboard/sun4i-lradc-keys.c of_node_put(pp); pp 205 drivers/input/keyboard/sun4i-lradc-keys.c error = of_property_read_u32(pp, "voltage", &map->voltage); pp 207 drivers/input/keyboard/sun4i-lradc-keys.c dev_err(dev, "%pOFn: Inval voltage prop\n", pp); pp 208 drivers/input/keyboard/sun4i-lradc-keys.c of_node_put(pp); pp 212 drivers/input/keyboard/sun4i-lradc-keys.c error = of_property_read_u32(pp, "linux,code", &map->keycode); pp 214 drivers/input/keyboard/sun4i-lradc-keys.c dev_err(dev, "%pOFn: Inval linux,code prop\n", pp); pp 215 drivers/input/keyboard/sun4i-lradc-keys.c of_node_put(pp); pp 140 drivers/input/serio/parkbd.c static int parkbd_getport(struct parport *pp) pp 148 drivers/input/serio/parkbd.c parkbd_dev = parport_register_dev_model(pp, "parkbd", pp 179 drivers/input/serio/parkbd.c static void parkbd_attach(struct parport *pp) pp 181 drivers/input/serio/parkbd.c if (pp->number != parkbd_pp_no) { pp 182 drivers/input/serio/parkbd.c pr_debug("Not using parport%d.\n", pp->number); pp 186 drivers/input/serio/parkbd.c if (parkbd_getport(pp)) pp 65 drivers/isdn/capi/capilib.c struct capilib_msgidqueue **pp; pp 66 drivers/isdn/capi/capilib.c for (pp = &np->msgidqueue; *pp; pp = &(*pp)->next) { pp 67 drivers/isdn/capi/capilib.c if ((*pp)->msgid == msgid) { pp 68 drivers/isdn/capi/capilib.c struct capilib_msgidqueue *mq = *pp; pp 69 drivers/isdn/capi/capilib.c *pp = mq->next; pp 1085 drivers/macintosh/smu.c struct smu_private *pp; pp 1088 drivers/macintosh/smu.c pp = kzalloc(sizeof(struct smu_private), GFP_KERNEL); pp 1089 drivers/macintosh/smu.c if (pp == 0) pp 1091 drivers/macintosh/smu.c spin_lock_init(&pp->lock); pp 1092 drivers/macintosh/smu.c pp->mode = smu_file_commands; pp 1093 drivers/macintosh/smu.c init_waitqueue_head(&pp->wait); pp 1097 drivers/macintosh/smu.c list_add(&pp->list, &smu_clist); pp 1099 drivers/macintosh/smu.c file->private_data = pp; pp 1108 drivers/macintosh/smu.c struct smu_private *pp = misc; pp 1110 drivers/macintosh/smu.c wake_up_all(&pp->wait); pp 1117 drivers/macintosh/smu.c struct smu_private *pp = file->private_data; pp 1122 drivers/macintosh/smu.c if (pp->busy) pp 1127 drivers/macintosh/smu.c pp->mode = smu_file_events; pp 1139 drivers/macintosh/smu.c else if (pp->mode != smu_file_commands) pp 1144 drivers/macintosh/smu.c spin_lock_irqsave(&pp->lock, flags); pp 1145 drivers/macintosh/smu.c if (pp->busy) { pp 1146 drivers/macintosh/smu.c spin_unlock_irqrestore(&pp->lock, flags); pp 1149 drivers/macintosh/smu.c pp->busy = 1; pp 1150 drivers/macintosh/smu.c pp->cmd.status = 1; pp 1151 drivers/macintosh/smu.c spin_unlock_irqrestore(&pp->lock, flags); pp 1153 drivers/macintosh/smu.c if (copy_from_user(pp->buffer, buf + sizeof(hdr), hdr.data_len)) { pp 1154 drivers/macintosh/smu.c pp->busy = 0; pp 1158 drivers/macintosh/smu.c pp->cmd.cmd = hdr.cmd; pp 1159 drivers/macintosh/smu.c pp->cmd.data_len = hdr.data_len; pp 1160 drivers/macintosh/smu.c pp->cmd.reply_len = SMU_MAX_DATA; pp 1161 drivers/macintosh/smu.c pp->cmd.data_buf = pp->buffer; pp 1162 drivers/macintosh/smu.c pp->cmd.reply_buf = pp->buffer; pp 1163 drivers/macintosh/smu.c pp->cmd.done = smu_user_cmd_done; pp 1164 drivers/macintosh/smu.c pp->cmd.misc = pp; pp 1165 drivers/macintosh/smu.c 
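
The capilib entries above use the classic pointer-to-pointer walk to unlink from a singly linked list without special-casing the head: pp always points at the link field that must be rewritten. Reduced to its essentials:

	struct my_node {
		int		msgid;
		struct my_node	*next;
	};

	static struct my_node *my_unlink(struct my_node **head, int msgid)
	{
		struct my_node **pp;

		for (pp = head; *pp; pp = &(*pp)->next) {
			if ((*pp)->msgid == msgid) {
				struct my_node *mq = *pp;

				*pp = mq->next;		/* bypass the match */
				return mq;
			}
		}
		return NULL;
	}
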
rc = smu_queue_cmd(&pp->cmd); pp 1172 drivers/macintosh/smu.c static ssize_t smu_read_command(struct file *file, struct smu_private *pp, pp 1180 drivers/macintosh/smu.c if (!pp->busy) pp 1184 drivers/macintosh/smu.c spin_lock_irqsave(&pp->lock, flags); pp 1185 drivers/macintosh/smu.c if (pp->cmd.status == 1) { pp 1187 drivers/macintosh/smu.c spin_unlock_irqrestore(&pp->lock, flags); pp 1190 drivers/macintosh/smu.c add_wait_queue(&pp->wait, &wait); pp 1194 drivers/macintosh/smu.c if (pp->cmd.status != 1) pp 1199 drivers/macintosh/smu.c spin_unlock_irqrestore(&pp->lock, flags); pp 1201 drivers/macintosh/smu.c spin_lock_irqsave(&pp->lock, flags); pp 1204 drivers/macintosh/smu.c remove_wait_queue(&pp->wait, &wait); pp 1206 drivers/macintosh/smu.c spin_unlock_irqrestore(&pp->lock, flags); pp 1209 drivers/macintosh/smu.c if (pp->cmd.status != 0) pp 1210 drivers/macintosh/smu.c pp->cmd.reply_len = 0; pp 1211 drivers/macintosh/smu.c size = sizeof(hdr) + pp->cmd.reply_len; pp 1215 drivers/macintosh/smu.c hdr.status = pp->cmd.status; pp 1216 drivers/macintosh/smu.c hdr.reply_len = pp->cmd.reply_len; pp 1220 drivers/macintosh/smu.c if (size && copy_to_user(buf + sizeof(hdr), pp->buffer, size)) pp 1222 drivers/macintosh/smu.c pp->busy = 0; pp 1228 drivers/macintosh/smu.c static ssize_t smu_read_events(struct file *file, struct smu_private *pp, pp 1240 drivers/macintosh/smu.c struct smu_private *pp = file->private_data; pp 1242 drivers/macintosh/smu.c if (pp->mode == smu_file_commands) pp 1243 drivers/macintosh/smu.c return smu_read_command(file, pp, buf, count); pp 1244 drivers/macintosh/smu.c if (pp->mode == smu_file_events) pp 1245 drivers/macintosh/smu.c return smu_read_events(file, pp, buf, count); pp 1252 drivers/macintosh/smu.c struct smu_private *pp = file->private_data; pp 1256 drivers/macintosh/smu.c if (pp == 0) pp 1259 drivers/macintosh/smu.c if (pp->mode == smu_file_commands) { pp 1260 drivers/macintosh/smu.c poll_wait(file, &pp->wait, wait); pp 1262 drivers/macintosh/smu.c spin_lock_irqsave(&pp->lock, flags); pp 1263 drivers/macintosh/smu.c if (pp->busy && pp->cmd.status != 1) pp 1265 drivers/macintosh/smu.c spin_unlock_irqrestore(&pp->lock, flags); pp 1267 drivers/macintosh/smu.c if (pp->mode == smu_file_events) { pp 1275 drivers/macintosh/smu.c struct smu_private *pp = file->private_data; pp 1279 drivers/macintosh/smu.c if (pp == 0) pp 1285 drivers/macintosh/smu.c spin_lock_irqsave(&pp->lock, flags); pp 1286 drivers/macintosh/smu.c pp->mode = smu_file_closing; pp 1287 drivers/macintosh/smu.c busy = pp->busy; pp 1290 drivers/macintosh/smu.c if (busy && pp->cmd.status == 1) { pp 1293 drivers/macintosh/smu.c add_wait_queue(&pp->wait, &wait); pp 1296 drivers/macintosh/smu.c if (pp->cmd.status != 1) pp 1298 drivers/macintosh/smu.c spin_unlock_irqrestore(&pp->lock, flags); pp 1300 drivers/macintosh/smu.c spin_lock_irqsave(&pp->lock, flags); pp 1303 drivers/macintosh/smu.c remove_wait_queue(&pp->wait, &wait); pp 1305 drivers/macintosh/smu.c spin_unlock_irqrestore(&pp->lock, flags); pp 1308 drivers/macintosh/smu.c list_del(&pp->list); pp 1310 drivers/macintosh/smu.c kfree(pp); pp 2130 drivers/macintosh/via-pmu.c struct pmu_private *pp; pp 2135 drivers/macintosh/via-pmu.c if (len > sizeof(pp->rb_buf[0].data)) pp 2136 drivers/macintosh/via-pmu.c len = sizeof(pp->rb_buf[0].data); pp 2139 drivers/macintosh/via-pmu.c pp = list_entry(list, struct pmu_private, list); pp 2140 drivers/macintosh/via-pmu.c spin_lock(&pp->lock); pp 2141 drivers/macintosh/via-pmu.c i = pp->rb_put + 1; pp 2144 
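The smu_read_command() entries above show an open-coded sleep on command completion: the caller parks itself on pp->wait and re-checks pp->cmd.status (1 means still in flight) with pp->lock dropped across each schedule(). A hedged sketch of that loop; the field names follow the listing, but the exact control flow in drivers/macintosh/smu.c may differ slightly:

DECLARE_WAITQUEUE(wait, current);

add_wait_queue(&pp->wait, &wait);
for (;;) {
        set_current_state(TASK_INTERRUPTIBLE);
        if (pp->cmd.status != 1)        /* command completed */
                break;
        if (signal_pending(current))    /* allow interruption */
                break;
        spin_unlock_irqrestore(&pp->lock, flags);
        schedule();                     /* smu_user_cmd_done() wakes us */
        spin_lock_irqsave(&pp->lock, flags);
}
set_current_state(TASK_RUNNING);
remove_wait_queue(&pp->wait, &wait);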
drivers/macintosh/via-pmu.c if (i != pp->rb_get) { pp 2145 drivers/macintosh/via-pmu.c struct rb_entry *rp = &pp->rb_buf[pp->rb_put]; pp 2148 drivers/macintosh/via-pmu.c pp->rb_put = i; pp 2149 drivers/macintosh/via-pmu.c wake_up_interruptible(&pp->wait); pp 2151 drivers/macintosh/via-pmu.c spin_unlock(&pp->lock); pp 2159 drivers/macintosh/via-pmu.c struct pmu_private *pp; pp 2162 drivers/macintosh/via-pmu.c pp = kmalloc(sizeof(struct pmu_private), GFP_KERNEL); pp 2163 drivers/macintosh/via-pmu.c if (!pp) pp 2165 drivers/macintosh/via-pmu.c pp->rb_get = pp->rb_put = 0; pp 2166 drivers/macintosh/via-pmu.c spin_lock_init(&pp->lock); pp 2167 drivers/macintosh/via-pmu.c init_waitqueue_head(&pp->wait); pp 2171 drivers/macintosh/via-pmu.c pp->backlight_locker = 0; pp 2173 drivers/macintosh/via-pmu.c list_add(&pp->list, &all_pmu_pvt); pp 2175 drivers/macintosh/via-pmu.c file->private_data = pp; pp 2184 drivers/macintosh/via-pmu.c struct pmu_private *pp = file->private_data; pp 2189 drivers/macintosh/via-pmu.c if (count < 1 || !pp) pp 2194 drivers/macintosh/via-pmu.c spin_lock_irqsave(&pp->lock, flags); pp 2195 drivers/macintosh/via-pmu.c add_wait_queue(&pp->wait, &wait); pp 2200 drivers/macintosh/via-pmu.c if (pp->rb_get != pp->rb_put) { pp 2201 drivers/macintosh/via-pmu.c int i = pp->rb_get; pp 2202 drivers/macintosh/via-pmu.c struct rb_entry *rp = &pp->rb_buf[i]; pp 2204 drivers/macintosh/via-pmu.c spin_unlock_irqrestore(&pp->lock, flags); pp 2211 drivers/macintosh/via-pmu.c spin_lock_irqsave(&pp->lock, flags); pp 2212 drivers/macintosh/via-pmu.c pp->rb_get = i; pp 2221 drivers/macintosh/via-pmu.c spin_unlock_irqrestore(&pp->lock, flags); pp 2223 drivers/macintosh/via-pmu.c spin_lock_irqsave(&pp->lock, flags); pp 2226 drivers/macintosh/via-pmu.c remove_wait_queue(&pp->wait, &wait); pp 2227 drivers/macintosh/via-pmu.c spin_unlock_irqrestore(&pp->lock, flags); pp 2242 drivers/macintosh/via-pmu.c struct pmu_private *pp = filp->private_data; pp 2246 drivers/macintosh/via-pmu.c if (!pp) pp 2248 drivers/macintosh/via-pmu.c poll_wait(filp, &pp->wait, wait); pp 2249 drivers/macintosh/via-pmu.c spin_lock_irqsave(&pp->lock, flags); pp 2250 drivers/macintosh/via-pmu.c if (pp->rb_get != pp->rb_put) pp 2252 drivers/macintosh/via-pmu.c spin_unlock_irqrestore(&pp->lock, flags); pp 2259 drivers/macintosh/via-pmu.c struct pmu_private *pp = file->private_data; pp 2262 drivers/macintosh/via-pmu.c if (pp) { pp 2265 drivers/macintosh/via-pmu.c list_del(&pp->list); pp 2269 drivers/macintosh/via-pmu.c if (pp->backlight_locker) pp 2273 drivers/macintosh/via-pmu.c kfree(pp); pp 2409 drivers/macintosh/via-pmu.c struct pmu_private *pp = filp->private_data; pp 2411 drivers/macintosh/via-pmu.c if (pp->backlight_locker) pp 2414 drivers/macintosh/via-pmu.c pp->backlight_locker = 1; pp 306 drivers/media/dvb-frontends/cxd2880/cxd2880_dvbt2.h enum cxd2880_dvbt2_pp pp; pp 364 drivers/media/dvb-frontends/cxd2880/cxd2880_dvbt2.h enum cxd2880_dvbt2_pp pp; pp 272 drivers/media/dvb-frontends/cxd2880/cxd2880_tnrdmd_dvbt2_mon.c l1_pre->pp = (enum cxd2880_dvbt2_pp)(data[16] & 0x0f); pp 500 drivers/media/dvb-frontends/cxd2880/cxd2880_tnrdmd_dvbt2_mon.c ofdm->pp = (enum cxd2880_dvbt2_pp)(data[1] & 0x07); pp 351 drivers/media/platform/vicodec/vicodec-core.c u8 **pp, u32 sz) pp 356 drivers/media/platform/vicodec/vicodec-core.c u8 *p = *pp; pp 364 drivers/media/platform/vicodec/vicodec-core.c for (; p < *pp + sz; p++) { pp 368 drivers/media/platform/vicodec/vicodec-core.c *pp + sz - p); pp 371 drivers/media/platform/vicodec/vicodec-core.c p 
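The via-pmu entries above use the rb_get/rb_put ring-buffer convention: the interrupt path writes at rb_put, the reader consumes at rb_get, and the buffer counts as full when advancing rb_put would collide with rb_get, so one slot is always sacrificed. A standalone sketch under those assumptions (RB_SIZE and the types here are illustrative, not the driver's):

#define RB_SIZE 8

struct rb_entry {
        int len;
        unsigned char data[16];
};

struct ring {
        int rb_get, rb_put;
        struct rb_entry rb_buf[RB_SIZE];
        /* via-pmu pairs this with a spinlock and a wait queue */
};

/* Producer side: fill the slot first, then publish by advancing rb_put. */
static int ring_put(struct ring *r, const struct rb_entry *e)
{
        int i = r->rb_put + 1;

        if (i >= RB_SIZE)
                i = 0;
        if (i == r->rb_get)
                return 0;       /* full: keep one slot empty */
        r->rb_buf[r->rb_put] = *e;
        r->rb_put = i;
        return 1;
}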
= *pp + sz; pp 375 drivers/media/platform/vicodec/vicodec-core.c if (*pp + sz - p < copy) pp 376 drivers/media/platform/vicodec/vicodec-core.c copy = *pp + sz - p; pp 388 drivers/media/platform/vicodec/vicodec-core.c *pp = p; pp 397 drivers/media/platform/vicodec/vicodec-core.c if (*pp + sz - p < copy) pp 398 drivers/media/platform/vicodec/vicodec-core.c copy = *pp + sz - p; pp 404 drivers/media/platform/vicodec/vicodec-core.c *pp = p; pp 3269 drivers/media/usb/pvrusb2/pvrusb2-hdw.c int pvr2_hdw_get_cropcap(struct pvr2_hdw *hdw, struct v4l2_cropcap *pp) pp 3275 drivers/media/usb/pvrusb2/pvrusb2-hdw.c memcpy(pp, &hdw->cropcap_info, sizeof(hdw->cropcap_info)); pp 796 drivers/mtd/devices/spear_smi.c struct device_node *pp = NULL; pp 815 drivers/mtd/devices/spear_smi.c while ((pp = of_get_next_child(np, pp))) { pp 819 drivers/mtd/devices/spear_smi.c pdata->np[i] = pp; pp 822 drivers/mtd/devices/spear_smi.c addr = of_get_property(pp, "reg", &len); pp 826 drivers/mtd/devices/spear_smi.c if (of_get_property(pp, "st,smi-fast-mode", NULL)) pp 19 drivers/mtd/parsers/ofpart.c static bool node_has_compatible(struct device_node *pp) pp 21 drivers/mtd/parsers/ofpart.c return of_get_property(pp, "compatible", NULL); pp 32 drivers/mtd/parsers/ofpart.c struct device_node *pp; pp 60 drivers/mtd/parsers/ofpart.c for_each_child_of_node(ofpart_node, pp) { pp 61 drivers/mtd/parsers/ofpart.c if (!dedicated && node_has_compatible(pp)) pp 75 drivers/mtd/parsers/ofpart.c for_each_child_of_node(ofpart_node, pp) { pp 80 drivers/mtd/parsers/ofpart.c if (!dedicated && node_has_compatible(pp)) pp 83 drivers/mtd/parsers/ofpart.c reg = of_get_property(pp, "reg", &len); pp 87 drivers/mtd/parsers/ofpart.c master->name, pp, pp 96 drivers/mtd/parsers/ofpart.c a_cells = of_n_addr_cells(pp); pp 97 drivers/mtd/parsers/ofpart.c s_cells = of_n_size_cells(pp); pp 100 drivers/mtd/parsers/ofpart.c master->name, pp, pp 107 drivers/mtd/parsers/ofpart.c parts[i].of_node = pp; pp 109 drivers/mtd/parsers/ofpart.c partname = of_get_property(pp, "label", &len); pp 111 drivers/mtd/parsers/ofpart.c partname = of_get_property(pp, "name", &len); pp 114 drivers/mtd/parsers/ofpart.c if (of_get_property(pp, "read-only", &len)) pp 117 drivers/mtd/parsers/ofpart.c if (of_get_property(pp, "lock", &len)) pp 131 drivers/mtd/parsers/ofpart.c master->name, pp, mtd_node); pp 134 drivers/mtd/parsers/ofpart.c of_node_put(pp); pp 2792 drivers/mtd/spi-nor/spi-nor.c spi_nor_set_pp_settings(struct spi_nor_pp_command *pp, pp 2796 drivers/mtd/spi-nor/spi-nor.c pp->opcode = opcode; pp 2797 drivers/mtd/spi-nor/spi-nor.c pp->proto = proto; pp 2987 drivers/mtd/spi-nor/spi-nor.c const struct spi_nor_pp_command *pp) pp 2989 drivers/mtd/spi-nor/spi-nor.c struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(pp->opcode, 1), pp 2994 drivers/mtd/spi-nor/spi-nor.c op.cmd.buswidth = spi_nor_get_protocol_inst_nbits(pp->proto); pp 2995 drivers/mtd/spi-nor/spi-nor.c op.addr.buswidth = spi_nor_get_protocol_addr_nbits(pp->proto); pp 2996 drivers/mtd/spi-nor/spi-nor.c op.data.buswidth = spi_nor_get_protocol_data_nbits(pp->proto); pp 4199 drivers/mtd/spi-nor/spi-nor.c const struct spi_nor_pp_command *pp; pp 4208 drivers/mtd/spi-nor/spi-nor.c pp = &nor->params.page_programs[cmd]; pp 4209 drivers/mtd/spi-nor/spi-nor.c nor->program_opcode = pp->opcode; pp 4210 drivers/mtd/spi-nor/spi-nor.c nor->write_proto = pp->proto; pp 283 drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c struct ethtool_pauseparam *pp) pp 287 drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c pp->autoneg = 
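The spear_smi.c and ofpart.c entries above walk flash-partition children with the OF helpers, and the sun4i-lradc-keys.c lines near the top show the companion rule: any early exit from for_each_child_of_node() must of_node_put() the child, since the iterator holds a reference it would otherwise drop on the next pass. A condensed sketch of the ofpart-style walk (error handling abbreviated):

struct device_node *pp;
int i = 0;

for_each_child_of_node(ofpart_node, pp) {
        int len;
        const __be32 *reg = of_get_property(pp, "reg", &len);

        if (!reg) {
                of_node_put(pp);        /* drop the iterator's reference */
                break;
        }
        parts[i].of_node = pp;
        if (of_get_property(pp, "read-only", &len))
                parts[i].mask_flags |= MTD_WRITEABLE;   /* mask write perm */
        i++;
}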
pdata->pause_autoneg; pp 288 drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c pp->tx_pause = pdata->tx_pause; pp 289 drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c pp->rx_pause = pdata->rx_pause; pp 293 drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c struct ethtool_pauseparam *pp) pp 303 drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c if (!phy_validate_pause(phydev, pp)) pp 306 drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c pdata->pause_autoneg = pp->autoneg; pp 307 drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c pdata->tx_pause = pp->tx_pause; pp 308 drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c pdata->rx_pause = pp->rx_pause; pp 310 drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c phy_set_asym_pause(phydev, pp->rx_pause, pp->tx_pause); pp 312 drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c if (!pp->autoneg) { pp 317 drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c if (pp->autoneg) pp 320 drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c pdata->tx_pause = pp->tx_pause; pp 321 drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c pdata->rx_pause = pp->rx_pause; pp 1024 drivers/net/ethernet/aurora/nb8800.c struct ethtool_pauseparam *pp) pp 1028 drivers/net/ethernet/aurora/nb8800.c pp->autoneg = priv->pause_aneg; pp 1029 drivers/net/ethernet/aurora/nb8800.c pp->rx_pause = priv->pause_rx; pp 1030 drivers/net/ethernet/aurora/nb8800.c pp->tx_pause = priv->pause_tx; pp 1034 drivers/net/ethernet/aurora/nb8800.c struct ethtool_pauseparam *pp) pp 1039 drivers/net/ethernet/aurora/nb8800.c priv->pause_aneg = pp->autoneg; pp 1040 drivers/net/ethernet/aurora/nb8800.c priv->pause_rx = pp->rx_pause; pp 1041 drivers/net/ethernet/aurora/nb8800.c priv->pause_tx = pp->tx_pause; pp 1504 drivers/net/ethernet/broadcom/b44.c static void bwfilter_table(struct b44 *bp, u8 *pp, u32 bytes, u32 table_offset) pp 1507 drivers/net/ethernet/broadcom/b44.c u32 *pattern = (u32 *) pp; pp 2805 drivers/net/ethernet/broadcom/bnxt/bnxt.c struct page_pool_params pp = { 0 }; pp 2807 drivers/net/ethernet/broadcom/bnxt/bnxt.c pp.pool_size = bp->rx_ring_size; pp 2808 drivers/net/ethernet/broadcom/bnxt/bnxt.c pp.nid = dev_to_node(&bp->pdev->dev); pp 2809 drivers/net/ethernet/broadcom/bnxt/bnxt.c pp.dev = &bp->pdev->dev; pp 2810 drivers/net/ethernet/broadcom/bnxt/bnxt.c pp.dma_dir = DMA_BIDIRECTIONAL; pp 2812 drivers/net/ethernet/broadcom/bnxt/bnxt.c rxr->page_pool = page_pool_create(&pp); pp 174 drivers/net/ethernet/cisco/enic/enic.h struct enic_port_profile *pp; pp 1107 drivers/net/ethernet/cisco/enic/enic_main.c struct enic_port_profile *pp; pp 1110 drivers/net/ethernet/cisco/enic/enic_main.c ENIC_PP_BY_INDEX(enic, vf, pp, &err); pp 1116 drivers/net/ethernet/cisco/enic/enic_main.c memcpy(pp->vf_mac, mac, ETH_ALEN); pp 1135 drivers/net/ethernet/cisco/enic/enic_main.c struct enic_port_profile *pp; pp 1138 drivers/net/ethernet/cisco/enic/enic_main.c ENIC_PP_BY_INDEX(enic, vf, pp, &err); pp 1145 drivers/net/ethernet/cisco/enic/enic_main.c memcpy(&prev_pp, pp, sizeof(*enic->pp)); pp 1146 drivers/net/ethernet/cisco/enic/enic_main.c memset(pp, 0, sizeof(*enic->pp)); pp 1148 drivers/net/ethernet/cisco/enic/enic_main.c pp->set |= ENIC_SET_REQUEST; pp 1149 drivers/net/ethernet/cisco/enic/enic_main.c pp->request = nla_get_u8(port[IFLA_PORT_REQUEST]); pp 1152 drivers/net/ethernet/cisco/enic/enic_main.c pp->set |= ENIC_SET_NAME; pp 1153 drivers/net/ethernet/cisco/enic/enic_main.c memcpy(pp->name, nla_data(port[IFLA_PORT_PROFILE]), pp 1158 drivers/net/ethernet/cisco/enic/enic_main.c pp->set |= 
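Note the different pp in the bnxt.c entries above: there it is a stack-allocated struct page_pool_params, zero-initialised and filled with only the fields the driver cares about before page_pool_create(). Reassembled from the listing, with illustrative error handling:

struct page_pool_params pp = { 0 };

pp.pool_size = bp->rx_ring_size;        /* one page per RX descriptor */
pp.nid = dev_to_node(&bp->pdev->dev);   /* allocate near the NIC's node */
pp.dev = &bp->pdev->dev;
pp.dma_dir = DMA_BIDIRECTIONAL;

rxr->page_pool = page_pool_create(&pp);
if (IS_ERR(rxr->page_pool)) {
        int err = PTR_ERR(rxr->page_pool);

        rxr->page_pool = NULL;
        return err;
}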
ENIC_SET_INSTANCE; pp 1159 drivers/net/ethernet/cisco/enic/enic_main.c memcpy(pp->instance_uuid, pp 1164 drivers/net/ethernet/cisco/enic/enic_main.c pp->set |= ENIC_SET_HOST; pp 1165 drivers/net/ethernet/cisco/enic/enic_main.c memcpy(pp->host_uuid, pp 1172 drivers/net/ethernet/cisco/enic/enic_main.c memcpy(pp->mac_addr, prev_pp.vf_mac, ETH_ALEN); pp 1179 drivers/net/ethernet/cisco/enic/enic_main.c vnic_dev_get_mac_addr, pp->mac_addr); pp 1182 drivers/net/ethernet/cisco/enic/enic_main.c memcpy(pp, &prev_pp, sizeof(*pp)); pp 1193 drivers/net/ethernet/cisco/enic/enic_main.c memcpy(pp, &prev_pp, sizeof(*pp)); pp 1195 drivers/net/ethernet/cisco/enic/enic_main.c memset(pp, 0, sizeof(*pp)); pp 1203 drivers/net/ethernet/cisco/enic/enic_main.c pp->set |= ENIC_PORT_REQUEST_APPLIED; pp 1206 drivers/net/ethernet/cisco/enic/enic_main.c if (pp->request == PORT_REQUEST_DISASSOCIATE) { pp 1207 drivers/net/ethernet/cisco/enic/enic_main.c eth_zero_addr(pp->mac_addr); pp 1214 drivers/net/ethernet/cisco/enic/enic_main.c eth_zero_addr(pp->vf_mac); pp 1224 drivers/net/ethernet/cisco/enic/enic_main.c struct enic_port_profile *pp; pp 1227 drivers/net/ethernet/cisco/enic/enic_main.c ENIC_PP_BY_INDEX(enic, vf, pp, &err); pp 1231 drivers/net/ethernet/cisco/enic/enic_main.c if (!(pp->set & ENIC_PORT_REQUEST_APPLIED)) pp 1234 drivers/net/ethernet/cisco/enic/enic_main.c err = enic_process_get_pp_request(enic, vf, pp->request, &response); pp 1238 drivers/net/ethernet/cisco/enic/enic_main.c if (nla_put_u16(skb, IFLA_PORT_REQUEST, pp->request) || pp 1240 drivers/net/ethernet/cisco/enic/enic_main.c ((pp->set & ENIC_SET_NAME) && pp 1241 drivers/net/ethernet/cisco/enic/enic_main.c nla_put(skb, IFLA_PORT_PROFILE, PORT_PROFILE_MAX, pp->name)) || pp 1242 drivers/net/ethernet/cisco/enic/enic_main.c ((pp->set & ENIC_SET_INSTANCE) && pp 1244 drivers/net/ethernet/cisco/enic/enic_main.c pp->instance_uuid)) || pp 1245 drivers/net/ethernet/cisco/enic/enic_main.c ((pp->set & ENIC_SET_HOST) && pp 1246 drivers/net/ethernet/cisco/enic/enic_main.c nla_put(skb, IFLA_PORT_HOST_UUID, PORT_UUID_MAX, pp->host_uuid))) pp 2814 drivers/net/ethernet/cisco/enic/enic_main.c enic->pp = kcalloc(num_pps, sizeof(*enic->pp), GFP_KERNEL); pp 2815 drivers/net/ethernet/cisco/enic/enic_main.c if (!enic->pp) { pp 3001 drivers/net/ethernet/cisco/enic/enic_main.c kfree(enic->pp); pp 3041 drivers/net/ethernet/cisco/enic/enic_main.c kfree(enic->pp); pp 72 drivers/net/ethernet/cisco/enic/enic_pp.c struct enic_port_profile *pp; pp 81 drivers/net/ethernet/cisco/enic/enic_pp.c ENIC_PP_BY_INDEX(enic, vf, pp, &err); pp 85 drivers/net/ethernet/cisco/enic/enic_pp.c if (!(pp->set & ENIC_SET_NAME) || !strlen(pp->name)) pp 95 drivers/net/ethernet/cisco/enic/enic_pp.c strlen(pp->name) + 1, pp->name); pp 97 drivers/net/ethernet/cisco/enic/enic_pp.c if (!is_zero_ether_addr(pp->mac_addr)) { pp 98 drivers/net/ethernet/cisco/enic/enic_pp.c client_mac = pp->mac_addr; pp 117 drivers/net/ethernet/cisco/enic/enic_pp.c if (pp->set & ENIC_SET_INSTANCE) { pp 118 drivers/net/ethernet/cisco/enic/enic_pp.c sprintf(uuid_str, "%pUB", pp->instance_uuid); pp 124 drivers/net/ethernet/cisco/enic/enic_pp.c if (pp->set & ENIC_SET_HOST) { pp 125 drivers/net/ethernet/cisco/enic/enic_pp.c sprintf(uuid_str, "%pUB", pp->host_uuid); pp 199 drivers/net/ethernet/cisco/enic/enic_pp.c struct enic_port_profile *pp; pp 202 drivers/net/ethernet/cisco/enic/enic_pp.c ENIC_PP_BY_INDEX(enic, vf, pp, &err); pp 207 drivers/net/ethernet/cisco/enic/enic_pp.c if (!is_zero_ether_addr(pp->mac_addr)) pp 209 
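The enic_get_vf_port entries above use the usual netlink fill pattern: each optional attribute is guarded by its ENIC_SET_* flag and the nla_put calls are chained with ||, so the first failure aborts the whole fill. Condensed from the listing; the instance attribute name and the errno are reconstructed assumptions (the driver may use a goto label instead):

if (nla_put_u16(skb, IFLA_PORT_REQUEST, pp->request) ||
    ((pp->set & ENIC_SET_NAME) &&
     nla_put(skb, IFLA_PORT_PROFILE, PORT_PROFILE_MAX, pp->name)) ||
    ((pp->set & ENIC_SET_INSTANCE) &&
     nla_put(skb, IFLA_PORT_INSTANCE_UUID, PORT_UUID_MAX,
             pp->instance_uuid)) ||
    ((pp->set & ENIC_SET_HOST) &&
     nla_put(skb, IFLA_PORT_HOST_UUID, PORT_UUID_MAX, pp->host_uuid)))
        return -EMSGSIZE;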
drivers/net/ethernet/cisco/enic/enic_pp.c pp->mac_addr); pp 220 drivers/net/ethernet/cisco/enic/enic_pp.c struct enic_port_profile *pp; pp 224 drivers/net/ethernet/cisco/enic/enic_pp.c ENIC_PP_BY_INDEX(enic, vf, pp, &err); pp 228 drivers/net/ethernet/cisco/enic/enic_pp.c if (pp->request != PORT_REQUEST_ASSOCIATE) { pp 246 drivers/net/ethernet/cisco/enic/enic_pp.c if (pp->request != PORT_REQUEST_ASSOCIATE) { pp 260 drivers/net/ethernet/cisco/enic/enic_pp.c struct enic_port_profile *pp; pp 264 drivers/net/ethernet/cisco/enic/enic_pp.c ENIC_PP_BY_INDEX(enic, vf, pp, &err); pp 271 drivers/net/ethernet/cisco/enic/enic_pp.c enic_are_pp_different(prev_pp, pp))) { pp 294 drivers/net/ethernet/cisco/enic/enic_pp.c if (!is_zero_ether_addr(pp->mac_addr)) pp 296 drivers/net/ethernet/cisco/enic/enic_pp.c pp->mac_addr); pp 307 drivers/net/ethernet/cisco/enic/enic_pp.c struct enic_port_profile *pp; pp 310 drivers/net/ethernet/cisco/enic/enic_pp.c ENIC_PP_BY_INDEX(enic, vf, pp, &err); pp 314 drivers/net/ethernet/cisco/enic/enic_pp.c if (pp->request >= enic_pp_handlers_count pp 315 drivers/net/ethernet/cisco/enic/enic_pp.c || !enic_pp_handlers[pp->request]) pp 318 drivers/net/ethernet/cisco/enic/enic_pp.c return enic_pp_handlers[pp->request](enic, vf, prev_pp, restore_pp); pp 22 drivers/net/ethernet/cisco/enic/enic_pp.h #define ENIC_PP_BY_INDEX(enic, vf, pp, err) \ pp 25 drivers/net/ethernet/cisco/enic/enic_pp.h pp = (vf == PORT_SELF_VF) ? enic->pp : enic->pp + vf; \ pp 27 drivers/net/ethernet/cisco/enic/enic_pp.h pp = NULL; \ pp 2148 drivers/net/ethernet/ibm/emac/core.c struct ethtool_pauseparam *pp) pp 2155 drivers/net/ethernet/ibm/emac/core.c pp->autoneg = 1; pp 2159 drivers/net/ethernet/ibm/emac/core.c pp->rx_pause = pp->tx_pause = 1; pp 2161 drivers/net/ethernet/ibm/emac/core.c pp->tx_pause = 1; pp 399 drivers/net/ethernet/marvell/mvneta.c struct mvneta_port *pp; pp 657 drivers/net/ethernet/marvell/mvneta.c static void mvreg_write(struct mvneta_port *pp, u32 offset, u32 data) pp 659 drivers/net/ethernet/marvell/mvneta.c writel(data, pp->base + offset); pp 663 drivers/net/ethernet/marvell/mvneta.c static u32 mvreg_read(struct mvneta_port *pp, u32 offset) pp 665 drivers/net/ethernet/marvell/mvneta.c return readl(pp->base + offset); pp 686 drivers/net/ethernet/marvell/mvneta.c static void mvneta_mib_counters_clear(struct mvneta_port *pp) pp 693 drivers/net/ethernet/marvell/mvneta.c dummy = mvreg_read(pp, (MVNETA_MIB_COUNTERS_BASE + i)); pp 694 drivers/net/ethernet/marvell/mvneta.c dummy = mvreg_read(pp, MVNETA_RX_DISCARD_FRAME_COUNT); pp 695 drivers/net/ethernet/marvell/mvneta.c dummy = mvreg_read(pp, MVNETA_OVERRUN_FRAME_COUNT); pp 703 drivers/net/ethernet/marvell/mvneta.c struct mvneta_port *pp = netdev_priv(dev); pp 716 drivers/net/ethernet/marvell/mvneta.c cpu_stats = per_cpu_ptr(pp->stats, cpu); pp 752 drivers/net/ethernet/marvell/mvneta.c static void mvneta_rxq_non_occup_desc_add(struct mvneta_port *pp, pp 760 drivers/net/ethernet/marvell/mvneta.c mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), pp 766 drivers/net/ethernet/marvell/mvneta.c mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), pp 771 drivers/net/ethernet/marvell/mvneta.c static int mvneta_rxq_busy_desc_num_get(struct mvneta_port *pp, pp 776 drivers/net/ethernet/marvell/mvneta.c val = mvreg_read(pp, MVNETA_RXQ_STATUS_REG(rxq->id)); pp 783 drivers/net/ethernet/marvell/mvneta.c static void mvneta_rxq_desc_num_update(struct mvneta_port *pp, pp 792 drivers/net/ethernet/marvell/mvneta.c mvreg_write(pp, 
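The last enic_pp.c entries above dispatch through a table of handler function pointers indexed by pp->request, bounds-checked before the indirect call. Sketch (the errno is an assumption):

if (pp->request >= enic_pp_handlers_count ||
    !enic_pp_handlers[pp->request])
        return -EOPNOTSUPP;     /* unknown or unimplemented request */

return enic_pp_handlers[pp->request](enic, vf, prev_pp, restore_pp);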
MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val); pp 812 drivers/net/ethernet/marvell/mvneta.c mvreg_write(pp, MVNETA_RXQ_STATUS_UPDATE_REG(rxq->id), val); pp 828 drivers/net/ethernet/marvell/mvneta.c static void mvneta_max_rx_size_set(struct mvneta_port *pp, int max_rx_size) pp 832 drivers/net/ethernet/marvell/mvneta.c val = mvreg_read(pp, MVNETA_GMAC_CTRL_0); pp 836 drivers/net/ethernet/marvell/mvneta.c mvreg_write(pp, MVNETA_GMAC_CTRL_0, val); pp 841 drivers/net/ethernet/marvell/mvneta.c static void mvneta_rxq_offset_set(struct mvneta_port *pp, pp 847 drivers/net/ethernet/marvell/mvneta.c val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id)); pp 852 drivers/net/ethernet/marvell/mvneta.c mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val); pp 859 drivers/net/ethernet/marvell/mvneta.c static void mvneta_txq_pend_desc_add(struct mvneta_port *pp, pp 870 drivers/net/ethernet/marvell/mvneta.c mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val); pp 898 drivers/net/ethernet/marvell/mvneta.c static void mvneta_rxq_buf_size_set(struct mvneta_port *pp, pp 904 drivers/net/ethernet/marvell/mvneta.c val = mvreg_read(pp, MVNETA_RXQ_SIZE_REG(rxq->id)); pp 909 drivers/net/ethernet/marvell/mvneta.c mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), val); pp 913 drivers/net/ethernet/marvell/mvneta.c static void mvneta_rxq_bm_disable(struct mvneta_port *pp, pp 918 drivers/net/ethernet/marvell/mvneta.c val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id)); pp 920 drivers/net/ethernet/marvell/mvneta.c mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val); pp 924 drivers/net/ethernet/marvell/mvneta.c static void mvneta_rxq_bm_enable(struct mvneta_port *pp, pp 929 drivers/net/ethernet/marvell/mvneta.c val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id)); pp 931 drivers/net/ethernet/marvell/mvneta.c mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val); pp 935 drivers/net/ethernet/marvell/mvneta.c static void mvneta_rxq_long_pool_set(struct mvneta_port *pp, pp 940 drivers/net/ethernet/marvell/mvneta.c val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id)); pp 942 drivers/net/ethernet/marvell/mvneta.c val |= (pp->pool_long->id << MVNETA_RXQ_LONG_POOL_ID_SHIFT); pp 944 drivers/net/ethernet/marvell/mvneta.c mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val); pp 948 drivers/net/ethernet/marvell/mvneta.c static void mvneta_rxq_short_pool_set(struct mvneta_port *pp, pp 953 drivers/net/ethernet/marvell/mvneta.c val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id)); pp 955 drivers/net/ethernet/marvell/mvneta.c val |= (pp->pool_short->id << MVNETA_RXQ_SHORT_POOL_ID_SHIFT); pp 957 drivers/net/ethernet/marvell/mvneta.c mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val); pp 961 drivers/net/ethernet/marvell/mvneta.c static inline void mvneta_bm_pool_bufsize_set(struct mvneta_port *pp, pp 968 drivers/net/ethernet/marvell/mvneta.c dev_warn(pp->dev->dev.parent, pp 974 drivers/net/ethernet/marvell/mvneta.c val = mvreg_read(pp, MVNETA_PORT_POOL_BUFFER_SZ_REG(pool_id)); pp 976 drivers/net/ethernet/marvell/mvneta.c mvreg_write(pp, MVNETA_PORT_POOL_BUFFER_SZ_REG(pool_id), val); pp 980 drivers/net/ethernet/marvell/mvneta.c static int mvneta_mbus_io_win_set(struct mvneta_port *pp, u32 base, u32 wsize, pp 986 drivers/net/ethernet/marvell/mvneta.c win_enable = mvreg_read(pp, MVNETA_BASE_ADDR_ENABLE); pp 988 drivers/net/ethernet/marvell/mvneta.c if (pp->bm_win_id < 0) { pp 992 drivers/net/ethernet/marvell/mvneta.c pp->bm_win_id = i; pp 999 drivers/net/ethernet/marvell/mvneta.c i = pp->bm_win_id; pp 1002 drivers/net/ethernet/marvell/mvneta.c 
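From here on the listing is dominated by drivers/net/ethernet/marvell/mvneta.c, where pp is the struct mvneta_port and nearly every line funnels through the two MMIO accessors shown above (mvreg_read/mvreg_write wrapping readl/writel on pp->base). Most per-queue helpers are then read-modify-write sequences; mvneta_rxq_bm_enable() from the listing, reassembled (the bit macro name is taken from the driver and is an assumption here):

static void mvneta_rxq_bm_enable(struct mvneta_port *pp,
                                 struct mvneta_rx_queue *rxq)
{
        u32 val;

        val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id));
        val |= MVNETA_RXQ_HW_BUF_ALLOC; /* hand buffer allocation to the HWBM */
        mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}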
mvreg_write(pp, MVNETA_WIN_BASE(i), 0); pp 1003 drivers/net/ethernet/marvell/mvneta.c mvreg_write(pp, MVNETA_WIN_SIZE(i), 0); pp 1006 drivers/net/ethernet/marvell/mvneta.c mvreg_write(pp, MVNETA_WIN_REMAP(i), 0); pp 1008 drivers/net/ethernet/marvell/mvneta.c mvreg_write(pp, MVNETA_WIN_BASE(i), (base & 0xffff0000) | pp 1011 drivers/net/ethernet/marvell/mvneta.c mvreg_write(pp, MVNETA_WIN_SIZE(i), (wsize - 1) & 0xffff0000); pp 1013 drivers/net/ethernet/marvell/mvneta.c win_protect = mvreg_read(pp, MVNETA_ACCESS_PROTECT_ENABLE); pp 1015 drivers/net/ethernet/marvell/mvneta.c mvreg_write(pp, MVNETA_ACCESS_PROTECT_ENABLE, win_protect); pp 1018 drivers/net/ethernet/marvell/mvneta.c mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable); pp 1023 drivers/net/ethernet/marvell/mvneta.c static int mvneta_bm_port_mbus_init(struct mvneta_port *pp) pp 1030 drivers/net/ethernet/marvell/mvneta.c err = mvebu_mbus_get_io_win_info(pp->bm_priv->bppi_phys_addr, &wsize, pp 1035 drivers/net/ethernet/marvell/mvneta.c pp->bm_win_id = -1; pp 1038 drivers/net/ethernet/marvell/mvneta.c err = mvneta_mbus_io_win_set(pp, pp->bm_priv->bppi_phys_addr, wsize, pp 1041 drivers/net/ethernet/marvell/mvneta.c netdev_info(pp->dev, "fail to configure mbus window to BM\n"); pp 1051 drivers/net/ethernet/marvell/mvneta.c struct mvneta_port *pp) pp 1056 drivers/net/ethernet/marvell/mvneta.c if (!pp->neta_armada3700) { pp 1059 drivers/net/ethernet/marvell/mvneta.c ret = mvneta_bm_port_mbus_init(pp); pp 1065 drivers/net/ethernet/marvell/mvneta.c netdev_info(pp->dev, "missing long pool id\n"); pp 1070 drivers/net/ethernet/marvell/mvneta.c pp->pool_long = mvneta_bm_pool_use(pp->bm_priv, long_pool_id, pp 1071 drivers/net/ethernet/marvell/mvneta.c MVNETA_BM_LONG, pp->id, pp 1072 drivers/net/ethernet/marvell/mvneta.c MVNETA_RX_PKT_SIZE(pp->dev->mtu)); pp 1073 drivers/net/ethernet/marvell/mvneta.c if (!pp->pool_long) { pp 1074 drivers/net/ethernet/marvell/mvneta.c netdev_info(pp->dev, "fail to obtain long pool for port\n"); pp 1078 drivers/net/ethernet/marvell/mvneta.c pp->pool_long->port_map |= 1 << pp->id; pp 1080 drivers/net/ethernet/marvell/mvneta.c mvneta_bm_pool_bufsize_set(pp, pp->pool_long->buf_size, pp 1081 drivers/net/ethernet/marvell/mvneta.c pp->pool_long->id); pp 1088 drivers/net/ethernet/marvell/mvneta.c pp->pool_short = mvneta_bm_pool_use(pp->bm_priv, short_pool_id, pp 1089 drivers/net/ethernet/marvell/mvneta.c MVNETA_BM_SHORT, pp->id, pp 1091 drivers/net/ethernet/marvell/mvneta.c if (!pp->pool_short) { pp 1092 drivers/net/ethernet/marvell/mvneta.c netdev_info(pp->dev, "fail to obtain short pool for port\n"); pp 1093 drivers/net/ethernet/marvell/mvneta.c mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id); pp 1098 drivers/net/ethernet/marvell/mvneta.c pp->pool_short->port_map |= 1 << pp->id; pp 1099 drivers/net/ethernet/marvell/mvneta.c mvneta_bm_pool_bufsize_set(pp, pp->pool_short->buf_size, pp 1100 drivers/net/ethernet/marvell/mvneta.c pp->pool_short->id); pp 1107 drivers/net/ethernet/marvell/mvneta.c static void mvneta_bm_update_mtu(struct mvneta_port *pp, int mtu) pp 1109 drivers/net/ethernet/marvell/mvneta.c struct mvneta_bm_pool *bm_pool = pp->pool_long; pp 1114 drivers/net/ethernet/marvell/mvneta.c mvneta_bm_bufs_free(pp->bm_priv, bm_pool, 1 << pp->id); pp 1133 drivers/net/ethernet/marvell/mvneta.c mvneta_bm_pool_bufsize_set(pp, bm_pool->buf_size, bm_pool->id); pp 1138 drivers/net/ethernet/marvell/mvneta.c mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id); pp 1139 
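The mvneta_bm_port_init() entries above attach the port to a hardware buffer-manager pool in three steps: obtain (or create) the shared pool, mark this port in the pool's port map, then program the per-port buffer-size register. Condensed from the listing, with an illustrative error return:

pp->pool_long = mvneta_bm_pool_use(pp->bm_priv, long_pool_id,
                                   MVNETA_BM_LONG, pp->id,
                                   MVNETA_RX_PKT_SIZE(pp->dev->mtu));
if (!pp->pool_long) {
        netdev_info(pp->dev, "fail to obtain long pool for port\n");
        return -ENOMEM;
}

pp->pool_long->port_map |= 1 << pp->id;        /* pools are shared per-port */
mvneta_bm_pool_bufsize_set(pp, pp->pool_long->buf_size, pp->pool_long->id);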
drivers/net/ethernet/marvell/mvneta.c mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short, 1 << pp->id); pp 1141 drivers/net/ethernet/marvell/mvneta.c pp->bm_priv = NULL; pp 1142 drivers/net/ethernet/marvell/mvneta.c mvreg_write(pp, MVNETA_ACC_MODE, MVNETA_ACC_MODE_EXT1); pp 1143 drivers/net/ethernet/marvell/mvneta.c netdev_info(pp->dev, "fail to update MTU, fall back to software BM\n"); pp 1147 drivers/net/ethernet/marvell/mvneta.c static void mvneta_port_up(struct mvneta_port *pp) pp 1155 drivers/net/ethernet/marvell/mvneta.c struct mvneta_tx_queue *txq = &pp->txqs[queue]; pp 1159 drivers/net/ethernet/marvell/mvneta.c mvreg_write(pp, MVNETA_TXQ_CMD, q_map); pp 1164 drivers/net/ethernet/marvell/mvneta.c struct mvneta_rx_queue *rxq = &pp->rxqs[queue]; pp 1169 drivers/net/ethernet/marvell/mvneta.c mvreg_write(pp, MVNETA_RXQ_CMD, q_map); pp 1173 drivers/net/ethernet/marvell/mvneta.c static void mvneta_port_down(struct mvneta_port *pp) pp 1179 drivers/net/ethernet/marvell/mvneta.c val = mvreg_read(pp, MVNETA_RXQ_CMD) & MVNETA_RXQ_ENABLE_MASK; pp 1183 drivers/net/ethernet/marvell/mvneta.c mvreg_write(pp, MVNETA_RXQ_CMD, pp 1190 drivers/net/ethernet/marvell/mvneta.c netdev_warn(pp->dev, pp 1197 drivers/net/ethernet/marvell/mvneta.c val = mvreg_read(pp, MVNETA_RXQ_CMD); pp 1203 drivers/net/ethernet/marvell/mvneta.c val = (mvreg_read(pp, MVNETA_TXQ_CMD)) & MVNETA_TXQ_ENABLE_MASK; pp 1206 drivers/net/ethernet/marvell/mvneta.c mvreg_write(pp, MVNETA_TXQ_CMD, pp 1213 drivers/net/ethernet/marvell/mvneta.c netdev_warn(pp->dev, pp 1221 drivers/net/ethernet/marvell/mvneta.c val = mvreg_read(pp, MVNETA_TXQ_CMD); pp 1229 drivers/net/ethernet/marvell/mvneta.c netdev_warn(pp->dev, pp 1236 drivers/net/ethernet/marvell/mvneta.c val = mvreg_read(pp, MVNETA_PORT_STATUS); pp 1244 drivers/net/ethernet/marvell/mvneta.c static void mvneta_port_enable(struct mvneta_port *pp) pp 1249 drivers/net/ethernet/marvell/mvneta.c val = mvreg_read(pp, MVNETA_GMAC_CTRL_0); pp 1251 drivers/net/ethernet/marvell/mvneta.c mvreg_write(pp, MVNETA_GMAC_CTRL_0, val); pp 1255 drivers/net/ethernet/marvell/mvneta.c static void mvneta_port_disable(struct mvneta_port *pp) pp 1260 drivers/net/ethernet/marvell/mvneta.c val = mvreg_read(pp, MVNETA_GMAC_CTRL_0); pp 1262 drivers/net/ethernet/marvell/mvneta.c mvreg_write(pp, MVNETA_GMAC_CTRL_0, val); pp 1270 drivers/net/ethernet/marvell/mvneta.c static void mvneta_set_ucast_table(struct mvneta_port *pp, int queue) pp 1283 drivers/net/ethernet/marvell/mvneta.c mvreg_write(pp, MVNETA_DA_FILT_UCAST_BASE + offset, val); pp 1287 drivers/net/ethernet/marvell/mvneta.c static void mvneta_set_special_mcast_table(struct mvneta_port *pp, int queue) pp 1300 drivers/net/ethernet/marvell/mvneta.c mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + offset, val); pp 1305 drivers/net/ethernet/marvell/mvneta.c static void mvneta_set_other_mcast_table(struct mvneta_port *pp, int queue) pp 1311 drivers/net/ethernet/marvell/mvneta.c memset(pp->mcast_count, 0, sizeof(pp->mcast_count)); pp 1314 drivers/net/ethernet/marvell/mvneta.c memset(pp->mcast_count, 1, sizeof(pp->mcast_count)); pp 1320 drivers/net/ethernet/marvell/mvneta.c mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + offset, val); pp 1325 drivers/net/ethernet/marvell/mvneta.c struct mvneta_port *pp = arg; pp 1330 drivers/net/ethernet/marvell/mvneta.c mvreg_write(pp, MVNETA_INTR_NEW_MASK, pp 1338 drivers/net/ethernet/marvell/mvneta.c struct mvneta_port *pp = arg; pp 1343 drivers/net/ethernet/marvell/mvneta.c mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0); pp 1344 
drivers/net/ethernet/marvell/mvneta.c mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0); pp 1345 drivers/net/ethernet/marvell/mvneta.c mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0); pp 1350 drivers/net/ethernet/marvell/mvneta.c struct mvneta_port *pp = arg; pp 1355 drivers/net/ethernet/marvell/mvneta.c mvreg_write(pp, MVNETA_INTR_NEW_CAUSE, 0); pp 1356 drivers/net/ethernet/marvell/mvneta.c mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0); pp 1357 drivers/net/ethernet/marvell/mvneta.c mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0); pp 1369 drivers/net/ethernet/marvell/mvneta.c static void mvneta_defaults_set(struct mvneta_port *pp) pp 1377 drivers/net/ethernet/marvell/mvneta.c on_each_cpu(mvneta_percpu_clear_intr_cause, pp, true); pp 1380 drivers/net/ethernet/marvell/mvneta.c on_each_cpu(mvneta_percpu_mask_interrupt, pp, true); pp 1381 drivers/net/ethernet/marvell/mvneta.c mvreg_write(pp, MVNETA_INTR_ENABLE, 0); pp 1384 drivers/net/ethernet/marvell/mvneta.c mvreg_write(pp, MVNETA_MBUS_RETRY, 0x20); pp 1394 drivers/net/ethernet/marvell/mvneta.c if (!pp->neta_armada3700) { pp 1408 drivers/net/ethernet/marvell/mvneta.c txq_map = (cpu == pp->rxq_def) ? pp 1416 drivers/net/ethernet/marvell/mvneta.c mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map); pp 1420 drivers/net/ethernet/marvell/mvneta.c mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET); pp 1421 drivers/net/ethernet/marvell/mvneta.c mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET); pp 1424 drivers/net/ethernet/marvell/mvneta.c mvreg_write(pp, MVNETA_TXQ_CMD_1, 0); pp 1426 drivers/net/ethernet/marvell/mvneta.c mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(queue), 0); pp 1427 drivers/net/ethernet/marvell/mvneta.c mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(queue), 0); pp 1430 drivers/net/ethernet/marvell/mvneta.c mvreg_write(pp, MVNETA_PORT_TX_RESET, 0); pp 1431 drivers/net/ethernet/marvell/mvneta.c mvreg_write(pp, MVNETA_PORT_RX_RESET, 0); pp 1434 drivers/net/ethernet/marvell/mvneta.c if (pp->bm_priv) pp 1440 drivers/net/ethernet/marvell/mvneta.c mvreg_write(pp, MVNETA_ACC_MODE, val); pp 1442 drivers/net/ethernet/marvell/mvneta.c if (pp->bm_priv) pp 1443 drivers/net/ethernet/marvell/mvneta.c mvreg_write(pp, MVNETA_BM_ADDRESS, pp->bm_priv->bppi_phys_addr); pp 1446 drivers/net/ethernet/marvell/mvneta.c val = MVNETA_PORT_CONFIG_DEFL_VALUE(pp->rxq_def); pp 1447 drivers/net/ethernet/marvell/mvneta.c mvreg_write(pp, MVNETA_PORT_CONFIG, val); pp 1450 drivers/net/ethernet/marvell/mvneta.c mvreg_write(pp, MVNETA_PORT_CONFIG_EXTEND, val); pp 1451 drivers/net/ethernet/marvell/mvneta.c mvreg_write(pp, MVNETA_RX_MIN_FRAME_SIZE, 64); pp 1466 drivers/net/ethernet/marvell/mvneta.c mvreg_write(pp, MVNETA_SDMA_CONFIG, val); pp 1471 drivers/net/ethernet/marvell/mvneta.c val = mvreg_read(pp, MVNETA_UNIT_CONTROL); pp 1473 drivers/net/ethernet/marvell/mvneta.c mvreg_write(pp, MVNETA_UNIT_CONTROL, val); pp 1475 drivers/net/ethernet/marvell/mvneta.c mvneta_set_ucast_table(pp, -1); pp 1476 drivers/net/ethernet/marvell/mvneta.c mvneta_set_special_mcast_table(pp, -1); pp 1477 drivers/net/ethernet/marvell/mvneta.c mvneta_set_other_mcast_table(pp, -1); pp 1480 drivers/net/ethernet/marvell/mvneta.c mvreg_write(pp, MVNETA_INTR_ENABLE, pp 1484 drivers/net/ethernet/marvell/mvneta.c mvneta_mib_counters_clear(pp); pp 1488 drivers/net/ethernet/marvell/mvneta.c static void mvneta_txq_max_tx_size_set(struct mvneta_port *pp, int max_tx_size) pp 1499 drivers/net/ethernet/marvell/mvneta.c val = mvreg_read(pp, MVNETA_TX_MTU); pp 1502 drivers/net/ethernet/marvell/mvneta.c 
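mvneta handles interrupt masking per CPU: the helpers above take the port through a void * so that mvneta_defaults_set() can run them everywhere with on_each_cpu(). Reassembled from the listing:

static void mvneta_percpu_mask_interrupt(void *arg)
{
        struct mvneta_port *pp = arg;

        /* Zeroing the three mask registers masks all interrupt sources */
        mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
        mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
        mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
}

/* In mvneta_defaults_set(): clear causes and mask on every CPU, then
 * disable the interrupt unit outright. */
on_each_cpu(mvneta_percpu_clear_intr_cause, pp, true);
on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
mvreg_write(pp, MVNETA_INTR_ENABLE, 0);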
mvreg_write(pp, MVNETA_TX_MTU, val); pp 1505 drivers/net/ethernet/marvell/mvneta.c val = mvreg_read(pp, MVNETA_TX_TOKEN_SIZE); pp 1512 drivers/net/ethernet/marvell/mvneta.c mvreg_write(pp, MVNETA_TX_TOKEN_SIZE, val); pp 1515 drivers/net/ethernet/marvell/mvneta.c val = mvreg_read(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue)); pp 1522 drivers/net/ethernet/marvell/mvneta.c mvreg_write(pp, MVNETA_TXQ_TOKEN_SIZE_REG(queue), val); pp 1528 drivers/net/ethernet/marvell/mvneta.c static void mvneta_set_ucast_addr(struct mvneta_port *pp, u8 last_nibble, pp 1544 drivers/net/ethernet/marvell/mvneta.c unicast_reg = mvreg_read(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset)); pp 1554 drivers/net/ethernet/marvell/mvneta.c mvreg_write(pp, (MVNETA_DA_FILT_UCAST_BASE + tbl_offset), unicast_reg); pp 1558 drivers/net/ethernet/marvell/mvneta.c static void mvneta_mac_addr_set(struct mvneta_port *pp, unsigned char *addr, pp 1569 drivers/net/ethernet/marvell/mvneta.c mvreg_write(pp, MVNETA_MAC_ADDR_LOW, mac_l); pp 1570 drivers/net/ethernet/marvell/mvneta.c mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, mac_h); pp 1574 drivers/net/ethernet/marvell/mvneta.c mvneta_set_ucast_addr(pp, addr[5], queue); pp 1580 drivers/net/ethernet/marvell/mvneta.c static void mvneta_rx_pkts_coal_set(struct mvneta_port *pp, pp 1583 drivers/net/ethernet/marvell/mvneta.c mvreg_write(pp, MVNETA_RXQ_THRESHOLD_REG(rxq->id), pp 1590 drivers/net/ethernet/marvell/mvneta.c static void mvneta_rx_time_coal_set(struct mvneta_port *pp, pp 1596 drivers/net/ethernet/marvell/mvneta.c clk_rate = clk_get_rate(pp->clk); pp 1599 drivers/net/ethernet/marvell/mvneta.c mvreg_write(pp, MVNETA_RXQ_TIME_COAL_REG(rxq->id), val); pp 1603 drivers/net/ethernet/marvell/mvneta.c static void mvneta_tx_done_pkts_coal_set(struct mvneta_port *pp, pp 1608 drivers/net/ethernet/marvell/mvneta.c val = mvreg_read(pp, MVNETA_TXQ_SIZE_REG(txq->id)); pp 1613 drivers/net/ethernet/marvell/mvneta.c mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), val); pp 1629 drivers/net/ethernet/marvell/mvneta.c static void mvneta_txq_sent_desc_dec(struct mvneta_port *pp, pp 1638 drivers/net/ethernet/marvell/mvneta.c mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val); pp 1643 drivers/net/ethernet/marvell/mvneta.c mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val); pp 1647 drivers/net/ethernet/marvell/mvneta.c static int mvneta_txq_sent_desc_num_get(struct mvneta_port *pp, pp 1653 drivers/net/ethernet/marvell/mvneta.c val = mvreg_read(pp, MVNETA_TXQ_STATUS_REG(txq->id)); pp 1663 drivers/net/ethernet/marvell/mvneta.c static int mvneta_txq_sent_desc_proc(struct mvneta_port *pp, pp 1669 drivers/net/ethernet/marvell/mvneta.c sent_desc = mvneta_txq_sent_desc_num_get(pp, txq); pp 1673 drivers/net/ethernet/marvell/mvneta.c mvneta_txq_sent_desc_dec(pp, txq, sent_desc); pp 1708 drivers/net/ethernet/marvell/mvneta.c static void mvneta_rx_error(struct mvneta_port *pp, pp 1711 drivers/net/ethernet/marvell/mvneta.c struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); pp 1721 drivers/net/ethernet/marvell/mvneta.c netdev_err(pp->dev, "bad rx status %08x (crc error), size=%d\n", pp 1725 drivers/net/ethernet/marvell/mvneta.c netdev_err(pp->dev, "bad rx status %08x (overrun error), size=%d\n", pp 1729 drivers/net/ethernet/marvell/mvneta.c netdev_err(pp->dev, "bad rx status %08x (max frame length error), size=%d\n", pp 1733 drivers/net/ethernet/marvell/mvneta.c netdev_err(pp->dev, "bad rx status %08x (resource error), size=%d\n", pp 1740 drivers/net/ethernet/marvell/mvneta.c static void mvneta_rx_csum(struct mvneta_port *pp, u32 
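mvneta_mac_addr_set() above splits the six-byte address across two registers; the byte packing below is reconstructed from the driver as an assumption, since the listing elides it:

mac_h = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3];
mac_l = (addr[4] << 8)  |  addr[5];

mvreg_write(pp, MVNETA_MAC_ADDR_LOW, mac_l);
mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, mac_h);

/* Only then is the unicast filter pointed at the chosen RX queue,
 * keyed on the address's last nibble (see mvneta_set_ucast_addr above). */
mvneta_set_ucast_addr(pp, addr[5], queue);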
status, pp 1743 drivers/net/ethernet/marvell/mvneta.c if ((pp->dev->features & NETIF_F_RXCSUM) && pp 1758 drivers/net/ethernet/marvell/mvneta.c static struct mvneta_tx_queue *mvneta_tx_done_policy(struct mvneta_port *pp, pp 1763 drivers/net/ethernet/marvell/mvneta.c return &pp->txqs[queue]; pp 1767 drivers/net/ethernet/marvell/mvneta.c static void mvneta_txq_bufs_free(struct mvneta_port *pp, pp 1787 drivers/net/ethernet/marvell/mvneta.c dma_unmap_single(pp->dev->dev.parent, pp 1799 drivers/net/ethernet/marvell/mvneta.c static void mvneta_txq_done(struct mvneta_port *pp, pp 1802 drivers/net/ethernet/marvell/mvneta.c struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id); pp 1805 drivers/net/ethernet/marvell/mvneta.c tx_done = mvneta_txq_sent_desc_proc(pp, txq); pp 1809 drivers/net/ethernet/marvell/mvneta.c mvneta_txq_bufs_free(pp, txq, tx_done, nq); pp 1821 drivers/net/ethernet/marvell/mvneta.c static int mvneta_rx_refill(struct mvneta_port *pp, pp 1834 drivers/net/ethernet/marvell/mvneta.c phys_addr = dma_map_page(pp->dev->dev.parent, page, 0, PAGE_SIZE, pp 1836 drivers/net/ethernet/marvell/mvneta.c if (unlikely(dma_mapping_error(pp->dev->dev.parent, phys_addr))) { pp 1841 drivers/net/ethernet/marvell/mvneta.c phys_addr += pp->rx_offset_correction; pp 1847 drivers/net/ethernet/marvell/mvneta.c static u32 mvneta_skb_tx_csum(struct mvneta_port *pp, struct sk_buff *skb) pp 1878 drivers/net/ethernet/marvell/mvneta.c static void mvneta_rxq_drop_pkts(struct mvneta_port *pp, pp 1883 drivers/net/ethernet/marvell/mvneta.c rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq); pp 1885 drivers/net/ethernet/marvell/mvneta.c mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done); pp 1887 drivers/net/ethernet/marvell/mvneta.c if (pp->bm_priv) { pp 1894 drivers/net/ethernet/marvell/mvneta.c bm_pool = &pp->bm_priv->bm_pools[pool_id]; pp 1896 drivers/net/ethernet/marvell/mvneta.c mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool, pp 1908 drivers/net/ethernet/marvell/mvneta.c dma_unmap_page(pp->dev->dev.parent, rx_desc->buf_phys_addr, pp 1915 drivers/net/ethernet/marvell/mvneta.c int mvneta_rx_refill_queue(struct mvneta_port *pp, struct mvneta_rx_queue *rxq) pp 1924 drivers/net/ethernet/marvell/mvneta.c if (mvneta_rx_refill(pp, rx_desc, rxq, GFP_ATOMIC)) { pp 1941 drivers/net/ethernet/marvell/mvneta.c struct mvneta_port *pp, int budget, pp 1944 drivers/net/ethernet/marvell/mvneta.c struct net_device *dev = pp->dev; pp 1951 drivers/net/ethernet/marvell/mvneta.c rx_todo = mvneta_rxq_busy_desc_num_get(pp, rxq); pp 1978 drivers/net/ethernet/marvell/mvneta.c mvneta_rx_error(pp, rx_desc); pp 1989 drivers/net/ethernet/marvell/mvneta.c struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); pp 2010 drivers/net/ethernet/marvell/mvneta.c mvneta_rx_csum(pp, rx_status, rxq->skb); pp 2095 drivers/net/ethernet/marvell/mvneta.c struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); pp 2104 drivers/net/ethernet/marvell/mvneta.c refill = mvneta_rx_refill_queue(pp, rxq); pp 2107 drivers/net/ethernet/marvell/mvneta.c mvneta_rxq_desc_num_update(pp, rxq, rx_proc, refill); pp 2114 drivers/net/ethernet/marvell/mvneta.c struct mvneta_port *pp, int rx_todo, pp 2117 drivers/net/ethernet/marvell/mvneta.c struct net_device *dev = pp->dev; pp 2123 drivers/net/ethernet/marvell/mvneta.c rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq); pp 2147 drivers/net/ethernet/marvell/mvneta.c bm_pool = &pp->bm_priv->bm_pools[pool_id]; pp 2153 drivers/net/ethernet/marvell/mvneta.c mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool, pp 2156 
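mvneta_rx_refill() above maps one fresh page per RX descriptor. A condensed sketch of that step; the page allocation and the descriptor write-back are elided, and the failure path shown is illustrative:

phys_addr = dma_map_page(pp->dev->dev.parent, page, 0, PAGE_SIZE,
                         DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(pp->dev->dev.parent, phys_addr))) {
        __free_page(page);              /* mapping failed, return the page */
        return -ENOMEM;
}

/* The NIC is handed a shifted address so the payload lands with the
 * configured headroom in front of it. */
phys_addr += pp->rx_offset_correction;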
drivers/net/ethernet/marvell/mvneta.c mvneta_rx_error(pp, rx_desc); pp 2167 drivers/net/ethernet/marvell/mvneta.c dma_sync_single_range_for_cpu(&pp->bm_priv->pdev->dev, pp 2176 drivers/net/ethernet/marvell/mvneta.c mvneta_rx_csum(pp, rx_status, skb); pp 2183 drivers/net/ethernet/marvell/mvneta.c mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool, pp 2205 drivers/net/ethernet/marvell/mvneta.c dma_unmap_single(&pp->bm_priv->pdev->dev, phys_addr, pp 2219 drivers/net/ethernet/marvell/mvneta.c mvneta_rx_csum(pp, rx_status, skb); pp 2225 drivers/net/ethernet/marvell/mvneta.c struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); pp 2234 drivers/net/ethernet/marvell/mvneta.c mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done); pp 2241 drivers/net/ethernet/marvell/mvneta.c struct mvneta_port *pp, struct mvneta_tx_queue *txq) pp 2249 drivers/net/ethernet/marvell/mvneta.c tx_desc->command = mvneta_skb_tx_csum(pp, skb); pp 2293 drivers/net/ethernet/marvell/mvneta.c struct mvneta_port *pp = netdev_priv(dev); pp 2322 drivers/net/ethernet/marvell/mvneta.c mvneta_tso_put_hdr(skb, pp, txq); pp 2350 drivers/net/ethernet/marvell/mvneta.c dma_unmap_single(pp->dev->dev.parent, pp 2360 drivers/net/ethernet/marvell/mvneta.c static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb, pp 2374 drivers/net/ethernet/marvell/mvneta.c dma_map_single(pp->dev->dev.parent, addr, pp 2377 drivers/net/ethernet/marvell/mvneta.c if (dma_mapping_error(pp->dev->dev.parent, pp 2403 drivers/net/ethernet/marvell/mvneta.c dma_unmap_single(pp->dev->dev.parent, pp 2416 drivers/net/ethernet/marvell/mvneta.c struct mvneta_port *pp = netdev_priv(dev); pp 2418 drivers/net/ethernet/marvell/mvneta.c struct mvneta_tx_queue *txq = &pp->txqs[txq_id]; pp 2437 drivers/net/ethernet/marvell/mvneta.c tx_cmd = mvneta_skb_tx_csum(pp, skb); pp 2464 drivers/net/ethernet/marvell/mvneta.c if (mvneta_tx_frag_process(pp, skb, txq)) { pp 2477 drivers/net/ethernet/marvell/mvneta.c struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); pp 2488 drivers/net/ethernet/marvell/mvneta.c mvneta_txq_pend_desc_add(pp, txq, frags); pp 2506 drivers/net/ethernet/marvell/mvneta.c static void mvneta_txq_done_force(struct mvneta_port *pp, pp 2510 drivers/net/ethernet/marvell/mvneta.c struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id); pp 2513 drivers/net/ethernet/marvell/mvneta.c mvneta_txq_bufs_free(pp, txq, tx_done, nq); pp 2524 drivers/net/ethernet/marvell/mvneta.c static void mvneta_tx_done_gbe(struct mvneta_port *pp, u32 cause_tx_done) pp 2531 drivers/net/ethernet/marvell/mvneta.c txq = mvneta_tx_done_policy(pp, cause_tx_done); pp 2533 drivers/net/ethernet/marvell/mvneta.c nq = netdev_get_tx_queue(pp->dev, txq->id); pp 2537 drivers/net/ethernet/marvell/mvneta.c mvneta_txq_done(pp, txq); pp 2572 drivers/net/ethernet/marvell/mvneta.c static void mvneta_set_special_mcast_addr(struct mvneta_port *pp, pp 2585 drivers/net/ethernet/marvell/mvneta.c smc_table_reg = mvreg_read(pp, (MVNETA_DA_FILT_SPEC_MCAST pp 2595 drivers/net/ethernet/marvell/mvneta.c mvreg_write(pp, MVNETA_DA_FILT_SPEC_MCAST + tbl_offset * 4, pp 2607 drivers/net/ethernet/marvell/mvneta.c static void mvneta_set_other_mcast_addr(struct mvneta_port *pp, pp 2618 drivers/net/ethernet/marvell/mvneta.c omc_table_reg = mvreg_read(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset); pp 2628 drivers/net/ethernet/marvell/mvneta.c mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + tbl_offset, omc_table_reg); pp 2640 drivers/net/ethernet/marvell/mvneta.c static int mvneta_mcast_addr_set(struct 
mvneta_port *pp, unsigned char *p_addr, pp 2646 drivers/net/ethernet/marvell/mvneta.c mvneta_set_special_mcast_addr(pp, p_addr[5], queue); pp 2652 drivers/net/ethernet/marvell/mvneta.c if (pp->mcast_count[crc_result] == 0) { pp 2653 drivers/net/ethernet/marvell/mvneta.c netdev_info(pp->dev, "No valid Mcast for crc8=0x%02x\n", pp 2658 drivers/net/ethernet/marvell/mvneta.c pp->mcast_count[crc_result]--; pp 2659 drivers/net/ethernet/marvell/mvneta.c if (pp->mcast_count[crc_result] != 0) { pp 2660 drivers/net/ethernet/marvell/mvneta.c netdev_info(pp->dev, pp 2662 drivers/net/ethernet/marvell/mvneta.c pp->mcast_count[crc_result], crc_result); pp 2666 drivers/net/ethernet/marvell/mvneta.c pp->mcast_count[crc_result]++; pp 2668 drivers/net/ethernet/marvell/mvneta.c mvneta_set_other_mcast_addr(pp, crc_result, queue); pp 2674 drivers/net/ethernet/marvell/mvneta.c static void mvneta_rx_unicast_promisc_set(struct mvneta_port *pp, pp 2679 drivers/net/ethernet/marvell/mvneta.c port_cfg_reg = mvreg_read(pp, MVNETA_PORT_CONFIG); pp 2681 drivers/net/ethernet/marvell/mvneta.c val = mvreg_read(pp, MVNETA_TYPE_PRIO); pp 2688 drivers/net/ethernet/marvell/mvneta.c mvreg_write(pp, MVNETA_MAC_ADDR_LOW, 0xffff); pp 2689 drivers/net/ethernet/marvell/mvneta.c mvreg_write(pp, MVNETA_MAC_ADDR_HIGH, 0xffffffff); pp 2696 drivers/net/ethernet/marvell/mvneta.c mvreg_write(pp, MVNETA_PORT_CONFIG, port_cfg_reg); pp 2697 drivers/net/ethernet/marvell/mvneta.c mvreg_write(pp, MVNETA_TYPE_PRIO, val); pp 2703 drivers/net/ethernet/marvell/mvneta.c struct mvneta_port *pp = netdev_priv(dev); pp 2708 drivers/net/ethernet/marvell/mvneta.c mvneta_rx_unicast_promisc_set(pp, 1); pp 2709 drivers/net/ethernet/marvell/mvneta.c mvneta_set_ucast_table(pp, pp->rxq_def); pp 2710 drivers/net/ethernet/marvell/mvneta.c mvneta_set_special_mcast_table(pp, pp->rxq_def); pp 2711 drivers/net/ethernet/marvell/mvneta.c mvneta_set_other_mcast_table(pp, pp->rxq_def); pp 2714 drivers/net/ethernet/marvell/mvneta.c mvneta_rx_unicast_promisc_set(pp, 0); pp 2715 drivers/net/ethernet/marvell/mvneta.c mvneta_set_ucast_table(pp, -1); pp 2716 drivers/net/ethernet/marvell/mvneta.c mvneta_mac_addr_set(pp, dev->dev_addr, pp->rxq_def); pp 2720 drivers/net/ethernet/marvell/mvneta.c mvneta_set_special_mcast_table(pp, pp->rxq_def); pp 2721 drivers/net/ethernet/marvell/mvneta.c mvneta_set_other_mcast_table(pp, pp->rxq_def); pp 2724 drivers/net/ethernet/marvell/mvneta.c mvneta_set_special_mcast_table(pp, -1); pp 2725 drivers/net/ethernet/marvell/mvneta.c mvneta_set_other_mcast_table(pp, -1); pp 2729 drivers/net/ethernet/marvell/mvneta.c mvneta_mcast_addr_set(pp, ha->addr, pp 2730 drivers/net/ethernet/marvell/mvneta.c pp->rxq_def); pp 2740 drivers/net/ethernet/marvell/mvneta.c struct mvneta_port *pp = (struct mvneta_port *)dev_id; pp 2742 drivers/net/ethernet/marvell/mvneta.c mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0); pp 2743 drivers/net/ethernet/marvell/mvneta.c napi_schedule(&pp->napi); pp 2753 drivers/net/ethernet/marvell/mvneta.c disable_percpu_irq(port->pp->dev->irq); pp 2759 drivers/net/ethernet/marvell/mvneta.c static void mvneta_link_change(struct mvneta_port *pp) pp 2761 drivers/net/ethernet/marvell/mvneta.c u32 gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS); pp 2763 drivers/net/ethernet/marvell/mvneta.c phylink_mac_change(pp->phylink, !!(gmac_stat & MVNETA_GMAC_LINK_UP)); pp 2778 drivers/net/ethernet/marvell/mvneta.c struct mvneta_port *pp = netdev_priv(napi->dev); pp 2779 drivers/net/ethernet/marvell/mvneta.c struct mvneta_pcpu_port *port = 
this_cpu_ptr(pp->ports); pp 2781 drivers/net/ethernet/marvell/mvneta.c if (!netif_running(pp->dev)) { pp 2787 drivers/net/ethernet/marvell/mvneta.c cause_rx_tx = mvreg_read(pp, MVNETA_INTR_NEW_CAUSE); pp 2789 drivers/net/ethernet/marvell/mvneta.c u32 cause_misc = mvreg_read(pp, MVNETA_INTR_MISC_CAUSE); pp 2791 drivers/net/ethernet/marvell/mvneta.c mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0); pp 2795 drivers/net/ethernet/marvell/mvneta.c mvneta_link_change(pp); pp 2800 drivers/net/ethernet/marvell/mvneta.c mvneta_tx_done_gbe(pp, (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL)); pp 2807 drivers/net/ethernet/marvell/mvneta.c cause_rx_tx |= pp->neta_armada3700 ? pp->cause_rx_tx : pp 2813 drivers/net/ethernet/marvell/mvneta.c if (pp->bm_priv) pp 2814 drivers/net/ethernet/marvell/mvneta.c rx_done = mvneta_rx_hwbm(napi, pp, budget, pp 2815 drivers/net/ethernet/marvell/mvneta.c &pp->rxqs[rx_queue]); pp 2817 drivers/net/ethernet/marvell/mvneta.c rx_done = mvneta_rx_swbm(napi, pp, budget, pp 2818 drivers/net/ethernet/marvell/mvneta.c &pp->rxqs[rx_queue]); pp 2825 drivers/net/ethernet/marvell/mvneta.c if (pp->neta_armada3700) { pp 2829 drivers/net/ethernet/marvell/mvneta.c mvreg_write(pp, MVNETA_INTR_NEW_MASK, pp 2835 drivers/net/ethernet/marvell/mvneta.c enable_percpu_irq(pp->dev->irq, 0); pp 2839 drivers/net/ethernet/marvell/mvneta.c if (pp->neta_armada3700) pp 2840 drivers/net/ethernet/marvell/mvneta.c pp->cause_rx_tx = cause_rx_tx; pp 2848 drivers/net/ethernet/marvell/mvneta.c static int mvneta_rxq_fill(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, pp 2855 drivers/net/ethernet/marvell/mvneta.c if (mvneta_rx_refill(pp, rxq->descs + i, rxq, pp 2857 drivers/net/ethernet/marvell/mvneta.c netdev_err(pp->dev, pp 2867 drivers/net/ethernet/marvell/mvneta.c mvneta_rxq_non_occup_desc_add(pp, rxq, i); pp 2873 drivers/net/ethernet/marvell/mvneta.c static void mvneta_tx_reset(struct mvneta_port *pp) pp 2879 drivers/net/ethernet/marvell/mvneta.c mvneta_txq_done_force(pp, &pp->txqs[queue]); pp 2881 drivers/net/ethernet/marvell/mvneta.c mvreg_write(pp, MVNETA_PORT_TX_RESET, MVNETA_PORT_TX_DMA_RESET); pp 2882 drivers/net/ethernet/marvell/mvneta.c mvreg_write(pp, MVNETA_PORT_TX_RESET, 0); pp 2885 drivers/net/ethernet/marvell/mvneta.c static void mvneta_rx_reset(struct mvneta_port *pp) pp 2887 drivers/net/ethernet/marvell/mvneta.c mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET); pp 2888 drivers/net/ethernet/marvell/mvneta.c mvreg_write(pp, MVNETA_PORT_RX_RESET, 0); pp 2893 drivers/net/ethernet/marvell/mvneta.c static int mvneta_rxq_sw_init(struct mvneta_port *pp, pp 2896 drivers/net/ethernet/marvell/mvneta.c rxq->size = pp->rx_ring_size; pp 2899 drivers/net/ethernet/marvell/mvneta.c rxq->descs = dma_alloc_coherent(pp->dev->dev.parent, pp 2910 drivers/net/ethernet/marvell/mvneta.c static void mvneta_rxq_hw_init(struct mvneta_port *pp, pp 2914 drivers/net/ethernet/marvell/mvneta.c mvreg_write(pp, MVNETA_RXQ_BASE_ADDR_REG(rxq->id), rxq->descs_phys); pp 2915 drivers/net/ethernet/marvell/mvneta.c mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), rxq->size); pp 2918 drivers/net/ethernet/marvell/mvneta.c mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal); pp 2919 drivers/net/ethernet/marvell/mvneta.c mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal); pp 2921 drivers/net/ethernet/marvell/mvneta.c if (!pp->bm_priv) { pp 2923 drivers/net/ethernet/marvell/mvneta.c mvneta_rxq_offset_set(pp, rxq, 0); pp 2924 drivers/net/ethernet/marvell/mvneta.c mvneta_rxq_buf_size_set(pp, rxq, PAGE_SIZE < SZ_64K ? 
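The tail of mvneta_poll() above is the standard NAPI completion step: when the budget was not exhausted, polling stops and the interrupt source is re-armed, per CPU on most SoCs but through the shared mask register on Armada 3700. A hedged sketch; napi_complete_done() and the exact mask value are assumptions, as the listing elides them:

if (budget > 0) {
        cause_rx_tx = 0;
        napi_complete_done(napi, rx_done);

        if (pp->neta_armada3700)
                mvreg_write(pp, MVNETA_INTR_NEW_MASK,
                            MVNETA_RX_INTR_MASK(rxq_number) |
                            MVNETA_TX_INTR_MASK(txq_number) |
                            MVNETA_MISCINTR_INTR_MASK);
        else
                enable_percpu_irq(pp->dev->irq, 0);
}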
pp 2926 drivers/net/ethernet/marvell/mvneta.c MVNETA_RX_BUF_SIZE(pp->pkt_size)); pp 2927 drivers/net/ethernet/marvell/mvneta.c mvneta_rxq_bm_disable(pp, rxq); pp 2928 drivers/net/ethernet/marvell/mvneta.c mvneta_rxq_fill(pp, rxq, rxq->size); pp 2931 drivers/net/ethernet/marvell/mvneta.c mvneta_rxq_offset_set(pp, rxq, pp 2932 drivers/net/ethernet/marvell/mvneta.c NET_SKB_PAD - pp->rx_offset_correction); pp 2934 drivers/net/ethernet/marvell/mvneta.c mvneta_rxq_bm_enable(pp, rxq); pp 2936 drivers/net/ethernet/marvell/mvneta.c mvneta_rxq_long_pool_set(pp, rxq); pp 2937 drivers/net/ethernet/marvell/mvneta.c mvneta_rxq_short_pool_set(pp, rxq); pp 2938 drivers/net/ethernet/marvell/mvneta.c mvneta_rxq_non_occup_desc_add(pp, rxq, rxq->size); pp 2943 drivers/net/ethernet/marvell/mvneta.c static int mvneta_rxq_init(struct mvneta_port *pp, pp 2949 drivers/net/ethernet/marvell/mvneta.c ret = mvneta_rxq_sw_init(pp, rxq); pp 2953 drivers/net/ethernet/marvell/mvneta.c mvneta_rxq_hw_init(pp, rxq); pp 2959 drivers/net/ethernet/marvell/mvneta.c static void mvneta_rxq_deinit(struct mvneta_port *pp, pp 2962 drivers/net/ethernet/marvell/mvneta.c mvneta_rxq_drop_pkts(pp, rxq); pp 2968 drivers/net/ethernet/marvell/mvneta.c dma_free_coherent(pp->dev->dev.parent, pp 2983 drivers/net/ethernet/marvell/mvneta.c static int mvneta_txq_sw_init(struct mvneta_port *pp, pp 2988 drivers/net/ethernet/marvell/mvneta.c txq->size = pp->tx_ring_size; pp 2998 drivers/net/ethernet/marvell/mvneta.c txq->descs = dma_alloc_coherent(pp->dev->dev.parent, pp 3009 drivers/net/ethernet/marvell/mvneta.c dma_free_coherent(pp->dev->dev.parent, pp 3016 drivers/net/ethernet/marvell/mvneta.c txq->tso_hdrs = dma_alloc_coherent(pp->dev->dev.parent, pp 3021 drivers/net/ethernet/marvell/mvneta.c dma_free_coherent(pp->dev->dev.parent, pp 3031 drivers/net/ethernet/marvell/mvneta.c cpu = pp->rxq_def % num_present_cpus(); pp 3033 drivers/net/ethernet/marvell/mvneta.c netif_set_xps_queue(pp->dev, &txq->affinity_mask, txq->id); pp 3038 drivers/net/ethernet/marvell/mvneta.c static void mvneta_txq_hw_init(struct mvneta_port *pp, pp 3042 drivers/net/ethernet/marvell/mvneta.c mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0x03ffffff); pp 3043 drivers/net/ethernet/marvell/mvneta.c mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0x3fffffff); pp 3046 drivers/net/ethernet/marvell/mvneta.c mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), txq->descs_phys); pp 3047 drivers/net/ethernet/marvell/mvneta.c mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), txq->size); pp 3049 drivers/net/ethernet/marvell/mvneta.c mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal); pp 3053 drivers/net/ethernet/marvell/mvneta.c static int mvneta_txq_init(struct mvneta_port *pp, pp 3058 drivers/net/ethernet/marvell/mvneta.c ret = mvneta_txq_sw_init(pp, txq); pp 3062 drivers/net/ethernet/marvell/mvneta.c mvneta_txq_hw_init(pp, txq); pp 3068 drivers/net/ethernet/marvell/mvneta.c static void mvneta_txq_sw_deinit(struct mvneta_port *pp, pp 3071 drivers/net/ethernet/marvell/mvneta.c struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id); pp 3076 drivers/net/ethernet/marvell/mvneta.c dma_free_coherent(pp->dev->dev.parent, pp 3080 drivers/net/ethernet/marvell/mvneta.c dma_free_coherent(pp->dev->dev.parent, pp 3092 drivers/net/ethernet/marvell/mvneta.c static void mvneta_txq_hw_deinit(struct mvneta_port *pp, pp 3096 drivers/net/ethernet/marvell/mvneta.c mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0); pp 3097 drivers/net/ethernet/marvell/mvneta.c mvreg_write(pp, 
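mvneta_txq_sw_init() above allocates two coherent DMA regions, one for the descriptor ring and one for the TSO headers, and frees the first if the second fails. Condensed sketch; the size expressions are reconstructed from the driver and should be treated as assumptions:

txq->descs = dma_alloc_coherent(pp->dev->dev.parent,
                                txq->size * MVNETA_DESC_ALIGNED_SIZE,
                                &txq->descs_phys, GFP_KERNEL);
if (!txq->descs)
        return -ENOMEM;

txq->tso_hdrs = dma_alloc_coherent(pp->dev->dev.parent,
                                   txq->size * TSO_HEADER_SIZE,
                                   &txq->tso_hdrs_phys, GFP_KERNEL);
if (!txq->tso_hdrs) {
        /* unwind the descriptor ring allocated above */
        dma_free_coherent(pp->dev->dev.parent,
                          txq->size * MVNETA_DESC_ALIGNED_SIZE,
                          txq->descs, txq->descs_phys);
        return -ENOMEM;
}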
MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0); pp 3100 drivers/net/ethernet/marvell/mvneta.c mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), 0); pp 3101 drivers/net/ethernet/marvell/mvneta.c mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), 0); pp 3104 drivers/net/ethernet/marvell/mvneta.c static void mvneta_txq_deinit(struct mvneta_port *pp, pp 3107 drivers/net/ethernet/marvell/mvneta.c mvneta_txq_sw_deinit(pp, txq); pp 3108 drivers/net/ethernet/marvell/mvneta.c mvneta_txq_hw_deinit(pp, txq); pp 3112 drivers/net/ethernet/marvell/mvneta.c static void mvneta_cleanup_txqs(struct mvneta_port *pp) pp 3117 drivers/net/ethernet/marvell/mvneta.c mvneta_txq_deinit(pp, &pp->txqs[queue]); pp 3121 drivers/net/ethernet/marvell/mvneta.c static void mvneta_cleanup_rxqs(struct mvneta_port *pp) pp 3126 drivers/net/ethernet/marvell/mvneta.c mvneta_rxq_deinit(pp, &pp->rxqs[queue]); pp 3131 drivers/net/ethernet/marvell/mvneta.c static int mvneta_setup_rxqs(struct mvneta_port *pp) pp 3136 drivers/net/ethernet/marvell/mvneta.c int err = mvneta_rxq_init(pp, &pp->rxqs[queue]); pp 3139 drivers/net/ethernet/marvell/mvneta.c netdev_err(pp->dev, "%s: can't create rxq=%d\n", pp 3141 drivers/net/ethernet/marvell/mvneta.c mvneta_cleanup_rxqs(pp); pp 3150 drivers/net/ethernet/marvell/mvneta.c static int mvneta_setup_txqs(struct mvneta_port *pp) pp 3155 drivers/net/ethernet/marvell/mvneta.c int err = mvneta_txq_init(pp, &pp->txqs[queue]); pp 3157 drivers/net/ethernet/marvell/mvneta.c netdev_err(pp->dev, "%s: can't create txq=%d\n", pp 3159 drivers/net/ethernet/marvell/mvneta.c mvneta_cleanup_txqs(pp); pp 3167 drivers/net/ethernet/marvell/mvneta.c static int mvneta_comphy_init(struct mvneta_port *pp) pp 3171 drivers/net/ethernet/marvell/mvneta.c if (!pp->comphy) pp 3174 drivers/net/ethernet/marvell/mvneta.c ret = phy_set_mode_ext(pp->comphy, PHY_MODE_ETHERNET, pp 3175 drivers/net/ethernet/marvell/mvneta.c pp->phy_interface); pp 3179 drivers/net/ethernet/marvell/mvneta.c return phy_power_on(pp->comphy); pp 3182 drivers/net/ethernet/marvell/mvneta.c static void mvneta_start_dev(struct mvneta_port *pp) pp 3186 drivers/net/ethernet/marvell/mvneta.c WARN_ON(mvneta_comphy_init(pp)); pp 3188 drivers/net/ethernet/marvell/mvneta.c mvneta_max_rx_size_set(pp, pp->pkt_size); pp 3189 drivers/net/ethernet/marvell/mvneta.c mvneta_txq_max_tx_size_set(pp, pp->pkt_size); pp 3192 drivers/net/ethernet/marvell/mvneta.c mvneta_port_enable(pp); pp 3194 drivers/net/ethernet/marvell/mvneta.c if (!pp->neta_armada3700) { pp 3198 drivers/net/ethernet/marvell/mvneta.c per_cpu_ptr(pp->ports, cpu); pp 3203 drivers/net/ethernet/marvell/mvneta.c napi_enable(&pp->napi); pp 3207 drivers/net/ethernet/marvell/mvneta.c on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true); pp 3209 drivers/net/ethernet/marvell/mvneta.c mvreg_write(pp, MVNETA_INTR_MISC_MASK, pp 3213 drivers/net/ethernet/marvell/mvneta.c phylink_start(pp->phylink); pp 3214 drivers/net/ethernet/marvell/mvneta.c netif_tx_start_all_queues(pp->dev); pp 3217 drivers/net/ethernet/marvell/mvneta.c static void mvneta_stop_dev(struct mvneta_port *pp) pp 3221 drivers/net/ethernet/marvell/mvneta.c phylink_stop(pp->phylink); pp 3223 drivers/net/ethernet/marvell/mvneta.c if (!pp->neta_armada3700) { pp 3226 drivers/net/ethernet/marvell/mvneta.c per_cpu_ptr(pp->ports, cpu); pp 3231 drivers/net/ethernet/marvell/mvneta.c napi_disable(&pp->napi); pp 3234 drivers/net/ethernet/marvell/mvneta.c netif_carrier_off(pp->dev); pp 3236 drivers/net/ethernet/marvell/mvneta.c mvneta_port_down(pp); pp 3237 
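mvneta_setup_rxqs() and mvneta_setup_txqs() above follow the usual init-with-rollback loop: the first queue that fails to initialise tears down everything built so far. The RX variant, reassembled from the listing (rxq_number is the driver-wide queue count):

static int mvneta_setup_rxqs(struct mvneta_port *pp)
{
        int queue;

        for (queue = 0; queue < rxq_number; queue++) {
                int err = mvneta_rxq_init(pp, &pp->rxqs[queue]);

                if (err) {
                        netdev_err(pp->dev, "%s: can't create rxq=%d\n",
                                   __func__, queue);
                        mvneta_cleanup_rxqs(pp);        /* roll back */
                        return err;
                }
        }
        return 0;
}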
pp 3240 drivers/net/ethernet/marvell/mvneta.c mvneta_port_disable(pp);
pp 3243 drivers/net/ethernet/marvell/mvneta.c on_each_cpu(mvneta_percpu_clear_intr_cause, pp, true);
pp 3246 drivers/net/ethernet/marvell/mvneta.c on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
pp 3248 drivers/net/ethernet/marvell/mvneta.c mvneta_tx_reset(pp);
pp 3249 drivers/net/ethernet/marvell/mvneta.c mvneta_rx_reset(pp);
pp 3251 drivers/net/ethernet/marvell/mvneta.c WARN_ON(phy_power_off(pp->comphy));
pp 3256 drivers/net/ethernet/marvell/mvneta.c struct mvneta_port *pp = arg;
pp 3258 drivers/net/ethernet/marvell/mvneta.c enable_percpu_irq(pp->dev->irq, IRQ_TYPE_NONE);
pp 3263 drivers/net/ethernet/marvell/mvneta.c struct mvneta_port *pp = arg;
pp 3265 drivers/net/ethernet/marvell/mvneta.c disable_percpu_irq(pp->dev->irq);
pp 3271 drivers/net/ethernet/marvell/mvneta.c struct mvneta_port *pp = netdev_priv(dev);
pp 3283 drivers/net/ethernet/marvell/mvneta.c if (pp->bm_priv)
pp 3284 drivers/net/ethernet/marvell/mvneta.c mvneta_bm_update_mtu(pp, mtu);
pp 3293 drivers/net/ethernet/marvell/mvneta.c mvneta_stop_dev(pp);
pp 3294 drivers/net/ethernet/marvell/mvneta.c on_each_cpu(mvneta_percpu_disable, pp, true);
pp 3296 drivers/net/ethernet/marvell/mvneta.c mvneta_cleanup_txqs(pp);
pp 3297 drivers/net/ethernet/marvell/mvneta.c mvneta_cleanup_rxqs(pp);
pp 3299 drivers/net/ethernet/marvell/mvneta.c if (pp->bm_priv)
pp 3300 drivers/net/ethernet/marvell/mvneta.c mvneta_bm_update_mtu(pp, mtu);
pp 3302 drivers/net/ethernet/marvell/mvneta.c pp->pkt_size = MVNETA_RX_PKT_SIZE(dev->mtu);
pp 3304 drivers/net/ethernet/marvell/mvneta.c ret = mvneta_setup_rxqs(pp);
pp 3310 drivers/net/ethernet/marvell/mvneta.c ret = mvneta_setup_txqs(pp);
pp 3316 drivers/net/ethernet/marvell/mvneta.c on_each_cpu(mvneta_percpu_enable, pp, true);
pp 3317 drivers/net/ethernet/marvell/mvneta.c mvneta_start_dev(pp);
pp 3327 drivers/net/ethernet/marvell/mvneta.c struct mvneta_port *pp = netdev_priv(dev);
pp 3329 drivers/net/ethernet/marvell/mvneta.c if (pp->tx_csum_limit && dev->mtu > pp->tx_csum_limit) {
pp 3333 drivers/net/ethernet/marvell/mvneta.c pp->tx_csum_limit);
pp 3340 drivers/net/ethernet/marvell/mvneta.c static void mvneta_get_mac_addr(struct mvneta_port *pp, unsigned char *addr)
pp 3344 drivers/net/ethernet/marvell/mvneta.c mac_addr_l = mvreg_read(pp, MVNETA_MAC_ADDR_LOW);
pp 3345 drivers/net/ethernet/marvell/mvneta.c mac_addr_h = mvreg_read(pp, MVNETA_MAC_ADDR_HIGH);
pp 3357 drivers/net/ethernet/marvell/mvneta.c struct mvneta_port *pp = netdev_priv(dev);
pp 3365 drivers/net/ethernet/marvell/mvneta.c mvneta_mac_addr_set(pp, dev->dev_addr, -1);
pp 3368 drivers/net/ethernet/marvell/mvneta.c mvneta_mac_addr_set(pp, sockaddr->sa_data, pp->rxq_def);
pp 3379 drivers/net/ethernet/marvell/mvneta.c struct mvneta_port *pp = netdev_priv(ndev);
pp 3400 drivers/net/ethernet/marvell/mvneta.c if (pp->comphy || state->interface != PHY_INTERFACE_MODE_2500BASEX) {
pp 3404 drivers/net/ethernet/marvell/mvneta.c if (pp->comphy || state->interface == PHY_INTERFACE_MODE_2500BASEX) {
pp 3432 drivers/net/ethernet/marvell/mvneta.c struct mvneta_port *pp = netdev_priv(ndev);
pp 3435 drivers/net/ethernet/marvell/mvneta.c gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS);
pp 3462 drivers/net/ethernet/marvell/mvneta.c struct mvneta_port *pp = netdev_priv(ndev);
pp 3463 drivers/net/ethernet/marvell/mvneta.c u32 gmac_an = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
pp 3465 drivers/net/ethernet/marvell/mvneta.c mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
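mvneta_change_mtu(), indexed above, rebuilds the port around the new packet size rather than resizing in place. A hedged sketch of that stop/cleanup/setup/start sequence (error handling compressed; the -ENOMEM fallback is an assumption):

static int change_mtu_sketch(struct net_device *dev, int mtu)
{
        struct mvneta_port *pp = netdev_priv(dev);

        mvneta_stop_dev(pp);
        on_each_cpu(mvneta_percpu_disable, pp, true);
        mvneta_cleanup_txqs(pp);
        mvneta_cleanup_rxqs(pp);

        dev->mtu = mtu;
        pp->pkt_size = MVNETA_RX_PKT_SIZE(dev->mtu);

        if (mvneta_setup_rxqs(pp) || mvneta_setup_txqs(pp))
                return -ENOMEM;         /* assumption: real code propagates the exact error */

        on_each_cpu(mvneta_percpu_enable, pp, true);
        mvneta_start_dev(pp);
        return 0;
}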
pp 3467 drivers/net/ethernet/marvell/mvneta.c mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
pp 3475 drivers/net/ethernet/marvell/mvneta.c struct mvneta_port *pp = netdev_priv(ndev);
pp 3476 drivers/net/ethernet/marvell/mvneta.c u32 new_ctrl0, gmac_ctrl0 = mvreg_read(pp, MVNETA_GMAC_CTRL_0);
pp 3477 drivers/net/ethernet/marvell/mvneta.c u32 new_ctrl2, gmac_ctrl2 = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
pp 3478 drivers/net/ethernet/marvell/mvneta.c u32 new_ctrl4, gmac_ctrl4 = mvreg_read(pp, MVNETA_GMAC_CTRL_4);
pp 3479 drivers/net/ethernet/marvell/mvneta.c u32 new_clk, gmac_clk = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER);
pp 3480 drivers/net/ethernet/marvell/mvneta.c u32 new_an, gmac_an = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
pp 3552 drivers/net/ethernet/marvell/mvneta.c mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG,
pp 3564 drivers/net/ethernet/marvell/mvneta.c if (pp->comphy && pp->phy_interface != state->interface &&
pp 3568 drivers/net/ethernet/marvell/mvneta.c pp->phy_interface = state->interface;
pp 3570 drivers/net/ethernet/marvell/mvneta.c WARN_ON(phy_power_off(pp->comphy));
pp 3571 drivers/net/ethernet/marvell/mvneta.c WARN_ON(mvneta_comphy_init(pp));
pp 3575 drivers/net/ethernet/marvell/mvneta.c mvreg_write(pp, MVNETA_GMAC_CTRL_0, new_ctrl0);
pp 3577 drivers/net/ethernet/marvell/mvneta.c mvreg_write(pp, MVNETA_GMAC_CTRL_2, new_ctrl2);
pp 3579 drivers/net/ethernet/marvell/mvneta.c mvreg_write(pp, MVNETA_GMAC_CTRL_4, new_ctrl4);
pp 3581 drivers/net/ethernet/marvell/mvneta.c mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, new_clk);
pp 3583 drivers/net/ethernet/marvell/mvneta.c mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, new_an);
pp 3586 drivers/net/ethernet/marvell/mvneta.c while ((mvreg_read(pp, MVNETA_GMAC_CTRL_2) &
pp 3592 drivers/net/ethernet/marvell/mvneta.c static void mvneta_set_eee(struct mvneta_port *pp, bool enable)
pp 3596 drivers/net/ethernet/marvell/mvneta.c lpi_ctl1 = mvreg_read(pp, MVNETA_LPI_CTRL_1);
pp 3601 drivers/net/ethernet/marvell/mvneta.c mvreg_write(pp, MVNETA_LPI_CTRL_1, lpi_ctl1);
pp 3608 drivers/net/ethernet/marvell/mvneta.c struct mvneta_port *pp = netdev_priv(ndev);
pp 3611 drivers/net/ethernet/marvell/mvneta.c mvneta_port_down(pp);
pp 3614 drivers/net/ethernet/marvell/mvneta.c val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
pp 3617 drivers/net/ethernet/marvell/mvneta.c mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
pp 3620 drivers/net/ethernet/marvell/mvneta.c pp->eee_active = false;
pp 3621 drivers/net/ethernet/marvell/mvneta.c mvneta_set_eee(pp, false);
pp 3629 drivers/net/ethernet/marvell/mvneta.c struct mvneta_port *pp = netdev_priv(ndev);
pp 3633 drivers/net/ethernet/marvell/mvneta.c val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
pp 3636 drivers/net/ethernet/marvell/mvneta.c mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
pp 3639 drivers/net/ethernet/marvell/mvneta.c mvneta_port_up(pp);
pp 3641 drivers/net/ethernet/marvell/mvneta.c if (phy && pp->eee_enabled) {
pp 3642 drivers/net/ethernet/marvell/mvneta.c pp->eee_active = phy_init_eee(phy, 0) >= 0;
pp 3643 drivers/net/ethernet/marvell/mvneta.c mvneta_set_eee(pp, pp->eee_active && pp->tx_lpi_enabled);
pp 3656 drivers/net/ethernet/marvell/mvneta.c static int mvneta_mdio_probe(struct mvneta_port *pp)
pp 3659 drivers/net/ethernet/marvell/mvneta.c int err = phylink_of_phy_connect(pp->phylink, pp->dn, 0);
pp 3662 drivers/net/ethernet/marvell/mvneta.c netdev_err(pp->dev, "could not attach PHY: %d\n", err);
pp 3664 drivers/net/ethernet/marvell/mvneta.c phylink_ethtool_get_wol(pp->phylink, &wol);
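mvneta_mac_config() above follows a strict read-modify-write discipline: every GMAC register is read once into a gmac_* snapshot, edited into a new_* copy, and written back only if it changed. Sketch of the pattern for one register (MASK and cfg_bits are placeholders, not real mvneta symbols):

u32 new_an, gmac_an = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);

new_an = (gmac_an & ~MASK) | cfg_bits;  /* edit only the bits being reconfigured */
if (new_an != gmac_an)                  /* avoid touching hardware needlessly */
        mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, new_an);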
pp 3665 drivers/net/ethernet/marvell/mvneta.c device_set_wakeup_capable(&pp->dev->dev, !!wol.supported);
pp 3670 drivers/net/ethernet/marvell/mvneta.c static void mvneta_mdio_remove(struct mvneta_port *pp)
pp 3672 drivers/net/ethernet/marvell/mvneta.c phylink_disconnect_phy(pp->phylink);
pp 3679 drivers/net/ethernet/marvell/mvneta.c static void mvneta_percpu_elect(struct mvneta_port *pp)
pp 3686 drivers/net/ethernet/marvell/mvneta.c if (cpu_online(pp->rxq_def))
pp 3687 drivers/net/ethernet/marvell/mvneta.c elected_cpu = pp->rxq_def;
pp 3703 drivers/net/ethernet/marvell/mvneta.c rxq_map |= MVNETA_CPU_RXQ_ACCESS(pp->rxq_def);
pp 3713 drivers/net/ethernet/marvell/mvneta.c txq_map = mvreg_read(pp, MVNETA_CPU_MAP(cpu)) &
pp 3716 drivers/net/ethernet/marvell/mvneta.c mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map);
pp 3722 drivers/net/ethernet/marvell/mvneta.c pp, true);
pp 3731 drivers/net/ethernet/marvell/mvneta.c struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port,
pp 3733 drivers/net/ethernet/marvell/mvneta.c struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
pp 3736 drivers/net/ethernet/marvell/mvneta.c spin_lock(&pp->lock);
pp 3741 drivers/net/ethernet/marvell/mvneta.c if (pp->is_stopped) {
pp 3742 drivers/net/ethernet/marvell/mvneta.c spin_unlock(&pp->lock);
pp 3745 drivers/net/ethernet/marvell/mvneta.c netif_tx_stop_all_queues(pp->dev);
pp 3754 drivers/net/ethernet/marvell/mvneta.c per_cpu_ptr(pp->ports, other_cpu);
pp 3761 drivers/net/ethernet/marvell/mvneta.c on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
pp 3768 drivers/net/ethernet/marvell/mvneta.c mvneta_percpu_enable(pp);
pp 3774 drivers/net/ethernet/marvell/mvneta.c mvneta_percpu_elect(pp);
pp 3777 drivers/net/ethernet/marvell/mvneta.c on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
pp 3778 drivers/net/ethernet/marvell/mvneta.c mvreg_write(pp, MVNETA_INTR_MISC_MASK,
pp 3781 drivers/net/ethernet/marvell/mvneta.c netif_tx_start_all_queues(pp->dev);
pp 3782 drivers/net/ethernet/marvell/mvneta.c spin_unlock(&pp->lock);
pp 3788 drivers/net/ethernet/marvell/mvneta.c struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port,
pp 3790 drivers/net/ethernet/marvell/mvneta.c struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
pp 3796 drivers/net/ethernet/marvell/mvneta.c spin_lock(&pp->lock);
pp 3798 drivers/net/ethernet/marvell/mvneta.c on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
pp 3799 drivers/net/ethernet/marvell/mvneta.c spin_unlock(&pp->lock);
pp 3804 drivers/net/ethernet/marvell/mvneta.c mvneta_percpu_disable(pp);
pp 3810 drivers/net/ethernet/marvell/mvneta.c struct mvneta_port *pp = hlist_entry_safe(node, struct mvneta_port,
pp 3814 drivers/net/ethernet/marvell/mvneta.c spin_lock(&pp->lock);
pp 3815 drivers/net/ethernet/marvell/mvneta.c mvneta_percpu_elect(pp);
pp 3816 drivers/net/ethernet/marvell/mvneta.c spin_unlock(&pp->lock);
pp 3818 drivers/net/ethernet/marvell/mvneta.c on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
pp 3819 drivers/net/ethernet/marvell/mvneta.c mvreg_write(pp, MVNETA_INTR_MISC_MASK,
pp 3822 drivers/net/ethernet/marvell/mvneta.c netif_tx_start_all_queues(pp->dev);
pp 3828 drivers/net/ethernet/marvell/mvneta.c struct mvneta_port *pp = netdev_priv(dev);
pp 3831 drivers/net/ethernet/marvell/mvneta.c pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
pp 3833 drivers/net/ethernet/marvell/mvneta.c ret = mvneta_setup_rxqs(pp);
pp 3837 drivers/net/ethernet/marvell/mvneta.c ret = mvneta_setup_txqs(pp);
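The CPU-hotplug callbacks above all funnel through the same election step: under pp->lock a CPU is (re)chosen to own the default RX queue, then interrupts are unmasked everywhere. Condensed from the listed lines:

spin_lock(&pp->lock);
mvneta_percpu_elect(pp);                /* re-route rxq_def to an online CPU */
spin_unlock(&pp->lock);

on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
netif_tx_start_all_queues(pp->dev);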
pp 3842 drivers/net/ethernet/marvell/mvneta.c if (pp->neta_armada3700)
pp 3843 drivers/net/ethernet/marvell/mvneta.c ret = request_irq(pp->dev->irq, mvneta_isr, 0,
pp 3844 drivers/net/ethernet/marvell/mvneta.c dev->name, pp);
pp 3846 drivers/net/ethernet/marvell/mvneta.c ret = request_percpu_irq(pp->dev->irq, mvneta_percpu_isr,
pp 3847 drivers/net/ethernet/marvell/mvneta.c dev->name, pp->ports);
pp 3849 drivers/net/ethernet/marvell/mvneta.c netdev_err(pp->dev, "cannot request irq %d\n", pp->dev->irq);
pp 3853 drivers/net/ethernet/marvell/mvneta.c if (!pp->neta_armada3700) {
pp 3857 drivers/net/ethernet/marvell/mvneta.c on_each_cpu(mvneta_percpu_enable, pp, true);
pp 3859 drivers/net/ethernet/marvell/mvneta.c pp->is_stopped = false;
pp 3864 drivers/net/ethernet/marvell/mvneta.c &pp->node_online);
pp 3869 drivers/net/ethernet/marvell/mvneta.c &pp->node_dead);
pp 3874 drivers/net/ethernet/marvell/mvneta.c ret = mvneta_mdio_probe(pp);
pp 3880 drivers/net/ethernet/marvell/mvneta.c mvneta_start_dev(pp);
pp 3885 drivers/net/ethernet/marvell/mvneta.c if (!pp->neta_armada3700)
pp 3887 drivers/net/ethernet/marvell/mvneta.c &pp->node_dead);
pp 3889 drivers/net/ethernet/marvell/mvneta.c if (!pp->neta_armada3700)
pp 3891 drivers/net/ethernet/marvell/mvneta.c &pp->node_online);
pp 3893 drivers/net/ethernet/marvell/mvneta.c if (pp->neta_armada3700) {
pp 3894 drivers/net/ethernet/marvell/mvneta.c free_irq(pp->dev->irq, pp);
pp 3896 drivers/net/ethernet/marvell/mvneta.c on_each_cpu(mvneta_percpu_disable, pp, true);
pp 3897 drivers/net/ethernet/marvell/mvneta.c free_percpu_irq(pp->dev->irq, pp->ports);
pp 3900 drivers/net/ethernet/marvell/mvneta.c mvneta_cleanup_txqs(pp);
pp 3902 drivers/net/ethernet/marvell/mvneta.c mvneta_cleanup_rxqs(pp);
pp 3909 drivers/net/ethernet/marvell/mvneta.c struct mvneta_port *pp = netdev_priv(dev);
pp 3911 drivers/net/ethernet/marvell/mvneta.c if (!pp->neta_armada3700) {
pp 3917 drivers/net/ethernet/marvell/mvneta.c spin_lock(&pp->lock);
pp 3918 drivers/net/ethernet/marvell/mvneta.c pp->is_stopped = true;
pp 3919 drivers/net/ethernet/marvell/mvneta.c spin_unlock(&pp->lock);
pp 3921 drivers/net/ethernet/marvell/mvneta.c mvneta_stop_dev(pp);
pp 3922 drivers/net/ethernet/marvell/mvneta.c mvneta_mdio_remove(pp);
pp 3925 drivers/net/ethernet/marvell/mvneta.c &pp->node_online);
pp 3927 drivers/net/ethernet/marvell/mvneta.c &pp->node_dead);
pp 3928 drivers/net/ethernet/marvell/mvneta.c on_each_cpu(mvneta_percpu_disable, pp, true);
pp 3929 drivers/net/ethernet/marvell/mvneta.c free_percpu_irq(dev->irq, pp->ports);
pp 3931 drivers/net/ethernet/marvell/mvneta.c mvneta_stop_dev(pp);
pp 3932 drivers/net/ethernet/marvell/mvneta.c mvneta_mdio_remove(pp);
pp 3933 drivers/net/ethernet/marvell/mvneta.c free_irq(dev->irq, pp);
pp 3936 drivers/net/ethernet/marvell/mvneta.c mvneta_cleanup_rxqs(pp);
pp 3937 drivers/net/ethernet/marvell/mvneta.c mvneta_cleanup_txqs(pp);
pp 3944 drivers/net/ethernet/marvell/mvneta.c struct mvneta_port *pp = netdev_priv(dev);
pp 3946 drivers/net/ethernet/marvell/mvneta.c return phylink_mii_ioctl(pp->phylink, ifr, cmd);
pp 3956 drivers/net/ethernet/marvell/mvneta.c struct mvneta_port *pp = netdev_priv(ndev);
pp 3958 drivers/net/ethernet/marvell/mvneta.c return phylink_ethtool_ksettings_set(pp->phylink, cmd);
pp 3966 drivers/net/ethernet/marvell/mvneta.c struct mvneta_port *pp = netdev_priv(ndev);
pp 3968 drivers/net/ethernet/marvell/mvneta.c return phylink_ethtool_ksettings_get(pp->phylink, cmd);
pp 3973 drivers/net/ethernet/marvell/mvneta.c struct mvneta_port *pp = netdev_priv(dev);
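mvneta_open(), indexed above, picks between two interrupt schemes: Armada 3700 exposes one summary interrupt, while the older SoCs use a per-CPU line. A sketch of the request site (the helper name is hypothetical):

static int request_port_irq_sketch(struct mvneta_port *pp)
{
        if (pp->neta_armada3700)
                return request_irq(pp->dev->irq, mvneta_isr, 0,
                                   pp->dev->name, pp);

        return request_percpu_irq(pp->dev->irq, mvneta_percpu_isr,
                                  pp->dev->name, pp->ports);
}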
pp 3975 drivers/net/ethernet/marvell/mvneta.c return phylink_ethtool_nway_reset(pp->phylink);
pp 3982 drivers/net/ethernet/marvell/mvneta.c struct mvneta_port *pp = netdev_priv(dev);
pp 3986 drivers/net/ethernet/marvell/mvneta.c struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
pp 3989 drivers/net/ethernet/marvell/mvneta.c mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal);
pp 3990 drivers/net/ethernet/marvell/mvneta.c mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal);
pp 3994 drivers/net/ethernet/marvell/mvneta.c struct mvneta_tx_queue *txq = &pp->txqs[queue];
pp 3996 drivers/net/ethernet/marvell/mvneta.c mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
pp 4006 drivers/net/ethernet/marvell/mvneta.c struct mvneta_port *pp = netdev_priv(dev);
pp 4008 drivers/net/ethernet/marvell/mvneta.c c->rx_coalesce_usecs = pp->rxqs[0].time_coal;
pp 4009 drivers/net/ethernet/marvell/mvneta.c c->rx_max_coalesced_frames = pp->rxqs[0].pkts_coal;
pp 4011 drivers/net/ethernet/marvell/mvneta.c c->tx_max_coalesced_frames = pp->txqs[0].done_pkts_coal;
pp 4031 drivers/net/ethernet/marvell/mvneta.c struct mvneta_port *pp = netdev_priv(netdev);
pp 4035 drivers/net/ethernet/marvell/mvneta.c ring->rx_pending = pp->rx_ring_size;
pp 4036 drivers/net/ethernet/marvell/mvneta.c ring->tx_pending = pp->tx_ring_size;
pp 4042 drivers/net/ethernet/marvell/mvneta.c struct mvneta_port *pp = netdev_priv(dev);
pp 4046 drivers/net/ethernet/marvell/mvneta.c pp->rx_ring_size = ring->rx_pending < MVNETA_MAX_RXD ?
pp 4049 drivers/net/ethernet/marvell/mvneta.c pp->tx_ring_size = clamp_t(u16, ring->tx_pending,
pp 4051 drivers/net/ethernet/marvell/mvneta.c if (pp->tx_ring_size != ring->tx_pending)
pp 4053 drivers/net/ethernet/marvell/mvneta.c pp->tx_ring_size, ring->tx_pending);
pp 4070 drivers/net/ethernet/marvell/mvneta.c struct mvneta_port *pp = netdev_priv(dev);
pp 4072 drivers/net/ethernet/marvell/mvneta.c phylink_ethtool_get_pauseparam(pp->phylink, pause);
pp 4078 drivers/net/ethernet/marvell/mvneta.c struct mvneta_port *pp = netdev_priv(dev);
pp 4080 drivers/net/ethernet/marvell/mvneta.c return phylink_ethtool_set_pauseparam(pp->phylink, pause);
pp 4095 drivers/net/ethernet/marvell/mvneta.c static void mvneta_ethtool_update_stats(struct mvneta_port *pp)
pp 4098 drivers/net/ethernet/marvell/mvneta.c void __iomem *base = pp->base;
pp 4121 drivers/net/ethernet/marvell/mvneta.c val = phylink_get_eee_err(pp->phylink);
pp 4124 drivers/net/ethernet/marvell/mvneta.c val = pp->rxqs[0].skb_alloc_err;
pp 4127 drivers/net/ethernet/marvell/mvneta.c val = pp->rxqs[0].refill_err;
pp 4133 drivers/net/ethernet/marvell/mvneta.c pp->ethtool_stats[i] += val;
pp 4140 drivers/net/ethernet/marvell/mvneta.c struct mvneta_port *pp = netdev_priv(dev);
pp 4143 drivers/net/ethernet/marvell/mvneta.c mvneta_ethtool_update_stats(pp);
pp 4146 drivers/net/ethernet/marvell/mvneta.c *data++ = pp->ethtool_stats[i];
pp 4176 drivers/net/ethernet/marvell/mvneta.c static int mvneta_config_rss(struct mvneta_port *pp)
pp 4181 drivers/net/ethernet/marvell/mvneta.c netif_tx_stop_all_queues(pp->dev);
pp 4183 drivers/net/ethernet/marvell/mvneta.c on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
pp 4185 drivers/net/ethernet/marvell/mvneta.c if (!pp->neta_armada3700) {
pp 4189 drivers/net/ethernet/marvell/mvneta.c per_cpu_ptr(pp->ports, cpu);
pp 4195 drivers/net/ethernet/marvell/mvneta.c napi_synchronize(&pp->napi);
pp 4196 drivers/net/ethernet/marvell/mvneta.c napi_disable(&pp->napi);
pp 4199 drivers/net/ethernet/marvell/mvneta.c pp->rxq_def = pp->indir[0];
pp 4202 drivers/net/ethernet/marvell/mvneta.c mvneta_set_rx_mode(pp->dev);
pp 4205 drivers/net/ethernet/marvell/mvneta.c val = MVNETA_PORT_CONFIG_DEFL_VALUE(pp->rxq_def);
pp 4206 drivers/net/ethernet/marvell/mvneta.c mvreg_write(pp, MVNETA_PORT_CONFIG, val);
pp 4209 drivers/net/ethernet/marvell/mvneta.c spin_lock(&pp->lock);
pp 4210 drivers/net/ethernet/marvell/mvneta.c mvneta_percpu_elect(pp);
pp 4211 drivers/net/ethernet/marvell/mvneta.c spin_unlock(&pp->lock);
pp 4213 drivers/net/ethernet/marvell/mvneta.c if (!pp->neta_armada3700) {
pp 4217 drivers/net/ethernet/marvell/mvneta.c per_cpu_ptr(pp->ports, cpu);
pp 4222 drivers/net/ethernet/marvell/mvneta.c napi_enable(&pp->napi);
pp 4225 drivers/net/ethernet/marvell/mvneta.c netif_tx_start_all_queues(pp->dev);
pp 4233 drivers/net/ethernet/marvell/mvneta.c struct mvneta_port *pp = netdev_priv(dev);
pp 4236 drivers/net/ethernet/marvell/mvneta.c if (pp->neta_armada3700)
pp 4249 drivers/net/ethernet/marvell/mvneta.c memcpy(pp->indir, indir, MVNETA_RSS_LU_TABLE_SIZE);
pp 4251 drivers/net/ethernet/marvell/mvneta.c return mvneta_config_rss(pp);
pp 4257 drivers/net/ethernet/marvell/mvneta.c struct mvneta_port *pp = netdev_priv(dev);
pp 4260 drivers/net/ethernet/marvell/mvneta.c if (pp->neta_armada3700)
pp 4269 drivers/net/ethernet/marvell/mvneta.c memcpy(indir, pp->indir, MVNETA_RSS_LU_TABLE_SIZE);
pp 4277 drivers/net/ethernet/marvell/mvneta.c struct mvneta_port *pp = netdev_priv(dev);
pp 4279 drivers/net/ethernet/marvell/mvneta.c phylink_ethtool_get_wol(pp->phylink, wol);
pp 4285 drivers/net/ethernet/marvell/mvneta.c struct mvneta_port *pp = netdev_priv(dev);
pp 4288 drivers/net/ethernet/marvell/mvneta.c ret = phylink_ethtool_set_wol(pp->phylink, wol);
pp 4298 drivers/net/ethernet/marvell/mvneta.c struct mvneta_port *pp = netdev_priv(dev);
pp 4301 drivers/net/ethernet/marvell/mvneta.c lpi_ctl0 = mvreg_read(pp, MVNETA_LPI_CTRL_0);
pp 4303 drivers/net/ethernet/marvell/mvneta.c eee->eee_enabled = pp->eee_enabled;
pp 4304 drivers/net/ethernet/marvell/mvneta.c eee->eee_active = pp->eee_active;
pp 4305 drivers/net/ethernet/marvell/mvneta.c eee->tx_lpi_enabled = pp->tx_lpi_enabled;
pp 4308 drivers/net/ethernet/marvell/mvneta.c return phylink_ethtool_get_eee(pp->phylink, eee);
pp 4314 drivers/net/ethernet/marvell/mvneta.c struct mvneta_port *pp = netdev_priv(dev);
pp 4322 drivers/net/ethernet/marvell/mvneta.c lpi_ctl0 = mvreg_read(pp, MVNETA_LPI_CTRL_0);
pp 4325 drivers/net/ethernet/marvell/mvneta.c mvreg_write(pp, MVNETA_LPI_CTRL_0, lpi_ctl0);
pp 4327 drivers/net/ethernet/marvell/mvneta.c pp->eee_enabled = eee->eee_enabled;
pp 4328 drivers/net/ethernet/marvell/mvneta.c pp->tx_lpi_enabled = eee->tx_lpi_enabled;
pp 4330 drivers/net/ethernet/marvell/mvneta.c mvneta_set_eee(pp, eee->tx_lpi_enabled && eee->eee_enabled);
pp 4332 drivers/net/ethernet/marvell/mvneta.c return phylink_ethtool_set_eee(pp->phylink, eee);
pp 4373 drivers/net/ethernet/marvell/mvneta.c static int mvneta_init(struct device *dev, struct mvneta_port *pp)
pp 4378 drivers/net/ethernet/marvell/mvneta.c mvneta_port_disable(pp);
pp 4381 drivers/net/ethernet/marvell/mvneta.c mvneta_defaults_set(pp);
pp 4383 drivers/net/ethernet/marvell/mvneta.c pp->txqs = devm_kcalloc(dev, txq_number, sizeof(*pp->txqs), GFP_KERNEL);
pp 4384 drivers/net/ethernet/marvell/mvneta.c if (!pp->txqs)
pp 4389 drivers/net/ethernet/marvell/mvneta.c struct mvneta_tx_queue *txq = &pp->txqs[queue];
pp 4391 drivers/net/ethernet/marvell/mvneta.c txq->size = pp->tx_ring_size;
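The mvneta_config_rss() lines above show that this hardware steers RSS through a single default queue: the first indirection-table entry becomes rxq_def and is written into MVNETA_PORT_CONFIG. Condensed from the listed lines:

memcpy(pp->indir, indir, MVNETA_RSS_LU_TABLE_SIZE);
pp->rxq_def = pp->indir[0];             /* entry 0 selects the default RX queue */
mvreg_write(pp, MVNETA_PORT_CONFIG,
            MVNETA_PORT_CONFIG_DEFL_VALUE(pp->rxq_def));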
pp 4395 drivers/net/ethernet/marvell/mvneta.c pp->rxqs = devm_kcalloc(dev, rxq_number, sizeof(*pp->rxqs), GFP_KERNEL);
pp 4396 drivers/net/ethernet/marvell/mvneta.c if (!pp->rxqs)
pp 4401 drivers/net/ethernet/marvell/mvneta.c struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
pp 4403 drivers/net/ethernet/marvell/mvneta.c rxq->size = pp->rx_ring_size;
pp 4407 drivers/net/ethernet/marvell/mvneta.c = devm_kmalloc_array(pp->dev->dev.parent,
pp 4419 drivers/net/ethernet/marvell/mvneta.c static void mvneta_conf_mbus_windows(struct mvneta_port *pp,
pp 4427 drivers/net/ethernet/marvell/mvneta.c mvreg_write(pp, MVNETA_WIN_BASE(i), 0);
pp 4428 drivers/net/ethernet/marvell/mvneta.c mvreg_write(pp, MVNETA_WIN_SIZE(i), 0);
pp 4431 drivers/net/ethernet/marvell/mvneta.c mvreg_write(pp, MVNETA_WIN_REMAP(i), 0);
pp 4441 drivers/net/ethernet/marvell/mvneta.c mvreg_write(pp, MVNETA_WIN_BASE(i),
pp 4446 drivers/net/ethernet/marvell/mvneta.c mvreg_write(pp, MVNETA_WIN_SIZE(i),
pp 4457 drivers/net/ethernet/marvell/mvneta.c mvreg_write(pp, MVNETA_WIN_SIZE(0), 0xffff0000);
pp 4462 drivers/net/ethernet/marvell/mvneta.c mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable);
pp 4463 drivers/net/ethernet/marvell/mvneta.c mvreg_write(pp, MVNETA_ACCESS_PROTECT_ENABLE, win_protect);
pp 4467 drivers/net/ethernet/marvell/mvneta.c static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
pp 4470 drivers/net/ethernet/marvell/mvneta.c mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0);
pp 4473 drivers/net/ethernet/marvell/mvneta.c mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_QSGMII_SERDES_PROTO);
pp 4476 drivers/net/ethernet/marvell/mvneta.c mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO);
pp 4488 drivers/net/ethernet/marvell/mvneta.c struct mvneta_port *pp;
pp 4524 drivers/net/ethernet/marvell/mvneta.c pp = netdev_priv(dev);
pp 4525 drivers/net/ethernet/marvell/mvneta.c spin_lock_init(&pp->lock);
pp 4527 drivers/net/ethernet/marvell/mvneta.c pp->phylink_config.dev = &dev->dev;
pp 4528 drivers/net/ethernet/marvell/mvneta.c pp->phylink_config.type = PHYLINK_NETDEV;
pp 4530 drivers/net/ethernet/marvell/mvneta.c phylink = phylink_create(&pp->phylink_config, pdev->dev.fwnode,
pp 4543 drivers/net/ethernet/marvell/mvneta.c pp->phylink = phylink;
pp 4544 drivers/net/ethernet/marvell/mvneta.c pp->comphy = comphy;
pp 4545 drivers/net/ethernet/marvell/mvneta.c pp->phy_interface = phy_mode;
pp 4546 drivers/net/ethernet/marvell/mvneta.c pp->dn = dn;
pp 4548 drivers/net/ethernet/marvell/mvneta.c pp->rxq_def = rxq_def;
pp 4549 drivers/net/ethernet/marvell/mvneta.c pp->indir[0] = rxq_def;
pp 4553 drivers/net/ethernet/marvell/mvneta.c pp->neta_armada3700 = true;
pp 4555 drivers/net/ethernet/marvell/mvneta.c pp->clk = devm_clk_get(&pdev->dev, "core");
pp 4556 drivers/net/ethernet/marvell/mvneta.c if (IS_ERR(pp->clk))
pp 4557 drivers/net/ethernet/marvell/mvneta.c pp->clk = devm_clk_get(&pdev->dev, NULL);
pp 4558 drivers/net/ethernet/marvell/mvneta.c if (IS_ERR(pp->clk)) {
pp 4559 drivers/net/ethernet/marvell/mvneta.c err = PTR_ERR(pp->clk);
pp 4563 drivers/net/ethernet/marvell/mvneta.c clk_prepare_enable(pp->clk);
pp 4565 drivers/net/ethernet/marvell/mvneta.c pp->clk_bus = devm_clk_get(&pdev->dev, "bus");
pp 4566 drivers/net/ethernet/marvell/mvneta.c if (!IS_ERR(pp->clk_bus))
pp 4567 drivers/net/ethernet/marvell/mvneta.c clk_prepare_enable(pp->clk_bus);
pp 4569 drivers/net/ethernet/marvell/mvneta.c pp->base = devm_platform_ioremap_resource(pdev, 0);
pp 4570 drivers/net/ethernet/marvell/mvneta.c if (IS_ERR(pp->base)) {
pp 4571 drivers/net/ethernet/marvell/mvneta.c err = PTR_ERR(pp->base);
pp 4576 drivers/net/ethernet/marvell/mvneta.c pp->ports = alloc_percpu(struct mvneta_pcpu_port);
pp 4577 drivers/net/ethernet/marvell/mvneta.c if (!pp->ports) {
pp 4583 drivers/net/ethernet/marvell/mvneta.c pp->stats = netdev_alloc_pcpu_stats(struct mvneta_pcpu_stats);
pp 4584 drivers/net/ethernet/marvell/mvneta.c if (!pp->stats) {
pp 4594 drivers/net/ethernet/marvell/mvneta.c mvneta_get_mac_addr(pp, hw_mac_addr);
pp 4618 drivers/net/ethernet/marvell/mvneta.c pp->tx_csum_limit = tx_csum_limit;
pp 4620 drivers/net/ethernet/marvell/mvneta.c pp->dram_target_info = mv_mbus_dram_info();
pp 4625 drivers/net/ethernet/marvell/mvneta.c if (pp->dram_target_info || pp->neta_armada3700)
pp 4626 drivers/net/ethernet/marvell/mvneta.c mvneta_conf_mbus_windows(pp, pp->dram_target_info);
pp 4628 drivers/net/ethernet/marvell/mvneta.c pp->tx_ring_size = MVNETA_MAX_TXD;
pp 4629 drivers/net/ethernet/marvell/mvneta.c pp->rx_ring_size = MVNETA_MAX_RXD;
pp 4631 drivers/net/ethernet/marvell/mvneta.c pp->dev = dev;
pp 4634 drivers/net/ethernet/marvell/mvneta.c pp->id = global_port_id++;
pp 4635 drivers/net/ethernet/marvell/mvneta.c pp->rx_offset_correction = 0; /* not relevant for SW BM */
pp 4640 drivers/net/ethernet/marvell/mvneta.c pp->bm_priv = mvneta_bm_get(bm_node);
pp 4641 drivers/net/ethernet/marvell/mvneta.c if (pp->bm_priv) {
pp 4642 drivers/net/ethernet/marvell/mvneta.c err = mvneta_bm_port_init(pdev, pp);
pp 4646 drivers/net/ethernet/marvell/mvneta.c mvneta_bm_put(pp->bm_priv);
pp 4647 drivers/net/ethernet/marvell/mvneta.c pp->bm_priv = NULL;
pp 4654 drivers/net/ethernet/marvell/mvneta.c pp->rx_offset_correction = max(0,
pp 4660 drivers/net/ethernet/marvell/mvneta.c err = mvneta_init(&pdev->dev, pp);
pp 4664 drivers/net/ethernet/marvell/mvneta.c err = mvneta_port_power_up(pp, phy_mode);
pp 4673 drivers/net/ethernet/marvell/mvneta.c if (pp->neta_armada3700) {
pp 4674 drivers/net/ethernet/marvell/mvneta.c netif_napi_add(dev, &pp->napi, mvneta_poll, NAPI_POLL_WEIGHT);
pp 4678 drivers/net/ethernet/marvell/mvneta.c per_cpu_ptr(pp->ports, cpu);
pp 4682 drivers/net/ethernet/marvell/mvneta.c port->pp = pp;
pp 4707 drivers/net/ethernet/marvell/mvneta.c platform_set_drvdata(pdev, pp->dev);
pp 4712 drivers/net/ethernet/marvell/mvneta.c if (pp->bm_priv) {
pp 4713 drivers/net/ethernet/marvell/mvneta.c mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
pp 4714 drivers/net/ethernet/marvell/mvneta.c mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short,
pp 4715 drivers/net/ethernet/marvell/mvneta.c 1 << pp->id);
pp 4716 drivers/net/ethernet/marvell/mvneta.c mvneta_bm_put(pp->bm_priv);
pp 4718 drivers/net/ethernet/marvell/mvneta.c free_percpu(pp->stats);
pp 4720 drivers/net/ethernet/marvell/mvneta.c free_percpu(pp->ports);
pp 4722 drivers/net/ethernet/marvell/mvneta.c clk_disable_unprepare(pp->clk_bus);
pp 4723 drivers/net/ethernet/marvell/mvneta.c clk_disable_unprepare(pp->clk);
pp 4725 drivers/net/ethernet/marvell/mvneta.c if (pp->phylink)
pp 4726 drivers/net/ethernet/marvell/mvneta.c phylink_destroy(pp->phylink);
pp 4736 drivers/net/ethernet/marvell/mvneta.c struct mvneta_port *pp = netdev_priv(dev);
pp 4739 drivers/net/ethernet/marvell/mvneta.c clk_disable_unprepare(pp->clk_bus);
pp 4740 drivers/net/ethernet/marvell/mvneta.c clk_disable_unprepare(pp->clk);
pp 4741 drivers/net/ethernet/marvell/mvneta.c free_percpu(pp->ports);
pp 4742 drivers/net/ethernet/marvell/mvneta.c free_percpu(pp->stats);
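mvneta_probe()'s error path above releases resources in reverse order of acquisition. A sketch of the unwind (label names are assumptions; only the ordering is taken from the listing):

err_netdev:
        if (pp->bm_priv) {
                mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
                mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short, 1 << pp->id);
                mvneta_bm_put(pp->bm_priv);
        }
err_free_stats:
        free_percpu(pp->stats);
err_free_ports:
        free_percpu(pp->ports);
err_clk:
        clk_disable_unprepare(pp->clk_bus);
        clk_disable_unprepare(pp->clk);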
pp 4744 drivers/net/ethernet/marvell/mvneta.c phylink_destroy(pp->phylink);
pp 4746 drivers/net/ethernet/marvell/mvneta.c if (pp->bm_priv) {
pp 4747 drivers/net/ethernet/marvell/mvneta.c mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
pp 4748 drivers/net/ethernet/marvell/mvneta.c mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short,
pp 4749 drivers/net/ethernet/marvell/mvneta.c 1 << pp->id);
pp 4750 drivers/net/ethernet/marvell/mvneta.c mvneta_bm_put(pp->bm_priv);
pp 4761 drivers/net/ethernet/marvell/mvneta.c struct mvneta_port *pp = netdev_priv(dev);
pp 4766 drivers/net/ethernet/marvell/mvneta.c if (!pp->neta_armada3700) {
pp 4767 drivers/net/ethernet/marvell/mvneta.c spin_lock(&pp->lock);
pp 4768 drivers/net/ethernet/marvell/mvneta.c pp->is_stopped = true;
pp 4769 drivers/net/ethernet/marvell/mvneta.c spin_unlock(&pp->lock);
pp 4772 drivers/net/ethernet/marvell/mvneta.c &pp->node_online);
pp 4774 drivers/net/ethernet/marvell/mvneta.c &pp->node_dead);
pp 4778 drivers/net/ethernet/marvell/mvneta.c mvneta_stop_dev(pp);
pp 4782 drivers/net/ethernet/marvell/mvneta.c struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
pp 4784 drivers/net/ethernet/marvell/mvneta.c mvneta_rxq_drop_pkts(pp, rxq);
pp 4788 drivers/net/ethernet/marvell/mvneta.c struct mvneta_tx_queue *txq = &pp->txqs[queue];
pp 4790 drivers/net/ethernet/marvell/mvneta.c mvneta_txq_hw_deinit(pp, txq);
pp 4795 drivers/net/ethernet/marvell/mvneta.c clk_disable_unprepare(pp->clk_bus);
pp 4796 drivers/net/ethernet/marvell/mvneta.c clk_disable_unprepare(pp->clk);
pp 4805 drivers/net/ethernet/marvell/mvneta.c struct mvneta_port *pp = netdev_priv(dev);
pp 4808 drivers/net/ethernet/marvell/mvneta.c clk_prepare_enable(pp->clk);
pp 4809 drivers/net/ethernet/marvell/mvneta.c if (!IS_ERR(pp->clk_bus))
pp 4810 drivers/net/ethernet/marvell/mvneta.c clk_prepare_enable(pp->clk_bus);
pp 4811 drivers/net/ethernet/marvell/mvneta.c if (pp->dram_target_info || pp->neta_armada3700)
pp 4812 drivers/net/ethernet/marvell/mvneta.c mvneta_conf_mbus_windows(pp, pp->dram_target_info);
pp 4813 drivers/net/ethernet/marvell/mvneta.c if (pp->bm_priv) {
pp 4814 drivers/net/ethernet/marvell/mvneta.c err = mvneta_bm_port_init(pdev, pp);
pp 4817 drivers/net/ethernet/marvell/mvneta.c pp->bm_priv = NULL;
pp 4820 drivers/net/ethernet/marvell/mvneta.c mvneta_defaults_set(pp);
pp 4821 drivers/net/ethernet/marvell/mvneta.c err = mvneta_port_power_up(pp, pp->phy_interface);
pp 4833 drivers/net/ethernet/marvell/mvneta.c struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
pp 4836 drivers/net/ethernet/marvell/mvneta.c mvneta_rxq_hw_init(pp, rxq);
pp 4840 drivers/net/ethernet/marvell/mvneta.c struct mvneta_tx_queue *txq = &pp->txqs[queue];
pp 4843 drivers/net/ethernet/marvell/mvneta.c mvneta_txq_hw_init(pp, txq);
pp 4846 drivers/net/ethernet/marvell/mvneta.c if (!pp->neta_armada3700) {
pp 4847 drivers/net/ethernet/marvell/mvneta.c spin_lock(&pp->lock);
pp 4848 drivers/net/ethernet/marvell/mvneta.c pp->is_stopped = false;
pp 4849 drivers/net/ethernet/marvell/mvneta.c spin_unlock(&pp->lock);
pp 4851 drivers/net/ethernet/marvell/mvneta.c &pp->node_online);
pp 4853 drivers/net/ethernet/marvell/mvneta.c &pp->node_dead);
pp 4857 drivers/net/ethernet/marvell/mvneta.c mvneta_start_dev(pp);
pp 1361 drivers/net/ethernet/mellanox/mlxsw/reg.h MLXSW_ITEM32(reg, slcr, pp, 0x00, 24, 1);
pp 7671 drivers/net/ethernet/mellanox/mlxsw/reg.h MLXSW_ITEM32(reg, recr2, pp, 0x00, 24, 1);
pp 178 drivers/net/ethernet/mscc/ocelot_police.c struct qos_policer_conf pp = { 0 };
pp 184 drivers/net/ethernet/mscc/ocelot_police.c pp.mode = MSCC_QOS_RATE_MODE_DATA;
pp 185 drivers/net/ethernet/mscc/ocelot_police.c pp.pir = pol->rate;
pp 186 drivers/net/ethernet/mscc/ocelot_police.c pp.pbs = pol->burst;
pp 190 drivers/net/ethernet/mscc/ocelot_police.c __func__, port->chip_port, pp.pir, pp.pbs);
pp 192 drivers/net/ethernet/mscc/ocelot_police.c err = qos_policer_conf_set(port, POL_IX_PORT + port->chip_port, &pp);
pp 209 drivers/net/ethernet/mscc/ocelot_police.c struct qos_policer_conf pp = { 0 };
pp 214 drivers/net/ethernet/mscc/ocelot_police.c pp.mode = MSCC_QOS_RATE_MODE_DISABLED;
pp 216 drivers/net/ethernet/mscc/ocelot_police.c err = qos_policer_conf_set(port, POL_IX_PORT + port->chip_port, &pp);
pp 34 drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000/nfp6000.h static inline int pushpull_width(int pp)
pp 36 drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000/nfp6000.h pp &= 0xf;
pp 38 drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000/nfp6000.h if (pp == 0)
pp 40 drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000/nfp6000.h return 2 << pp;
pp 758 drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c int pp;
pp 760 drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c pp = nfp_target_pushpull(NFP_CPP_ID(target, action, token), address);
pp 761 drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c if (pp < 0)
pp 762 drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c return pp;
pp 764 drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c priv->width.read = PUSH_WIDTH(pp);
pp 765 drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c priv->width.write = PULL_WIDTH(pp);
pp 33 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_target.c static int target_rw(u32 cpp_id, int pp, int start, int len)
pp 36 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_target.c AT(0, 0, 0, pp);
pp 37 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_target.c AT(1, 0, pp, 0);
pp 38 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_target.c AT(NFP_CPP_ACTION_RW, 0, pp, pp);
pp 197 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_target.c int pp;
pp 200 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_target.c pp = nfp6000_mu_ctm(cpp_id);
pp 202 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_target.c pp = nfp6000_mu_emu(cpp_id);
pp 204 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_target.c pp = nfp6000_mu_ctm(cpp_id);
pp 206 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_target.c pp = nfp6000_mu_emu(cpp_id);
pp 208 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_target.c pp = nfp6000_mu_imu(cpp_id);
pp 210 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_target.c pp = nfp6000_mu_ctm(cpp_id);
pp 212 drivers/net/ethernet/netronome/nfp/nfpcore/nfp_target.c return pp;
pp 3282 drivers/net/ethernet/sun/niu.c struct page *p, **pp;
pp 3285 drivers/net/ethernet/sun/niu.c pp = &rp->rxhash[h];
pp 3286 drivers/net/ethernet/sun/niu.c for (; (p = *pp) != NULL; pp = (struct page **) &p->mapping) {
pp 3288 drivers/net/ethernet/sun/niu.c *link = pp;
pp 1606 drivers/net/ethernet/sun/sunvnet_common.c struct vnet_mcast_entry *m, **pp;
pp 1640 drivers/net/ethernet/sun/sunvnet_common.c pp = &vp->mcast_list;
pp 1641 drivers/net/ethernet/sun/sunvnet_common.c while ((m = *pp) != NULL) {
pp 1644 drivers/net/ethernet/sun/sunvnet_common.c pp = &m->next;
pp 1657 drivers/net/ethernet/sun/sunvnet_common.c *pp = m->next;
pp 97 drivers/net/ethernet/tehuti/tehuti.h #define READ_REG(pp, reg) readl(pp->pBdxRegs + reg)
pp 98 drivers/net/ethernet/tehuti/tehuti.h #define WRITE_REG(pp, reg, val) writel(val, pp->pBdxRegs + reg)
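The niu rxhash walk and the sunvnet mcast-list removal above both use the pointer-to-pointer idiom: pp always addresses the link that points at the current node, so a node can be unlinked without tracking a previous element. Generic sketch (should_remove() is a hypothetical predicate):

struct vnet_mcast_entry *m, **pp = &vp->mcast_list;

while ((m = *pp) != NULL) {
        if (should_remove(m)) {
                *pp = m->next;          /* unlink; no "prev" bookkeeping needed */
                kfree(m);
        } else {
                pp = &m->next;          /* advance to the next link */
        }
}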
pp 477 drivers/net/geneve.c struct sk_buff *pp = NULL;
pp 526 drivers/net/geneve.c pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
pp 532 drivers/net/geneve.c skb_gro_flush_final(skb, pp, flush);
pp 534 drivers/net/geneve.c return pp;
pp 422 drivers/net/hamradio/baycom_epp.c struct parport *pp = bc->pdev->port;
pp 461 drivers/net/hamradio/baycom_epp.c if (j != pp->ops->epp_write_data(pp, tmp, j, 0))
pp 479 drivers/net/hamradio/baycom_epp.c if (i != pp->ops->epp_write_data(pp, bc->hdlctx.bufptr, i, 0))
pp 497 drivers/net/hamradio/baycom_epp.c if (j != pp->ops->epp_write_data(pp, tmp, j, 0))
pp 514 drivers/net/hamradio/baycom_epp.c if (j != pp->ops->epp_write_data(pp, tmp, j, 0))
pp 554 drivers/net/hamradio/baycom_epp.c struct parport *pp = bc->pdev->port;
pp 568 drivers/net/hamradio/baycom_epp.c if (cnt2 != pp->ops->epp_read_data(pp, tmp, cnt2, 0)) {
pp 641 drivers/net/hamradio/baycom_epp.c struct parport *pp;
pp 652 drivers/net/hamradio/baycom_epp.c pp = bc->pdev->port;
pp 654 drivers/net/hamradio/baycom_epp.c if (pp->ops->epp_read_addr(pp, &stat, 1, 0) != 1)
pp 662 drivers/net/hamradio/baycom_epp.c if (pp->ops->epp_write_addr(pp, tmp, 1, 0) != 1)
pp 664 drivers/net/hamradio/baycom_epp.c if (pp->ops->epp_read_addr(pp, tmp, 2, 0) != 2)
pp 670 drivers/net/hamradio/baycom_epp.c if (pp->ops->epp_write_addr(pp, tmp, 1, 0) != 1)
pp 672 drivers/net/hamradio/baycom_epp.c if (pp->ops->epp_read_addr(pp, tmp, 2, 0) != 2)
pp 678 drivers/net/hamradio/baycom_epp.c if (pp->ops->epp_write_addr(pp, tmp, 1, 0) != 1)
pp 685 drivers/net/hamradio/baycom_epp.c if (pp->ops->epp_read_addr(pp, &stat, 1, 0) != 1)
pp 727 drivers/net/hamradio/baycom_epp.c if (pp->ops->epp_read_addr(pp, &stat, 1, 0) != 1)
pp 739 drivers/net/hamradio/baycom_epp.c if (pp->ops->epp_read_addr(pp, &stat, 1, 0) != 1)
pp 824 drivers/net/hamradio/baycom_epp.c struct parport *pp = parport_find_base(dev->base_addr);
pp 831 drivers/net/hamradio/baycom_epp.c if (!pp) {
pp 836 drivers/net/hamradio/baycom_epp.c if (pp->irq < 0) {
pp 837 drivers/net/hamradio/baycom_epp.c printk(KERN_ERR "%s: parport at 0x%lx has no irq\n", bc_drvname, pp->base);
pp 838 drivers/net/hamradio/baycom_epp.c parport_put_port(pp);
pp 842 drivers/net/hamradio/baycom_epp.c if ((~pp->modes) & (PARPORT_MODE_TRISTATE | PARPORT_MODE_PCSPP | PARPORT_MODE_SAFEININT)) {
pp 844 drivers/net/hamradio/baycom_epp.c bc_drvname, pp->base);
pp 845 drivers/net/hamradio/baycom_epp.c parport_put_port(pp);
pp 859 drivers/net/hamradio/baycom_epp.c parport_put_port(pp);
pp 863 drivers/net/hamradio/baycom_epp.c bc->pdev = parport_register_dev_model(pp, dev->name, &par_cb, i);
pp 864 drivers/net/hamradio/baycom_epp.c parport_put_port(pp);
pp 866 drivers/net/hamradio/baycom_epp.c printk(KERN_ERR "%s: cannot register parport at 0x%lx\n", bc_drvname, pp->base);
pp 870 drivers/net/hamradio/baycom_epp.c printk(KERN_ERR "%s: parport at 0x%lx busy\n", bc_drvname, pp->base);
pp 882 drivers/net/hamradio/baycom_epp.c parport_write_control(pp, LPTCTRL_PROGRAM); /* prepare EPP mode; we aren't using interrupts */
pp 886 drivers/net/hamradio/baycom_epp.c if (pp->ops->epp_write_addr(pp, tmp, 2, 0) != 2)
pp 892 drivers/net/hamradio/baycom_epp.c if (pp->ops->epp_read_addr(pp, &stat, 1, 0) != 1)
pp 898 drivers/net/hamradio/baycom_epp.c if (pp->ops->epp_read_data(pp, tmp, 128, 0) != 128)
pp 900 drivers/net/hamradio/baycom_epp.c if (pp->ops->epp_read_data(pp, tmp, 128, 0) != 128)
pp 905 drivers/net/hamradio/baycom_epp.c if (pp->ops->epp_read_addr(pp, &stat, 1, 0) != 1)
pp 909 drivers/net/hamradio/baycom_epp.c if (pp->ops->epp_read_data(pp, tmp, 1, 0) != 1)
pp 924 drivers/net/hamradio/baycom_epp.c if (pp->ops->epp_write_addr(pp, tmp, 1, 0) != 1)
pp 942 drivers/net/hamradio/baycom_epp.c parport_write_control(pp, 0); /* reset the adapter */
pp 953 drivers/net/hamradio/baycom_epp.c struct parport *pp = bc->pdev->port;
pp 960 drivers/net/hamradio/baycom_epp.c pp->ops->epp_write_addr(pp, tmp, 1, 0);
pp 961 drivers/net/hamradio/baycom_epp.c parport_write_control(pp, 0); /* reset the adapter */
pp 183 drivers/net/hamradio/baycom_par.c struct parport *pp = bc->pdev->port;
pp 196 drivers/net/hamradio/baycom_par.c pp->ops->write_data(pp, val);
pp 197 drivers/net/hamradio/baycom_par.c pp->ops->write_data(pp, val | PAR96_BURST);
pp 207 drivers/net/hamradio/baycom_par.c struct parport *pp = bc->pdev->port;
pp 214 drivers/net/hamradio/baycom_par.c if (pp->ops->read_status(pp) & PAR96_RXBIT)
pp 219 drivers/net/hamradio/baycom_par.c pp->ops->write_data(pp, PAR97_POWER | PAR96_PTT);
pp 225 drivers/net/hamradio/baycom_par.c pp->ops->write_data(pp, PAR97_POWER | PAR96_PTT | PAR96_BURST);
pp 250 drivers/net/hamradio/baycom_par.c hdlcdrv_setdcd(&bc->hdrv, !!(pp->ops->read_status(pp) & PAR96_DCD));
pp 299 drivers/net/hamradio/baycom_par.c struct parport *pp;
pp 304 drivers/net/hamradio/baycom_par.c pp = parport_find_base(dev->base_addr);
pp 305 drivers/net/hamradio/baycom_par.c if (!pp) {
pp 309 drivers/net/hamradio/baycom_par.c if (pp->irq < 0) {
pp 310 drivers/net/hamradio/baycom_par.c printk(KERN_ERR "baycom_par: parport at 0x%lx has no irq\n", pp->base);
pp 311 drivers/net/hamradio/baycom_par.c parport_put_port(pp);
pp 314 drivers/net/hamradio/baycom_par.c if ((~pp->modes) & (PARPORT_MODE_PCSPP | PARPORT_MODE_SAFEININT)) {
pp 315 drivers/net/hamradio/baycom_par.c printk(KERN_ERR "baycom_par: parport at 0x%lx cannot be used\n", pp->base);
pp 316 drivers/net/hamradio/baycom_par.c parport_put_port(pp);
pp 332 drivers/net/hamradio/baycom_par.c parport_put_port(pp);
pp 335 drivers/net/hamradio/baycom_par.c bc->pdev = parport_register_dev_model(pp, dev->name, &par_cb, i);
pp 336 drivers/net/hamradio/baycom_par.c parport_put_port(pp);
pp 342 drivers/net/hamradio/baycom_par.c printk(KERN_ERR "baycom_par: parport at 0x%lx busy\n", pp->base);
pp 346 drivers/net/hamradio/baycom_par.c pp = bc->pdev->port;
pp 347 drivers/net/hamradio/baycom_par.c dev->irq = pp->irq;
pp 348 drivers/net/hamradio/baycom_par.c pp->ops->data_forward(pp);
pp 350 drivers/net/hamradio/baycom_par.c pp->ops->write_data(pp, PAR96_PTT | PAR97_POWER); /* switch off PTT */
pp 351 drivers/net/hamradio/baycom_par.c pp->ops->enable_irq(pp);
pp 362 drivers/net/hamradio/baycom_par.c struct parport *pp;
pp 366 drivers/net/hamradio/baycom_par.c pp = bc->pdev->port;
pp 368 drivers/net/hamradio/baycom_par.c pp->ops->disable_irq(pp);
pp 370 drivers/net/hamradio/baycom_par.c pp->ops->write_data(pp, PAR96_PTT | PAR97_POWER);
pp 2156 drivers/net/phy/phy_device.c struct ethtool_pauseparam *pp)
pp 2159 drivers/net/phy/phy_device.c phydev->supported) && pp->rx_pause)
pp 2164 drivers/net/phy/phy_device.c pp->rx_pause != pp->tx_pause)
pp 1219 drivers/net/ppp/ppp_generic.c unsigned char *pp;
pp 1243 drivers/net/ppp/ppp_generic.c pp = skb_push(skb, 2);
pp 1245 drivers/net/ppp/ppp_generic.c put_unaligned_be16(proto, pp);
pp 37 drivers/net/ppp/pppox.c int register_pppox_proto(int proto_num, const struct pppox_proto *pp)
pp 43 drivers/net/ppp/pppox.c pppox_protos[proto_num] = pp;
pp 731 drivers/net/vxlan.c struct sk_buff *pp = NULL;
pp 779 drivers/net/vxlan.c pp = call_gro_receive(eth_gro_receive, head, skb);
pp 783 drivers/net/vxlan.c skb_gro_flush_final_remcsum(skb, pp, flush, &grc);
pp 785 drivers/net/vxlan.c return pp;
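The geneve and vxlan receive paths above share the UDP-tunnel GRO shape: hand the inner packet to the next layer's gro_receive, then report the flush verdict before returning the aggregation candidate. A skeleton distilled from the listed lines (header validation and the flush logic are compressed, so treat the ordering as a sketch):

struct sk_buff *pp = NULL;
int flush = 1;

/* ... validate the tunnel header, locate the inner ptype ... */
pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
flush = 0;

skb_gro_flush_final(skb, pp, flush);    /* vxlan uses the _remcsum variant */
return pp;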
pp 44 drivers/net/wireless/intel/iwlegacy/3945.c #define IL_DECLARE_RATE_INFO(r, ip, in, rp, rn, pp, np) \
pp 51 drivers/net/wireless/intel/iwlegacy/3945.c RATE_##pp##M_IDX, \
pp 61 drivers/net/wireless/intel/iwlegacy/4965-rs.c #define IL_DECLARE_RATE_INFO(r, s, ip, in, rp, rn, pp, np) \
pp 70 drivers/net/wireless/intel/iwlegacy/4965-rs.c RATE_##pp##M_IDX, \
pp 61 drivers/net/wireless/intel/iwlwifi/dvm/rs.c #define IWL_DECLARE_RATE_INFO(r, s, ip, in, rp, rn, pp, np) \
pp 71 drivers/net/wireless/intel/iwlwifi/dvm/rs.c IWL_RATE_##pp##M_INDEX, \
pp 50 drivers/nfc/st21nfca/dep.c #define ST21NFCA_PP2LRI(pp) ((pp & 0x30) >> 4)
pp 116 drivers/ntb/test/ntb_pingpong.c static int pp_find_next_peer(struct pp_ctx *pp)
pp 121 drivers/ntb/test/ntb_pingpong.c link = ntb_link_is_up(pp->ntb, NULL, NULL);
pp 124 drivers/ntb/test/ntb_pingpong.c if (link & pp->nmask) {
pp 125 drivers/ntb/test/ntb_pingpong.c pidx = __ffs64(link & pp->nmask);
pp 127 drivers/ntb/test/ntb_pingpong.c } else if (link & pp->pmask) {
pp 128 drivers/ntb/test/ntb_pingpong.c pidx = __ffs64(link & pp->pmask);
pp 134 drivers/ntb/test/ntb_pingpong.c spin_lock(&pp->lock);
pp 135 drivers/ntb/test/ntb_pingpong.c pp->out_pidx = pidx;
pp 136 drivers/ntb/test/ntb_pingpong.c pp->out_db = out_db;
pp 137 drivers/ntb/test/ntb_pingpong.c spin_unlock(&pp->lock);
pp 142 drivers/ntb/test/ntb_pingpong.c static void pp_setup(struct pp_ctx *pp)
pp 146 drivers/ntb/test/ntb_pingpong.c ntb_db_set_mask(pp->ntb, pp->in_db);
pp 148 drivers/ntb/test/ntb_pingpong.c hrtimer_cancel(&pp->timer);
pp 150 drivers/ntb/test/ntb_pingpong.c ret = pp_find_next_peer(pp);
pp 152 drivers/ntb/test/ntb_pingpong.c dev_dbg(&pp->ntb->dev, "Got no peers, so cancel\n");
pp 156 drivers/ntb/test/ntb_pingpong.c dev_dbg(&pp->ntb->dev, "Ping-pong started with port %d, db %#llx\n",
pp 157 drivers/ntb/test/ntb_pingpong.c ntb_peer_port_number(pp->ntb, pp->out_pidx), pp->out_db);
pp 159 drivers/ntb/test/ntb_pingpong.c hrtimer_start(&pp->timer, ms_to_ktime(delay_ms), HRTIMER_MODE_REL);
pp 162 drivers/ntb/test/ntb_pingpong.c static void pp_clear(struct pp_ctx *pp)
pp 164 drivers/ntb/test/ntb_pingpong.c hrtimer_cancel(&pp->timer);
pp 166 drivers/ntb/test/ntb_pingpong.c ntb_db_set_mask(pp->ntb, pp->in_db);
pp 168 drivers/ntb/test/ntb_pingpong.c dev_dbg(&pp->ntb->dev, "Ping-pong cancelled\n");
pp 171 drivers/ntb/test/ntb_pingpong.c static void pp_ping(struct pp_ctx *pp)
pp 175 drivers/ntb/test/ntb_pingpong.c count = atomic_read(&pp->count);
pp 177 drivers/ntb/test/ntb_pingpong.c spin_lock(&pp->lock);
pp 178 drivers/ntb/test/ntb_pingpong.c ntb_peer_spad_write(pp->ntb, pp->out_pidx, 0, count);
pp 179 drivers/ntb/test/ntb_pingpong.c ntb_peer_msg_write(pp->ntb, pp->out_pidx, 0, count);
pp 181 drivers/ntb/test/ntb_pingpong.c dev_dbg(&pp->ntb->dev, "Ping port %d spad %#x, msg %#x\n",
pp 182 drivers/ntb/test/ntb_pingpong.c ntb_peer_port_number(pp->ntb, pp->out_pidx), count, count);
pp 184 drivers/ntb/test/ntb_pingpong.c ntb_peer_db_set(pp->ntb, pp->out_db);
pp 185 drivers/ntb/test/ntb_pingpong.c ntb_db_clear_mask(pp->ntb, pp->in_db);
pp 186 drivers/ntb/test/ntb_pingpong.c spin_unlock(&pp->lock);
pp 189 drivers/ntb/test/ntb_pingpong.c static void pp_pong(struct pp_ctx *pp)
pp 195 drivers/ntb/test/ntb_pingpong.c spad_data = ntb_spad_read(pp->ntb, 0);
pp 196 drivers/ntb/test/ntb_pingpong.c msg_data = ntb_msg_read(pp->ntb, &pidx, 0);
pp 197 drivers/ntb/test/ntb_pingpong.c ntb_msg_clear_sts(pp->ntb, -1);
pp 204 drivers/ntb/test/ntb_pingpong.c dev_dbg(&pp->ntb->dev, "Pong spad %#x, msg %#x (port %d)\n",
pp 205 drivers/ntb/test/ntb_pingpong.c spad_data, msg_data, ntb_peer_port_number(pp->ntb, pidx));
pp 207 drivers/ntb/test/ntb_pingpong.c atomic_inc(&pp->count);
pp 209 drivers/ntb/test/ntb_pingpong.c ntb_db_set_mask(pp->ntb, pp->in_db);
pp 210 drivers/ntb/test/ntb_pingpong.c ntb_db_clear(pp->ntb, pp->in_db);
pp 212 drivers/ntb/test/ntb_pingpong.c hrtimer_start(&pp->timer, ms_to_ktime(delay_ms), HRTIMER_MODE_REL);
pp 217 drivers/ntb/test/ntb_pingpong.c struct pp_ctx *pp = to_pp_timer(t);
pp 219 drivers/ntb/test/ntb_pingpong.c pp_ping(pp);
pp 226 drivers/ntb/test/ntb_pingpong.c struct pp_ctx *pp = ctx;
pp 228 drivers/ntb/test/ntb_pingpong.c pp_setup(pp);
pp 233 drivers/ntb/test/ntb_pingpong.c struct pp_ctx *pp = ctx;
pp 235 drivers/ntb/test/ntb_pingpong.c pp_pong(pp);
pp 279 drivers/ntb/test/ntb_pingpong.c struct pp_ctx *pp;
pp 281 drivers/ntb/test/ntb_pingpong.c pp = devm_kzalloc(&ntb->dev, sizeof(*pp), GFP_KERNEL);
pp 282 drivers/ntb/test/ntb_pingpong.c if (!pp)
pp 285 drivers/ntb/test/ntb_pingpong.c pp->ntb = ntb;
pp 286 drivers/ntb/test/ntb_pingpong.c atomic_set(&pp->count, 0);
pp 287 drivers/ntb/test/ntb_pingpong.c spin_lock_init(&pp->lock);
pp 288 drivers/ntb/test/ntb_pingpong.c hrtimer_init(&pp->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
pp 289 drivers/ntb/test/ntb_pingpong.c pp->timer.function = pp_timer_func;
pp 291 drivers/ntb/test/ntb_pingpong.c return pp;
pp 294 drivers/ntb/test/ntb_pingpong.c static void pp_init_flds(struct pp_ctx *pp)
pp 299 drivers/ntb/test/ntb_pingpong.c lport = ntb_port_number(pp->ntb);
pp 300 drivers/ntb/test/ntb_pingpong.c pcnt = ntb_peer_port_count(pp->ntb);
pp 302 drivers/ntb/test/ntb_pingpong.c if (lport < ntb_peer_port_number(pp->ntb, pidx))
pp 306 drivers/ntb/test/ntb_pingpong.c pp->in_db = BIT_ULL(pidx);
pp 307 drivers/ntb/test/ntb_pingpong.c pp->pmask = GENMASK_ULL(pidx, 0) >> 1;
pp 308 drivers/ntb/test/ntb_pingpong.c pp->nmask = GENMASK_ULL(pcnt - 1, pidx);
pp 310 drivers/ntb/test/ntb_pingpong.c dev_dbg(&pp->ntb->dev, "Inbound db %#llx, prev %#llx, next %#llx\n",
pp 311 drivers/ntb/test/ntb_pingpong.c pp->in_db, pp->pmask, pp->nmask);
pp 314 drivers/ntb/test/ntb_pingpong.c static int pp_mask_events(struct pp_ctx *pp)
pp 319 drivers/ntb/test/ntb_pingpong.c db_mask = ntb_db_valid_mask(pp->ntb);
pp 320 drivers/ntb/test/ntb_pingpong.c ret = ntb_db_set_mask(pp->ntb, db_mask);
pp 325 drivers/ntb/test/ntb_pingpong.c if (ntb_msg_count(pp->ntb) < 1)
pp 328 drivers/ntb/test/ntb_pingpong.c msg_mask = ntb_msg_outbits(pp->ntb) | ntb_msg_inbits(pp->ntb);
pp 329 drivers/ntb/test/ntb_pingpong.c return ntb_msg_set_mask(pp->ntb, msg_mask);
pp 332 drivers/ntb/test/ntb_pingpong.c static int pp_setup_ctx(struct pp_ctx *pp)
pp 336 drivers/ntb/test/ntb_pingpong.c ret = ntb_set_ctx(pp->ntb, pp, &pp_ops);
pp 340 drivers/ntb/test/ntb_pingpong.c ntb_link_enable(pp->ntb, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);
pp 342 drivers/ntb/test/ntb_pingpong.c ntb_link_event(pp->ntb);
pp 347 drivers/ntb/test/ntb_pingpong.c static void pp_clear_ctx(struct pp_ctx *pp)
pp 349 drivers/ntb/test/ntb_pingpong.c ntb_link_disable(pp->ntb);
pp 351 drivers/ntb/test/ntb_pingpong.c ntb_clear_ctx(pp->ntb);
pp 354 drivers/ntb/test/ntb_pingpong.c static void pp_setup_dbgfs(struct pp_ctx *pp)
pp 356 drivers/ntb/test/ntb_pingpong.c struct pci_dev *pdev = pp->ntb->pdev;
pp 359 drivers/ntb/test/ntb_pingpong.c pp->dbgfs_dir = debugfs_create_dir(pci_name(pdev), pp_dbgfs_topdir);
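A worked example for pp_init_flds() above, assuming four ports and a local peer index pidx = 2: in_db = BIT_ULL(2) = 0x4, pmask = GENMASK_ULL(2, 0) >> 1 = 0x3 (peer indices below us) and nmask = GENMASK_ULL(3, 2) = 0xc (peer indices from pidx up), which is exactly what pp_find_next_peer() scans with __ffs64():

pp->in_db = BIT_ULL(pidx);               /* 0b0100 */
pp->pmask = GENMASK_ULL(pidx, 0) >> 1;   /* 0b0111 >> 1 = 0b0011 */
pp->nmask = GENMASK_ULL(pcnt - 1, pidx); /* bits 3..2    = 0b1100 */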
pp 361 drivers/ntb/test/ntb_pingpong.c ret = debugfs_create_atomic_t("count", 0600, pp->dbgfs_dir, &pp->count);
pp 363 drivers/ntb/test/ntb_pingpong.c dev_warn(&pp->ntb->dev, "DebugFS unsupported\n");
pp 366 drivers/ntb/test/ntb_pingpong.c static void pp_clear_dbgfs(struct pp_ctx *pp)
pp 368 drivers/ntb/test/ntb_pingpong.c debugfs_remove_recursive(pp->dbgfs_dir);
pp 373 drivers/ntb/test/ntb_pingpong.c struct pp_ctx *pp;
pp 380 drivers/ntb/test/ntb_pingpong.c pp = pp_create_data(ntb);
pp 381 drivers/ntb/test/ntb_pingpong.c if (IS_ERR(pp))
pp 382 drivers/ntb/test/ntb_pingpong.c return PTR_ERR(pp);
pp 384 drivers/ntb/test/ntb_pingpong.c pp_init_flds(pp);
pp 386 drivers/ntb/test/ntb_pingpong.c ret = pp_mask_events(pp);
pp 390 drivers/ntb/test/ntb_pingpong.c ret = pp_setup_ctx(pp);
pp 394 drivers/ntb/test/ntb_pingpong.c pp_setup_dbgfs(pp);
pp 401 drivers/ntb/test/ntb_pingpong.c struct pp_ctx *pp = ntb->ctx;
pp 403 drivers/ntb/test/ntb_pingpong.c pp_clear_dbgfs(pp);
pp 405 drivers/ntb/test/ntb_pingpong.c pp_clear_ctx(pp);
pp 407 drivers/ntb/test/ntb_pingpong.c pp_clear(pp);
pp 256 drivers/of/base.c struct property *pp;
pp 261 drivers/of/base.c for (pp = np->properties; pp; pp = pp->next) {
pp 262 drivers/of/base.c if (of_prop_cmp(pp->name, name) == 0) {
pp 264 drivers/of/base.c *lenp = pp->length;
pp 269 drivers/of/base.c return pp;
pp 276 drivers/of/base.c struct property *pp;
pp 280 drivers/of/base.c pp = __of_find_property(np, name, lenp);
pp 283 drivers/of/base.c return pp;
pp 333 drivers/of/base.c struct property *pp = __of_find_property(np, name, lenp);
pp 335 drivers/of/base.c return pp ? pp->value : NULL;
pp 345 drivers/of/base.c struct property *pp = of_find_property(np, name, lenp);
pp 347 drivers/of/base.c return pp ? pp->value : NULL;
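__of_find_property() above is a plain walk of the node's singly linked property list, compared by name. Self-contained sketch of the same loop (helper name is an assumption):

static struct property *find_prop_sketch(const struct device_node *np,
                                         const char *name, int *lenp)
{
        struct property *pp;

        for (pp = np->properties; pp; pp = pp->next) {
                if (of_prop_cmp(pp->name, name) == 0) {   /* name match */
                        if (lenp)
                                *lenp = pp->length;
                        return pp;
                }
        }
        return NULL;
}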
pp 942 drivers/of/base.c struct property *pp;
pp 965 drivers/of/base.c for_each_property_of_node(of_aliases, pp) {
pp 966 drivers/of/base.c if (strlen(pp->name) == len && !strncmp(pp->name, path, len)) {
pp 967 drivers/of/base.c np = of_find_node_by_path(pp->value);
pp 1088 drivers/of/base.c struct property *pp;
pp 1093 drivers/of/base.c for (pp = np->properties; pp; pp = pp->next) {
pp 1094 drivers/of/base.c if (of_prop_cmp(pp->name, prop_name) == 0) {
pp 1996 drivers/of/base.c struct property *pp;
pp 2019 drivers/of/base.c for_each_property_of_node(of_aliases, pp) {
pp 2020 drivers/of/base.c const char *start = pp->name;
pp 2027 drivers/of/base.c if (!strcmp(pp->name, "name") ||
pp 2028 drivers/of/base.c !strcmp(pp->name, "phandle") ||
pp 2029 drivers/of/base.c !strcmp(pp->name, "linux,phandle"))
pp 2032 drivers/of/base.c np = of_find_node_by_path(pp->value);
pp 440 drivers/of/dynamic.c struct property *pp, *new_pp;
pp 441 drivers/of/dynamic.c for_each_property_of_node(np, pp) {
pp 442 drivers/of/dynamic.c new_pp = __of_prop_dup(pp, GFP_KERNEL);
pp 114 drivers/of/fdt.c struct property *pp, **pprev = NULL;
pp 140 drivers/of/fdt.c pp = unflatten_dt_alloc(mem, sizeof(struct property),
pp 164 drivers/of/fdt.c pp->name = (char *)pname;
pp 165 drivers/of/fdt.c pp->length = sz;
pp 166 drivers/of/fdt.c pp->value = (__be32 *)val;
pp 167 drivers/of/fdt.c *pprev = pp;
pp 168 drivers/of/fdt.c pprev = &pp->next;
pp 189 drivers/of/fdt.c pp = unflatten_dt_alloc(mem, sizeof(struct property) + len,
pp 192 drivers/of/fdt.c pp->name = "name";
pp 193 drivers/of/fdt.c pp->length = len;
pp 194 drivers/of/fdt.c pp->value = pp + 1;
pp 195 drivers/of/fdt.c *pprev = pp;
pp 196 drivers/of/fdt.c pprev = &pp->next;
pp 197 drivers/of/fdt.c memcpy(pp->value, ps, len - 1);
pp 198 drivers/of/fdt.c ((char *)pp->value)[len - 1] = 0;
pp 200 drivers/of/fdt.c nodename, (char *)pp->value);
pp 35 drivers/of/kobj.c struct property *pp = container_of(bin_attr, struct property, attr);
pp 36 drivers/of/kobj.c return memory_read_from_buffer(buf, count, &offset, pp->value, pp->length);
pp 63 drivers/of/kobj.c int __of_add_property_sysfs(struct device_node *np, struct property *pp)
pp 68 drivers/of/kobj.c bool secure = strncmp(pp->name, "security-", 9) == 0;
pp 76 drivers/of/kobj.c sysfs_bin_attr_init(&pp->attr);
pp 77 drivers/of/kobj.c pp->attr.attr.name = safe_name(&np->kobj, pp->name);
pp 78 drivers/of/kobj.c pp->attr.attr.mode = secure ? 0400 : 0444;
pp 79 drivers/of/kobj.c pp->attr.size = secure ? 0 : pp->length;
pp 80 drivers/of/kobj.c pp->attr.read = of_node_property_read;
pp 82 drivers/of/kobj.c rc = sysfs_create_bin_file(&np->kobj, &pp->attr);
pp 83 drivers/of/kobj.c WARN(rc, "error adding attribute %s to node %pOF\n", pp->name, np);
pp 119 drivers/of/kobj.c struct property *pp;
pp 144 drivers/of/kobj.c for_each_property_of_node(np, pp)
pp 145 drivers/of/kobj.c __of_add_property_sysfs(np, pp);
pp 152 drivers/of/kobj.c struct property *pp;
pp 160 drivers/of/kobj.c for_each_property_of_node(np, pp)
pp 161 drivers/of/kobj.c __of_sysfs_remove_bin_file(np, pp);
pp 44 drivers/of/of_net.c struct property *pp = of_find_property(np, name, NULL);
pp 46 drivers/of/of_net.c if (pp && pp->length == ETH_ALEN && is_valid_ether_addr(pp->value))
pp 47 drivers/of/of_net.c return pp->value;
pp 62 drivers/of/of_private.h int __of_add_property_sysfs(struct device_node *np, struct property *pp);
pp 69 drivers/of/of_private.h static inline int __of_add_property_sysfs(struct device_node *np, struct property *pp)
pp 735 drivers/of/property.c struct device_node *np, *pp;
pp 740 drivers/of/property.c pp = of_graph_get_port_parent(np);
pp 744 drivers/of/property.c return pp;
pp 272 drivers/parport/parport_ax88796.c struct parport *pp;
pp 311 drivers/parport/parport_ax88796.c pp = parport_register_port((unsigned long)dd->base, irq,
pp 315 drivers/parport/parport_ax88796.c if (pp == NULL) {
pp 321 drivers/parport/parport_ax88796.c pp->private_data = dd;
pp 322 drivers/parport/parport_ax88796.c dd->parport = pp;
pp 335 drivers/parport/parport_ax88796.c IRQF_TRIGGER_FALLING, pdev->name, pp);
pp 343 drivers/parport/parport_ax88796.c platform_set_drvdata(pdev, pp);
pp 346 drivers/parport/parport_ax88796.c parport_announce_port(pp);
pp 351 drivers/parport/parport_ax88796.c parport_remove_port(pp);
pp 293 drivers/parport/parport_mfc3.c struct pia *pp;
pp 304 drivers/parport/parport_mfc3.c pp = ZTWO_VADDR(piabase);
pp 305 drivers/parport/parport_mfc3.c pp->crb = 0;
pp 306 drivers/parport/parport_mfc3.c pp->pddrb = 255; /* all data pins output */
pp 307 drivers/parport/parport_mfc3.c pp->crb = PIA_DDR|32|8;
pp 308 drivers/parport/parport_mfc3.c dummy = pp->pddrb; /* reading clears interrupt */
pp 309 drivers/parport/parport_mfc3.c pp->cra = 0;
pp 310 drivers/parport/parport_mfc3.c pp->pddra = 0xe0; /* /RESET, /DIR ,/AUTO-FEED output */
pp 311 drivers/parport/parport_mfc3.c pp->cra = PIA_DDR;
pp 312 drivers/parport/parport_mfc3.c pp->ppra = 0; /* reset printer */
pp 314 drivers/parport/parport_mfc3.c pp->ppra = 128;
pp 315 drivers/parport/parport_mfc3.c p = parport_register_port((unsigned long)pp, IRQ_AMIGA_PORTS,
pp 590 drivers/parport/procfs.c int parport_proc_register(struct parport *pp)
pp 595 drivers/parport/procfs.c int parport_proc_unregister(struct parport *pp)
pp 203 drivers/pci/controller/dwc/pci-dra7xx.c static int dra7xx_pcie_host_init(struct pcie_port *pp)
pp 205 drivers/pci/controller/dwc/pci-dra7xx.c struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
pp 208 drivers/pci/controller/dwc/pci-dra7xx.c dw_pcie_setup_rc(pp);
pp 212 drivers/pci/controller/dwc/pci-dra7xx.c dw_pcie_msi_init(pp);
pp 236 drivers/pci/controller/dwc/pci-dra7xx.c static int dra7xx_pcie_init_irq_domain(struct pcie_port *pp)
pp 238 drivers/pci/controller/dwc/pci-dra7xx.c struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
pp 250 drivers/pci/controller/dwc/pci-dra7xx.c &intx_domain_ops, pp);
pp 264 drivers/pci/controller/dwc/pci-dra7xx.c struct pcie_port *pp = &pci->pp;
pp 272 drivers/pci/controller/dwc/pci-dra7xx.c dw_handle_msi_irq(pp);
pp 460 drivers/pci/controller/dwc/pci-dra7xx.c struct pcie_port *pp = &pci->pp;
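The DesignWare PCIe glue drivers indexed here (dra7xx above, exynos and imx6 below) all follow one bring-up recipe: fetch the IRQ, install a per-SoC dw_pcie_host_ops in pcie_port::ops, and delegate to dw_pcie_host_init(). Condensed sketch from the dra7xx lines:

struct pcie_port *pp = &pci->pp;

pp->irq = platform_get_irq(pdev, 1);
if (pp->irq < 0)
        return pp->irq;

pp->ops = &dra7xx_pcie_host_ops;        /* per-SoC host ops */
return dw_pcie_host_init(pp);           /* common DWC core takes over */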
pp 464 drivers/pci/controller/dwc/pci-dra7xx.c pp->irq = platform_get_irq(pdev, 1);
pp 465 drivers/pci/controller/dwc/pci-dra7xx.c if (pp->irq < 0) {
pp 467 drivers/pci/controller/dwc/pci-dra7xx.c return pp->irq;
pp 470 drivers/pci/controller/dwc/pci-dra7xx.c ret = devm_request_irq(dev, pp->irq, dra7xx_pcie_msi_irq_handler,
pp 478 drivers/pci/controller/dwc/pci-dra7xx.c ret = dra7xx_pcie_init_irq_domain(pp);
pp 487 drivers/pci/controller/dwc/pci-dra7xx.c pp->ops = &dra7xx_pcie_host_ops;
pp 489 drivers/pci/controller/dwc/pci-dra7xx.c ret = dw_pcie_host_init(pp);
pp 237 drivers/pci/controller/dwc/pci-exynos.c struct pcie_port *pp = &pci->pp;
pp 256 drivers/pci/controller/dwc/pci-exynos.c dw_pcie_setup_rc(pp);
pp 300 drivers/pci/controller/dwc/pci-exynos.c struct pcie_port *pp = &pci->pp;
pp 303 drivers/pci/controller/dwc/pci-exynos.c dw_pcie_msi_init(pp);
pp 341 drivers/pci/controller/dwc/pci-exynos.c static int exynos_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
pp 344 drivers/pci/controller/dwc/pci-exynos.c struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
pp 354 drivers/pci/controller/dwc/pci-exynos.c static int exynos_pcie_wr_own_conf(struct pcie_port *pp, int where, int size,
pp 357 drivers/pci/controller/dwc/pci-exynos.c struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
pp 379 drivers/pci/controller/dwc/pci-exynos.c static int exynos_pcie_host_init(struct pcie_port *pp)
pp 381 drivers/pci/controller/dwc/pci-exynos.c struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
pp 400 drivers/pci/controller/dwc/pci-exynos.c struct pcie_port *pp = &pci->pp;
pp 404 drivers/pci/controller/dwc/pci-exynos.c pp->irq = platform_get_irq(pdev, 1);
pp 405 drivers/pci/controller/dwc/pci-exynos.c if (pp->irq < 0) {
pp 407 drivers/pci/controller/dwc/pci-exynos.c return pp->irq;
pp 409 drivers/pci/controller/dwc/pci-exynos.c ret = devm_request_irq(dev, pp->irq, exynos_pcie_irq_handler,
pp 417 drivers/pci/controller/dwc/pci-exynos.c pp->msi_irq = platform_get_irq(pdev, 0);
pp 418 drivers/pci/controller/dwc/pci-exynos.c if (pp->msi_irq < 0) {
pp 420 drivers/pci/controller/dwc/pci-exynos.c return pp->msi_irq;
pp 424 drivers/pci/controller/dwc/pci-exynos.c pp->ops = &exynos_pcie_host_ops;
pp 426 drivers/pci/controller/dwc/pci-exynos.c ret = dw_pcie_host_init(pp);
pp 839 drivers/pci/controller/dwc/pci-imx6.c static int imx6_pcie_host_init(struct pcie_port *pp)
pp 841 drivers/pci/controller/dwc/pci-imx6.c struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
pp 848 drivers/pci/controller/dwc/pci-imx6.c dw_pcie_setup_rc(pp);
pp 852 drivers/pci/controller/dwc/pci-imx6.c dw_pcie_msi_init(pp);
pp 865 drivers/pci/controller/dwc/pci-imx6.c struct pcie_port *pp = &pci->pp;
pp 870 drivers/pci/controller/dwc/pci-imx6.c pp->msi_irq = platform_get_irq_byname(pdev, "msi");
pp 871 drivers/pci/controller/dwc/pci-imx6.c if (pp->msi_irq <= 0) {
pp 877 drivers/pci/controller/dwc/pci-imx6.c pp->ops = &imx6_pcie_host_ops;
pp 879 drivers/pci/controller/dwc/pci-imx6.c ret = dw_pcie_host_init(pp);
pp 988 drivers/pci/controller/dwc/pci-imx6.c struct pcie_port *pp = &imx6_pcie->pci->pp;
pp 996 drivers/pci/controller/dwc/pci-imx6.c dw_pcie_setup_rc(pp);
pp 1262 drivers/pci/controller/dwc/pci-imx6.c struct pcie_port *pp = bus->sysdata;
pp 1272 drivers/pci/controller/dwc/pci-imx6.c if (bus->number == pp->root_bus_nr) {
pp 1273 drivers/pci/controller/dwc/pci-imx6.c struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
pp 152 drivers/pci/controller/dwc/pci-keystone.c struct pcie_port *pp = irq_data_get_irq_chip_data(data);
pp 159 drivers/pci/controller/dwc/pci-keystone.c pci = to_dw_pcie_from_pp(pp);
pp 172 drivers/pci/controller/dwc/pci-keystone.c struct pcie_port *pp = irq_data_get_irq_chip_data(data);
pp 177 drivers/pci/controller/dwc/pci-keystone.c pci = to_dw_pcie_from_pp(pp);
pp 197 drivers/pci/controller/dwc/pci-keystone.c struct pcie_port *pp = irq_data_get_irq_chip_data(data);
pp 205 drivers/pci/controller/dwc/pci-keystone.c raw_spin_lock_irqsave(&pp->lock, flags);
pp 207 drivers/pci/controller/dwc/pci-keystone.c pci = to_dw_pcie_from_pp(pp);
pp 216 drivers/pci/controller/dwc/pci-keystone.c raw_spin_unlock_irqrestore(&pp->lock, flags);
pp 221 drivers/pci/controller/dwc/pci-keystone.c struct pcie_port *pp = irq_data_get_irq_chip_data(data);
pp 229 drivers/pci/controller/dwc/pci-keystone.c raw_spin_lock_irqsave(&pp->lock, flags);
pp 231 drivers/pci/controller/dwc/pci-keystone.c pci = to_dw_pcie_from_pp(pp);
pp 240 drivers/pci/controller/dwc/pci-keystone.c raw_spin_unlock_irqrestore(&pp->lock, flags);
pp 252 drivers/pci/controller/dwc/pci-keystone.c static int ks_pcie_msi_host_init(struct pcie_port *pp)
pp 254 drivers/pci/controller/dwc/pci-keystone.c pp->msi_irq_chip = &ks_pcie_msi_irq_chip;
pp 255 drivers/pci/controller/dwc/pci-keystone.c return dw_pcie_allocate_domains(pp);
pp 281 drivers/pci/controller/dwc/pci-keystone.c static int ks_pcie_am654_msi_host_init(struct pcie_port *pp)
pp 402 drivers/pci/controller/dwc/pci-keystone.c struct pcie_port *pp = &pci->pp;
pp 403 drivers/pci/controller/dwc/pci-keystone.c u64 start = pp->mem->start;
pp 404 drivers/pci/controller/dwc/pci-keystone.c u64 end = pp->mem->end;
pp 433 drivers/pci/controller/dwc/pci-keystone.c static int ks_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
pp 437 drivers/pci/controller/dwc/pci-keystone.c struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
pp 443 drivers/pci/controller/dwc/pci-keystone.c if (bus->parent->number != pp->root_bus_nr)
pp 447 drivers/pci/controller/dwc/pci-keystone.c return dw_pcie_read(pp->va_cfg0_base + where, size, val);
pp 450 drivers/pci/controller/dwc/pci-keystone.c static int ks_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
pp 454 drivers/pci/controller/dwc/pci-keystone.c struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
pp 460 drivers/pci/controller/dwc/pci-keystone.c if (bus->parent->number != pp->root_bus_nr)
pp 464 drivers/pci/controller/dwc/pci-keystone.c return dw_pcie_write(pp->va_cfg0_base + where, size, val);
pp 472 drivers/pci/controller/dwc/pci-keystone.c static void ks_pcie_v3_65_scan_bus(struct pcie_port *pp)
pp 474 drivers/pci/controller/dwc/pci-keystone.c struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
pp 583 drivers/pci/controller/dwc/pci-keystone.c struct pcie_port *pp = &pci->pp;
pp 607 drivers/pci/controller/dwc/pci-keystone.c virq = irq_linear_revmap(pp->irq_domain, vector);
pp 804 drivers/pci/controller/dwc/pci-keystone.c static int __init ks_pcie_host_init(struct pcie_port *pp)
pp 806 drivers/pci/controller/dwc/pci-keystone.c struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
pp 818 drivers/pci/controller/dwc/pci-keystone.c dw_pcie_setup_rc(pp);
pp 868 drivers/pci/controller/dwc/pci-keystone.c struct pcie_port *pp = &pci->pp;
pp 874 drivers/pci/controller/dwc/pci-keystone.c pp->va_cfg0_base = devm_pci_remap_cfg_resource(dev, res);
pp 875 drivers/pci/controller/dwc/pci-keystone.c if (IS_ERR(pp->va_cfg0_base))
pp 876 drivers/pci/controller/dwc/pci-keystone.c return PTR_ERR(pp->va_cfg0_base);
pp 878 drivers/pci/controller/dwc/pci-keystone.c pp->va_cfg1_base = pp->va_cfg0_base;
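The pci-keystone and pcie-designware-host entries nearby share one MSI bookkeeping scheme: a hardware vector number is split into a controller index and a bit inside that controller's 32-bit mask register, and the cached copy in pp->irq_mask[ctrl] is updated under pp->lock before being written back out. A small sketch of just that arithmetic, with the locking and register write elided (MAX_MSI_IRQS_PER_CTRL matches the driver constant; everything else here is illustrative):

    #include <stdio.h>
    #include <stdint.h>

    #define MAX_MSI_IRQS_PER_CTRL 32   /* matches the dwc driver constant */
    #define BIT(n) (1u << (n))

    static uint32_t irq_mask[8];       /* cached per-controller mask copies */

    /* mask one MSI vector: set its bit in the cached mask; the driver
     * then writes irq_mask[ctrl] back to the controller's MASK register */
    static void msi_mask(unsigned int hwirq)
    {
        unsigned int ctrl = hwirq / MAX_MSI_IRQS_PER_CTRL;
        unsigned int bit  = hwirq % MAX_MSI_IRQS_PER_CTRL;

        irq_mask[ctrl] |= BIT(bit);
    }

    int main(void)
    {
        msi_mask(37);                  /* vector 37 -> ctrl 1, bit 5 */
        printf("ctrl1 mask = %#x\n", (unsigned)irq_mask[1]);
        return 0;
    }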
pp 880 drivers/pci/controller/dwc/pci-keystone.c ret = dw_pcie_host_init(pp);
pp 1374 drivers/pci/controller/dwc/pci-keystone.c pci->pp.ops = host_ops;
pp 134 drivers/pci/controller/dwc/pci-layerscape.c static int ls_pcie_host_init(struct pcie_port *pp)
pp 136 drivers/pci/controller/dwc/pci-layerscape.c struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
pp 153 drivers/pci/controller/dwc/pci-layerscape.c dw_pcie_setup_rc(pp);
pp 158 drivers/pci/controller/dwc/pci-layerscape.c static int ls1021_pcie_host_init(struct pcie_port *pp)
pp 160 drivers/pci/controller/dwc/pci-layerscape.c struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
pp 182 drivers/pci/controller/dwc/pci-layerscape.c return ls_pcie_host_init(pp);
pp 185 drivers/pci/controller/dwc/pci-layerscape.c static int ls_pcie_msi_host_init(struct pcie_port *pp)
pp 187 drivers/pci/controller/dwc/pci-layerscape.c struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
pp 278 drivers/pci/controller/dwc/pci-layerscape.c struct pcie_port *pp = &pci->pp;
pp 282 drivers/pci/controller/dwc/pci-layerscape.c pp->ops = pcie->drvdata->ops;
pp 284 drivers/pci/controller/dwc/pci-layerscape.c ret = dw_pcie_host_init(pp);
pp 378 drivers/pci/controller/dwc/pci-meson.c struct pcie_port *pp = &pci->pp;
pp 384 drivers/pci/controller/dwc/pci-meson.c dw_pcie_setup_rc(pp);
pp 395 drivers/pci/controller/dwc/pci-meson.c dw_pcie_msi_init(&mp->pci.pp);
pp 398 drivers/pci/controller/dwc/pci-meson.c static int meson_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
pp 401 drivers/pci/controller/dwc/pci-meson.c struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
pp 425 drivers/pci/controller/dwc/pci-meson.c static int meson_pcie_wr_own_conf(struct pcie_port *pp, int where,
pp 428 drivers/pci/controller/dwc/pci-meson.c struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
pp 472 drivers/pci/controller/dwc/pci-meson.c static int meson_pcie_host_init(struct pcie_port *pp)
pp 474 drivers/pci/controller/dwc/pci-meson.c struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
pp 497 drivers/pci/controller/dwc/pci-meson.c struct pcie_port *pp = &pci->pp;
pp 502 drivers/pci/controller/dwc/pci-meson.c pp->msi_irq = platform_get_irq(pdev, 0);
pp 503 drivers/pci/controller/dwc/pci-meson.c if (pp->msi_irq < 0) {
pp 505 drivers/pci/controller/dwc/pci-meson.c return pp->msi_irq;
pp 509 drivers/pci/controller/dwc/pci-meson.c pp->ops = &meson_pcie_host_ops;
pp 512 drivers/pci/controller/dwc/pci-meson.c ret = dw_pcie_host_init(pp);
pp 232 drivers/pci/controller/dwc/pcie-al.c struct pcie_port *pp = &pcie->pci->pp;
pp 235 drivers/pci/controller/dwc/pcie-al.c pci_base_addr = (void __iomem *)((uintptr_t)pp->va_cfg0_base +
pp 251 drivers/pci/controller/dwc/pcie-al.c static int al_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
pp 255 drivers/pci/controller/dwc/pcie-al.c struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
pp 273 drivers/pci/controller/dwc/pcie-al.c static int al_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
pp 277 drivers/pci/controller/dwc/pcie-al.c struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
pp 298 drivers/pci/controller/dwc/pcie-al.c struct pcie_port *pp = &pcie->pci->pp;
pp 318 drivers/pci/controller/dwc/pcie-al.c target_bus_cfg->reg_val = pp->busn->start & target_bus_cfg->reg_mask;
pp 323 drivers/pci/controller/dwc/pcie-al.c secondary_bus = pp->busn->start + 1;
pp 324 drivers/pci/controller/dwc/pcie-al.c subordinate_bus = pp->busn->end;
pp 341 drivers/pci/controller/dwc/pcie-al.c static int al_pcie_host_init(struct pcie_port *pp)
pp 343 drivers/pci/controller/dwc/pcie-al.c struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
pp 366 drivers/pci/controller/dwc/pcie-al.c static int al_add_pcie_port(struct pcie_port *pp,
pp 372 drivers/pci/controller/dwc/pcie-al.c pp->ops = &al_pcie_host_ops;
pp 374 drivers/pci/controller/dwc/pcie-al.c ret = dw_pcie_host_init(pp);
pp 437 drivers/pci/controller/dwc/pcie-al.c return al_add_pcie_port(&pci->pp, pdev);
pp 208 drivers/pci/controller/dwc/pcie-armada8k.c static int armada8k_pcie_host_init(struct pcie_port *pp)
pp 210 drivers/pci/controller/dwc/pcie-armada8k.c struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
pp 213 drivers/pci/controller/dwc/pcie-armada8k.c dw_pcie_setup_rc(pp);
pp 244 drivers/pci/controller/dwc/pcie-armada8k.c struct pcie_port *pp = &pci->pp;
pp 248 drivers/pci/controller/dwc/pcie-armada8k.c pp->ops = &armada8k_pcie_host_ops;
pp 250 drivers/pci/controller/dwc/pcie-armada8k.c pp->irq = platform_get_irq(pdev, 0);
pp 251 drivers/pci/controller/dwc/pcie-armada8k.c if (pp->irq < 0) {
pp 253 drivers/pci/controller/dwc/pcie-armada8k.c return pp->irq;
pp 256 drivers/pci/controller/dwc/pcie-armada8k.c ret = devm_request_irq(dev, pp->irq, armada8k_pcie_irq_handler,
pp 259 drivers/pci/controller/dwc/pcie-armada8k.c dev_err(dev, "failed to request irq %d\n", pp->irq);
pp 263 drivers/pci/controller/dwc/pcie-armada8k.c ret = dw_pcie_host_init(pp);
pp 110 drivers/pci/controller/dwc/pcie-artpec6.c struct pcie_port *pp = &pci->pp;
pp 115 drivers/pci/controller/dwc/pcie-artpec6.c return pci_addr - pp->cfg0_base;
pp 358 drivers/pci/controller/dwc/pcie-artpec6.c struct pcie_port *pp = &pci->pp;
pp 361 drivers/pci/controller/dwc/pcie-artpec6.c dw_pcie_msi_init(pp);
pp 364 drivers/pci/controller/dwc/pcie-artpec6.c static int artpec6_pcie_host_init(struct pcie_port *pp)
pp 366 drivers/pci/controller/dwc/pcie-artpec6.c struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
pp 374 drivers/pci/controller/dwc/pcie-artpec6.c dw_pcie_setup_rc(pp);
pp 390 drivers/pci/controller/dwc/pcie-artpec6.c struct pcie_port *pp = &pci->pp;
pp 395 drivers/pci/controller/dwc/pcie-artpec6.c pp->msi_irq = platform_get_irq_byname(pdev, "msi");
pp 396 drivers/pci/controller/dwc/pcie-artpec6.c if (pp->msi_irq < 0) {
pp 398 drivers/pci/controller/dwc/pcie-artpec6.c return pp->msi_irq;
pp 402 drivers/pci/controller/dwc/pcie-artpec6.c pp->ops = &artpec6_pcie_host_ops;
pp 404 drivers/pci/controller/dwc/pcie-artpec6.c ret = dw_pcie_host_init(pp);
pp 23 drivers/pci/controller/dwc/pcie-designware-host.c static int dw_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
pp 28 drivers/pci/controller/dwc/pcie-designware-host.c if (pp->ops->rd_own_conf)
pp 29 drivers/pci/controller/dwc/pcie-designware-host.c return pp->ops->rd_own_conf(pp, where, size, val);
pp 31 drivers/pci/controller/dwc/pcie-designware-host.c pci = to_dw_pcie_from_pp(pp);
pp 35 drivers/pci/controller/dwc/pcie-designware-host.c static int dw_pcie_wr_own_conf(struct pcie_port *pp, int where, int size,
pp 40 drivers/pci/controller/dwc/pcie-designware-host.c if (pp->ops->wr_own_conf)
pp 41 drivers/pci/controller/dwc/pcie-designware-host.c return pp->ops->wr_own_conf(pp, where, size, val);
pp 43 drivers/pci/controller/dwc/pcie-designware-host.c pci = to_dw_pcie_from_pp(pp);
pp 78 drivers/pci/controller/dwc/pcie-designware-host.c irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
pp 85 drivers/pci/controller/dwc/pcie-designware-host.c num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;
pp 88 drivers/pci/controller/dwc/pcie-designware-host.c dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_STATUS +
pp 99 drivers/pci/controller/dwc/pcie-designware-host.c irq = irq_find_mapping(pp->irq_domain,
pp 114 drivers/pci/controller/dwc/pcie-designware-host.c struct pcie_port *pp;
pp 118 drivers/pci/controller/dwc/pcie-designware-host.c pp = irq_desc_get_handler_data(desc);
pp 119 drivers/pci/controller/dwc/pcie-designware-host.c dw_handle_msi_irq(pp);
pp 126 drivers/pci/controller/dwc/pcie-designware-host.c struct pcie_port *pp = irq_data_get_irq_chip_data(d);
pp 127 drivers/pci/controller/dwc/pcie-designware-host.c struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
pp 130 drivers/pci/controller/dwc/pcie-designware-host.c msi_target = (u64)pp->msi_data;
pp 149 drivers/pci/controller/dwc/pcie-designware-host.c struct pcie_port *pp = irq_data_get_irq_chip_data(d);
pp 153 drivers/pci/controller/dwc/pcie-designware-host.c raw_spin_lock_irqsave(&pp->lock, flags);
pp 159 drivers/pci/controller/dwc/pcie-designware-host.c pp->irq_mask[ctrl] |= BIT(bit);
pp 160 drivers/pci/controller/dwc/pcie-designware-host.c dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK + res, 4,
pp 161 drivers/pci/controller/dwc/pcie-designware-host.c pp->irq_mask[ctrl]);
pp 163 drivers/pci/controller/dwc/pcie-designware-host.c raw_spin_unlock_irqrestore(&pp->lock, flags);
pp 168 drivers/pci/controller/dwc/pcie-designware-host.c struct pcie_port *pp = irq_data_get_irq_chip_data(d);
pp 172 drivers/pci/controller/dwc/pcie-designware-host.c raw_spin_lock_irqsave(&pp->lock, flags);
pp 178 drivers/pci/controller/dwc/pcie-designware-host.c pp->irq_mask[ctrl] &= ~BIT(bit);
pp 179 drivers/pci/controller/dwc/pcie-designware-host.c dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK + res, 4,
pp 180 drivers/pci/controller/dwc/pcie-designware-host.c pp->irq_mask[ctrl]);
pp 182 drivers/pci/controller/dwc/pcie-designware-host.c raw_spin_unlock_irqrestore(&pp->lock, flags);
pp 187 drivers/pci/controller/dwc/pcie-designware-host.c struct pcie_port *pp = irq_data_get_irq_chip_data(d);
pp 194 drivers/pci/controller/dwc/pcie-designware-host.c dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_STATUS + res, 4, BIT(bit));
pp 210 drivers/pci/controller/dwc/pcie-designware-host.c struct pcie_port *pp = domain->host_data;
pp 215 drivers/pci/controller/dwc/pcie-designware-host.c raw_spin_lock_irqsave(&pp->lock, flags);
pp 217 drivers/pci/controller/dwc/pcie-designware-host.c bit = bitmap_find_free_region(pp->msi_irq_in_use, pp->num_vectors,
pp 220 drivers/pci/controller/dwc/pcie-designware-host.c raw_spin_unlock_irqrestore(&pp->lock, flags);
pp 227 drivers/pci/controller/dwc/pcie-designware-host.c pp->msi_irq_chip,
pp 228 drivers/pci/controller/dwc/pcie-designware-host.c pp, handle_edge_irq,
pp 238 drivers/pci/controller/dwc/pcie-designware-host.c struct pcie_port *pp = irq_data_get_irq_chip_data(d);
pp 241 drivers/pci/controller/dwc/pcie-designware-host.c raw_spin_lock_irqsave(&pp->lock, flags);
pp 243 drivers/pci/controller/dwc/pcie-designware-host.c bitmap_release_region(pp->msi_irq_in_use, d->hwirq,
pp 246 drivers/pci/controller/dwc/pcie-designware-host.c raw_spin_unlock_irqrestore(&pp->lock, flags);
pp 254 drivers/pci/controller/dwc/pcie-designware-host.c int dw_pcie_allocate_domains(struct pcie_port *pp)
pp 256 drivers/pci/controller/dwc/pcie-designware-host.c struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
pp 259 drivers/pci/controller/dwc/pcie-designware-host.c pp->irq_domain = irq_domain_create_linear(fwnode, pp->num_vectors,
pp 260 drivers/pci/controller/dwc/pcie-designware-host.c &dw_pcie_msi_domain_ops, pp);
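dw_handle_msi_irq(), listed above, demultiplexes a chained MSI interrupt by reading one status word per controller and mapping every set bit back to a hardware IRQ number for irq_find_mapping(). A userspace sketch of that loop shape, with an invented read_status() standing in for the PCIE_MSI_INTR0_STATUS read:

    #include <stdio.h>
    #include <stdint.h>

    #define MAX_MSI_IRQS_PER_CTRL 32

    /* stand-in for reading one controller's MSI status register */
    static uint32_t read_status(unsigned int ctrl)
    {
        return ctrl == 0 ? 0x00000022 : 0;   /* pretend bits 1 and 5 fired */
    }

    int main(void)
    {
        unsigned int num_ctrls = 2, ctrl, bit;
        uint32_t status;

        for (ctrl = 0; ctrl < num_ctrls; ctrl++) {
            status = read_status(ctrl);
            /* walk the set bits, as dw_handle_msi_irq() does, turning
             * each into a hardware IRQ number for the irq domain */
            for (bit = 0; bit < MAX_MSI_IRQS_PER_CTRL; bit++)
                if (status & (1u << bit))
                    printf("dispatch hwirq %u\n",
                           ctrl * MAX_MSI_IRQS_PER_CTRL + bit);
        }
        return 0;
    }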
pp 261 drivers/pci/controller/dwc/pcie-designware-host.c if (!pp->irq_domain) {
pp 266 drivers/pci/controller/dwc/pcie-designware-host.c pp->msi_domain = pci_msi_create_irq_domain(fwnode,
pp 268 drivers/pci/controller/dwc/pcie-designware-host.c pp->irq_domain);
pp 269 drivers/pci/controller/dwc/pcie-designware-host.c if (!pp->msi_domain) {
pp 271 drivers/pci/controller/dwc/pcie-designware-host.c irq_domain_remove(pp->irq_domain);
pp 278 drivers/pci/controller/dwc/pcie-designware-host.c void dw_pcie_free_msi(struct pcie_port *pp)
pp 280 drivers/pci/controller/dwc/pcie-designware-host.c if (pp->msi_irq) {
pp 281 drivers/pci/controller/dwc/pcie-designware-host.c irq_set_chained_handler(pp->msi_irq, NULL);
pp 282 drivers/pci/controller/dwc/pcie-designware-host.c irq_set_handler_data(pp->msi_irq, NULL);
pp 285 drivers/pci/controller/dwc/pcie-designware-host.c irq_domain_remove(pp->msi_domain);
pp 286 drivers/pci/controller/dwc/pcie-designware-host.c irq_domain_remove(pp->irq_domain);
pp 288 drivers/pci/controller/dwc/pcie-designware-host.c if (pp->msi_page)
pp 289 drivers/pci/controller/dwc/pcie-designware-host.c __free_page(pp->msi_page);
pp 292 drivers/pci/controller/dwc/pcie-designware-host.c void dw_pcie_msi_init(struct pcie_port *pp)
pp 294 drivers/pci/controller/dwc/pcie-designware-host.c struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
pp 298 drivers/pci/controller/dwc/pcie-designware-host.c pp->msi_page = alloc_page(GFP_KERNEL);
pp 299 drivers/pci/controller/dwc/pcie-designware-host.c pp->msi_data = dma_map_page(dev, pp->msi_page, 0, PAGE_SIZE,
pp 301 drivers/pci/controller/dwc/pcie-designware-host.c if (dma_mapping_error(dev, pp->msi_data)) {
pp 303 drivers/pci/controller/dwc/pcie-designware-host.c __free_page(pp->msi_page);
pp 304 drivers/pci/controller/dwc/pcie-designware-host.c pp->msi_page = NULL;
pp 307 drivers/pci/controller/dwc/pcie-designware-host.c msi_target = (u64)pp->msi_data;
pp 310 drivers/pci/controller/dwc/pcie-designware-host.c dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_LO, 4,
pp 312 drivers/pci/controller/dwc/pcie-designware-host.c dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_HI, 4,
pp 317 drivers/pci/controller/dwc/pcie-designware-host.c int dw_pcie_host_init(struct pcie_port *pp)
pp 319 drivers/pci/controller/dwc/pcie-designware-host.c struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
pp 330 drivers/pci/controller/dwc/pcie-designware-host.c raw_spin_lock_init(&pci->pp.lock);
pp 334 drivers/pci/controller/dwc/pcie-designware-host.c pp->cfg0_size = resource_size(cfg_res) >> 1;
pp 335 drivers/pci/controller/dwc/pcie-designware-host.c pp->cfg1_size = resource_size(cfg_res) >> 1;
pp 336 drivers/pci/controller/dwc/pcie-designware-host.c pp->cfg0_base = cfg_res->start;
pp 337 drivers/pci/controller/dwc/pcie-designware-host.c pp->cfg1_base = cfg_res->start + pp->cfg0_size;
pp 338 drivers/pci/controller/dwc/pcie-designware-host.c } else if (!pp->va_cfg0_base) {
pp 347 drivers/pci/controller/dwc/pcie-designware-host.c &bridge->windows, &pp->io_base);
pp 360 drivers/pci/controller/dwc/pcie-designware-host.c pp->io_base);
pp 366 drivers/pci/controller/dwc/pcie-designware-host.c pp->io = win->res;
pp 367 drivers/pci/controller/dwc/pcie-designware-host.c pp->io->name = "I/O";
pp 368 drivers/pci/controller/dwc/pcie-designware-host.c pp->io_size = resource_size(pp->io);
pp 369 drivers/pci/controller/dwc/pcie-designware-host.c pp->io_bus_addr = pp->io->start - win->offset;
pp 373 drivers/pci/controller/dwc/pcie-designware-host.c pp->mem = win->res;
pp 374 drivers/pci/controller/dwc/pcie-designware-host.c pp->mem->name = "MEM";
pp 375 drivers/pci/controller/dwc/pcie-designware-host.c pp->mem_size = resource_size(pp->mem);
pp 376 drivers/pci/controller/dwc/pcie-designware-host.c pp->mem_bus_addr = pp->mem->start - win->offset;
pp 379 drivers/pci/controller/dwc/pcie-designware-host.c pp->cfg = win->res;
pp 380 drivers/pci/controller/dwc/pcie-designware-host.c pp->cfg0_size = resource_size(pp->cfg) >> 1;
pp 381 drivers/pci/controller/dwc/pcie-designware-host.c pp->cfg1_size = resource_size(pp->cfg) >> 1;
pp 382 drivers/pci/controller/dwc/pcie-designware-host.c pp->cfg0_base = pp->cfg->start;
pp 383 drivers/pci/controller/dwc/pcie-designware-host.c pp->cfg1_base = pp->cfg->start + pp->cfg0_size;
pp 386 drivers/pci/controller/dwc/pcie-designware-host.c pp->busn = win->res;
pp 393 drivers/pci/controller/dwc/pcie-designware-host.c pp->cfg->start,
pp 394 drivers/pci/controller/dwc/pcie-designware-host.c resource_size(pp->cfg));
pp 401 drivers/pci/controller/dwc/pcie-designware-host.c pp->mem_base = pp->mem->start;
pp 403 drivers/pci/controller/dwc/pcie-designware-host.c if (!pp->va_cfg0_base) {
pp 404 drivers/pci/controller/dwc/pcie-designware-host.c pp->va_cfg0_base = devm_pci_remap_cfgspace(dev,
pp 405 drivers/pci/controller/dwc/pcie-designware-host.c pp->cfg0_base, pp->cfg0_size);
pp 406 drivers/pci/controller/dwc/pcie-designware-host.c if (!pp->va_cfg0_base) {
pp 412 drivers/pci/controller/dwc/pcie-designware-host.c if (!pp->va_cfg1_base) {
pp 413 drivers/pci/controller/dwc/pcie-designware-host.c pp->va_cfg1_base = devm_pci_remap_cfgspace(dev,
pp 414 drivers/pci/controller/dwc/pcie-designware-host.c pp->cfg1_base,
pp 415 drivers/pci/controller/dwc/pcie-designware-host.c pp->cfg1_size);
pp 416 drivers/pci/controller/dwc/pcie-designware-host.c if (!pp->va_cfg1_base) {
pp 432 drivers/pci/controller/dwc/pcie-designware-host.c if (!pp->ops->set_num_vectors) {
pp 433 drivers/pci/controller/dwc/pcie-designware-host.c pp->num_vectors = MSI_DEF_NUM_VECTORS;
pp 435 drivers/pci/controller/dwc/pcie-designware-host.c pp->ops->set_num_vectors(pp);
pp 437 drivers/pci/controller/dwc/pcie-designware-host.c if (pp->num_vectors > MAX_MSI_IRQS ||
pp 438 drivers/pci/controller/dwc/pcie-designware-host.c pp->num_vectors == 0) {
pp 445 drivers/pci/controller/dwc/pcie-designware-host.c if (!pp->ops->msi_host_init) {
pp 446 drivers/pci/controller/dwc/pcie-designware-host.c pp->msi_irq_chip = &dw_pci_msi_bottom_irq_chip;
pp 448 drivers/pci/controller/dwc/pcie-designware-host.c ret = dw_pcie_allocate_domains(pp);
pp 452 drivers/pci/controller/dwc/pcie-designware-host.c if (pp->msi_irq)
pp 453 drivers/pci/controller/dwc/pcie-designware-host.c irq_set_chained_handler_and_data(pp->msi_irq,
pp 455 drivers/pci/controller/dwc/pcie-designware-host.c pp);
pp 457 drivers/pci/controller/dwc/pcie-designware-host.c ret = pp->ops->msi_host_init(pp);
pp 463 drivers/pci/controller/dwc/pcie-designware-host.c if (pp->ops->host_init) {
pp 464 drivers/pci/controller/dwc/pcie-designware-host.c ret = pp->ops->host_init(pp);
pp 469 drivers/pci/controller/dwc/pcie-designware-host.c ret = dw_pcie_rd_own_conf(pp, PCI_HEADER_TYPE, 1, &hdr_type);
pp 484 drivers/pci/controller/dwc/pcie-designware-host.c pp->root_bus_nr = pp->busn->start;
pp 487 drivers/pci/controller/dwc/pcie-designware-host.c bridge->sysdata = pp;
pp 488 drivers/pci/controller/dwc/pcie-designware-host.c bridge->busnr = pp->root_bus_nr;
pp 497 drivers/pci/controller/dwc/pcie-designware-host.c pp->root_bus = bridge->bus;
pp 499 drivers/pci/controller/dwc/pcie-designware-host.c if (pp->ops->scan_bus)
pp 500 drivers/pci/controller/dwc/pcie-designware-host.c pp->ops->scan_bus(pp);
pp 502 drivers/pci/controller/dwc/pcie-designware-host.c pci_bus_size_bridges(pp->root_bus);
pp 503 drivers/pci/controller/dwc/pcie-designware-host.c pci_bus_assign_resources(pp->root_bus);
pp 505 drivers/pci/controller/dwc/pcie-designware-host.c list_for_each_entry(child, &pp->root_bus->children, node)
pp 508 drivers/pci/controller/dwc/pcie-designware-host.c pci_bus_add_devices(pp->root_bus);
pp 512 drivers/pci/controller/dwc/pcie-designware-host.c if (pci_msi_enabled() && !pp->ops->msi_host_init)
pp 513 drivers/pci/controller/dwc/pcie-designware-host.c dw_pcie_free_msi(pp);
pp 518 drivers/pci/controller/dwc/pcie-designware-host.c void dw_pcie_host_deinit(struct pcie_port *pp)
pp 520 drivers/pci/controller/dwc/pcie-designware-host.c pci_stop_root_bus(pp->root_bus);
pp 521 drivers/pci/controller/dwc/pcie-designware-host.c pci_remove_root_bus(pp->root_bus);
pp 522 drivers/pci/controller/dwc/pcie-designware-host.c if (pci_msi_enabled() && !pp->ops->msi_host_init)
pp 523 drivers/pci/controller/dwc/pcie-designware-host.c dw_pcie_free_msi(pp);
pp 527 drivers/pci/controller/dwc/pcie-designware-host.c static int dw_pcie_access_other_conf(struct pcie_port *pp, struct pci_bus *bus,
pp 535 drivers/pci/controller/dwc/pcie-designware-host.c struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
pp 540 drivers/pci/controller/dwc/pcie-designware-host.c if (bus->parent->number == pp->root_bus_nr) {
pp 542 drivers/pci/controller/dwc/pcie-designware-host.c cpu_addr = pp->cfg0_base;
pp 543 drivers/pci/controller/dwc/pcie-designware-host.c cfg_size = pp->cfg0_size;
pp 544 drivers/pci/controller/dwc/pcie-designware-host.c va_cfg_base = pp->va_cfg0_base;
pp 547 drivers/pci/controller/dwc/pcie-designware-host.c cpu_addr = pp->cfg1_base;
pp 548 drivers/pci/controller/dwc/pcie-designware-host.c cfg_size = pp->cfg1_size;
pp 549 drivers/pci/controller/dwc/pcie-designware-host.c va_cfg_base = pp->va_cfg1_base;
pp 562 drivers/pci/controller/dwc/pcie-designware-host.c PCIE_ATU_TYPE_IO, pp->io_base,
pp 563 drivers/pci/controller/dwc/pcie-designware-host.c pp->io_bus_addr, pp->io_size);
pp 568 drivers/pci/controller/dwc/pcie-designware-host.c static int dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
pp 571 drivers/pci/controller/dwc/pcie-designware-host.c if (pp->ops->rd_other_conf)
pp 572 drivers/pci/controller/dwc/pcie-designware-host.c return pp->ops->rd_other_conf(pp, bus, devfn, where,
pp 575 drivers/pci/controller/dwc/pcie-designware-host.c return dw_pcie_access_other_conf(pp, bus, devfn, where, size, val,
pp 579 drivers/pci/controller/dwc/pcie-designware-host.c static int dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
pp 582 drivers/pci/controller/dwc/pcie-designware-host.c if (pp->ops->wr_other_conf)
pp 583 drivers/pci/controller/dwc/pcie-designware-host.c return pp->ops->wr_other_conf(pp, bus, devfn, where,
pp 586 drivers/pci/controller/dwc/pcie-designware-host.c return dw_pcie_access_other_conf(pp, bus, devfn, where, size, &val,
pp 590 drivers/pci/controller/dwc/pcie-designware-host.c static int dw_pcie_valid_device(struct pcie_port *pp, struct pci_bus *bus,
pp 593 drivers/pci/controller/dwc/pcie-designware-host.c struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
pp 596 drivers/pci/controller/dwc/pcie-designware-host.c if (bus->number != pp->root_bus_nr) {
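The config-space accessors in pcie-designware-host.c route by bus number: the root bus is served from the controller's own (DBI) registers, everything behind the link goes through a remapped config window, and dw_pcie_valid_device() rejects nonzero device numbers on the root bus because a PCIe link is point-to-point. A compact sketch of that dispatch, with printf standing in for the real register accesses:

    #include <stdio.h>
    #include <stdbool.h>

    struct pcie_port { int root_bus_nr; };

    /* shape of dw_pcie_valid_device(): only device 0 exists on the
     * root bus, since the link below the root port is point-to-point */
    static bool valid_device(struct pcie_port *pp, int busnr, int dev)
    {
        if (busnr == pp->root_bus_nr && dev > 0)
            return false;
        return true;
    }

    static int rd_conf(struct pcie_port *pp, int busnr, int dev, int where)
    {
        if (!valid_device(pp, busnr, dev)) {
            printf("bus %d dev %d: rejected\n", busnr, dev);
            return -1;              /* the real code returns all-ones data */
        }
        if (busnr == pp->root_bus_nr)
            printf("own conf (DBI) read at %#x\n", (unsigned)where);
        else
            printf("other conf (config window) read at %#x\n", (unsigned)where);
        return 0;
    }

    int main(void)
    {
        struct pcie_port pp = { .root_bus_nr = 0 };

        rd_conf(&pp, 0, 0, 0x00);   /* the root complex itself */
        rd_conf(&pp, 1, 0, 0x00);   /* device behind the link */
        rd_conf(&pp, 0, 3, 0x00);   /* rejected: no dev 3 on root bus */
        return 0;
    }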
pp 602 drivers/pci/controller/dwc/pcie-designware-host.c if (bus->number == pp->root_bus_nr && dev > 0)
pp 611 drivers/pci/controller/dwc/pcie-designware-host.c struct pcie_port *pp = bus->sysdata;
pp 613 drivers/pci/controller/dwc/pcie-designware-host.c if (!dw_pcie_valid_device(pp, bus, PCI_SLOT(devfn))) {
pp 618 drivers/pci/controller/dwc/pcie-designware-host.c if (bus->number == pp->root_bus_nr)
pp 619 drivers/pci/controller/dwc/pcie-designware-host.c return dw_pcie_rd_own_conf(pp, where, size, val);
pp 621 drivers/pci/controller/dwc/pcie-designware-host.c return dw_pcie_rd_other_conf(pp, bus, devfn, where, size, val);
pp 627 drivers/pci/controller/dwc/pcie-designware-host.c struct pcie_port *pp = bus->sysdata;
pp 629 drivers/pci/controller/dwc/pcie-designware-host.c if (!dw_pcie_valid_device(pp, bus, PCI_SLOT(devfn)))
pp 632 drivers/pci/controller/dwc/pcie-designware-host.c if (bus->number == pp->root_bus_nr)
pp 633 drivers/pci/controller/dwc/pcie-designware-host.c return dw_pcie_wr_own_conf(pp, where, size, val);
pp 635 drivers/pci/controller/dwc/pcie-designware-host.c return dw_pcie_wr_other_conf(pp, bus, devfn, where, size, val);
pp 643 drivers/pci/controller/dwc/pcie-designware-host.c void dw_pcie_setup_rc(struct pcie_port *pp)
pp 646 drivers/pci/controller/dwc/pcie-designware-host.c struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
pp 656 drivers/pci/controller/dwc/pcie-designware-host.c if (!pp->ops->msi_host_init) {
pp 657 drivers/pci/controller/dwc/pcie-designware-host.c num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;
pp 661 drivers/pci/controller/dwc/pcie-designware-host.c pp->irq_mask[ctrl] = ~0;
pp 662 drivers/pci/controller/dwc/pcie-designware-host.c dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_MASK +
pp 664 drivers/pci/controller/dwc/pcie-designware-host.c 4, pp->irq_mask[ctrl]);
pp 665 drivers/pci/controller/dwc/pcie-designware-host.c dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE +
pp 699 drivers/pci/controller/dwc/pcie-designware-host.c if (!pp->ops->rd_other_conf) {
pp 701 drivers/pci/controller/dwc/pcie-designware-host.c PCIE_ATU_TYPE_MEM, pp->mem_base,
pp 702 drivers/pci/controller/dwc/pcie-designware-host.c pp->mem_bus_addr, pp->mem_size);
pp 705 drivers/pci/controller/dwc/pcie-designware-host.c PCIE_ATU_TYPE_IO, pp->io_base,
pp 706 drivers/pci/controller/dwc/pcie-designware-host.c pp->io_bus_addr, pp->io_size);
pp 709 drivers/pci/controller/dwc/pcie-designware-host.c dw_pcie_wr_own_conf(pp, PCI_BASE_ADDRESS_0, 4, 0);
pp 712 drivers/pci/controller/dwc/pcie-designware-host.c dw_pcie_wr_own_conf(pp, PCI_CLASS_DEVICE, 2, PCI_CLASS_BRIDGE_PCI);
pp 714 drivers/pci/controller/dwc/pcie-designware-host.c dw_pcie_rd_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, &val);
pp 716 drivers/pci/controller/dwc/pcie-designware-host.c dw_pcie_wr_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, val);
pp 36 drivers/pci/controller/dwc/pcie-designware-plat.c static int dw_plat_pcie_host_init(struct pcie_port *pp)
pp 38 drivers/pci/controller/dwc/pcie-designware-plat.c struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
pp 40 drivers/pci/controller/dwc/pcie-designware-plat.c dw_pcie_setup_rc(pp);
pp 44 drivers/pci/controller/dwc/pcie-designware-plat.c dw_pcie_msi_init(pp);
pp 49 drivers/pci/controller/dwc/pcie-designware-plat.c static void dw_plat_set_num_vectors(struct pcie_port *pp)
pp 51 drivers/pci/controller/dwc/pcie-designware-plat.c pp->num_vectors = MAX_MSI_IRQS;
pp 119 drivers/pci/controller/dwc/pcie-designware-plat.c struct pcie_port *pp = &pci->pp;
pp 123 drivers/pci/controller/dwc/pcie-designware-plat.c pp->irq = platform_get_irq(pdev, 1);
pp 124 drivers/pci/controller/dwc/pcie-designware-plat.c if (pp->irq < 0)
pp 125 drivers/pci/controller/dwc/pcie-designware-plat.c return pp->irq;
pp 128 drivers/pci/controller/dwc/pcie-designware-plat.c pp->msi_irq = platform_get_irq(pdev, 0);
pp 129 drivers/pci/controller/dwc/pcie-designware-plat.c if (pp->msi_irq < 0)
pp 130 drivers/pci/controller/dwc/pcie-designware-plat.c return pp->msi_irq;
pp 133 drivers/pci/controller/dwc/pcie-designware-plat.c pp->ops = &dw_plat_pcie_host_ops;
pp 135 drivers/pci/controller/dwc/pcie-designware-plat.c ret = dw_pcie_host_init(pp);
pp 153 drivers/pci/controller/dwc/pcie-designware.h int (*rd_own_conf)(struct pcie_port *pp, int where, int size, u32 *val);
pp 154 drivers/pci/controller/dwc/pcie-designware.h int (*wr_own_conf)(struct pcie_port *pp, int where, int size, u32 val);
pp 155 drivers/pci/controller/dwc/pcie-designware.h int (*rd_other_conf)(struct pcie_port *pp, struct pci_bus *bus,
pp 157 drivers/pci/controller/dwc/pcie-designware.h int (*wr_other_conf)(struct pcie_port *pp, struct pci_bus *bus,
pp 159 drivers/pci/controller/dwc/pcie-designware.h int (*host_init)(struct pcie_port *pp);
pp 160 drivers/pci/controller/dwc/pcie-designware.h void (*scan_bus)(struct pcie_port *pp);
pp 161 drivers/pci/controller/dwc/pcie-designware.h void (*set_num_vectors)(struct pcie_port *pp);
pp 162 drivers/pci/controller/dwc/pcie-designware.h int (*msi_host_init)(struct pcie_port *pp);
pp 252 drivers/pci/controller/dwc/pcie-designware.h struct pcie_port pp;
pp 258 drivers/pci/controller/dwc/pcie-designware.h #define to_dw_pcie_from_pp(port) container_of((port), struct dw_pcie, pp)
pp 359 drivers/pci/controller/dwc/pcie-designware.h irqreturn_t dw_handle_msi_irq(struct pcie_port *pp);
pp 360 drivers/pci/controller/dwc/pcie-designware.h void dw_pcie_msi_init(struct pcie_port *pp);
pp 361 drivers/pci/controller/dwc/pcie-designware.h void dw_pcie_free_msi(struct pcie_port *pp);
pp 362 drivers/pci/controller/dwc/pcie-designware.h void dw_pcie_setup_rc(struct pcie_port *pp);
pp 363 drivers/pci/controller/dwc/pcie-designware.h int dw_pcie_host_init(struct pcie_port *pp);
pp 364 drivers/pci/controller/dwc/pcie-designware.h void dw_pcie_host_deinit(struct pcie_port *pp);
pp 365 drivers/pci/controller/dwc/pcie-designware.h int dw_pcie_allocate_domains(struct pcie_port *pp);
pp 367 drivers/pci/controller/dwc/pcie-designware.h static inline irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
pp 372 drivers/pci/controller/dwc/pcie-designware.h static inline void dw_pcie_msi_init(struct pcie_port *pp)
pp 376 drivers/pci/controller/dwc/pcie-designware.h static inline void dw_pcie_free_msi(struct pcie_port *pp)
pp 380 drivers/pci/controller/dwc/pcie-designware.h static inline void dw_pcie_setup_rc(struct pcie_port *pp)
pp 384 drivers/pci/controller/dwc/pcie-designware.h static inline int dw_pcie_host_init(struct pcie_port *pp)
pp 389 drivers/pci/controller/dwc/pcie-designware.h static inline void dw_pcie_host_deinit(struct pcie_port *pp)
pp 393 drivers/pci/controller/dwc/pcie-designware.h static inline int dw_pcie_allocate_domains(struct pcie_port *pp)
pp 145 drivers/pci/controller/dwc/pcie-hisi.c static int hisi_pcie_cfg_read(struct pcie_port *pp, int where, int size,
pp 151 drivers/pci/controller/dwc/pcie-hisi.c struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
pp 170 drivers/pci/controller/dwc/pcie-hisi.c static int hisi_pcie_cfg_write(struct pcie_port *pp, int where, int size,
pp 176 drivers/pci/controller/dwc/pcie-hisi.c struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
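The to_dw_pcie_from_pp() macro in pcie-designware.h above is the standard container_of() idiom: given a pointer to the embedded pcie_port, subtract its offset to recover the enclosing dw_pcie. A freestanding equivalent using offsetof():

    #include <stdio.h>
    #include <stddef.h>

    /* same idea as the kernel macro: offsetof-based pointer arithmetic */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct pcie_port { int root_bus_nr; };

    struct dw_pcie {
        int id;
        struct pcie_port pp;    /* embedded, as in pcie-designware.h */
    };

    #define to_dw_pcie_from_pp(port) container_of((port), struct dw_pcie, pp)

    int main(void)
    {
        struct dw_pcie pci = { .id = 42 };
        struct pcie_port *pp = &pci.pp;
        struct dw_pcie *back = to_dw_pcie_from_pp(pp);

        printf("recovered id = %d\n", back->id);
        return 0;
    }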
pp 232 drivers/pci/controller/dwc/pcie-hisi.c struct pcie_port *pp = &pci->pp;
pp 247 drivers/pci/controller/dwc/pcie-hisi.c pp->ops = &hisi_pcie_host_ops;
pp 249 drivers/pci/controller/dwc/pcie-hisi.c ret = dw_pcie_host_init(pp);
pp 77 drivers/pci/controller/dwc/pcie-histb.c static void histb_pcie_dbi_w_mode(struct pcie_port *pp, bool enable)
pp 79 drivers/pci/controller/dwc/pcie-histb.c struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
pp 91 drivers/pci/controller/dwc/pcie-histb.c static void histb_pcie_dbi_r_mode(struct pcie_port *pp, bool enable)
pp 93 drivers/pci/controller/dwc/pcie-histb.c struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
pp 110 drivers/pci/controller/dwc/pcie-histb.c histb_pcie_dbi_r_mode(&pci->pp, true);
pp 112 drivers/pci/controller/dwc/pcie-histb.c histb_pcie_dbi_r_mode(&pci->pp, false);
pp 120 drivers/pci/controller/dwc/pcie-histb.c histb_pcie_dbi_w_mode(&pci->pp, true);
pp 122 drivers/pci/controller/dwc/pcie-histb.c histb_pcie_dbi_w_mode(&pci->pp, false);
pp 125 drivers/pci/controller/dwc/pcie-histb.c static int histb_pcie_rd_own_conf(struct pcie_port *pp, int where,
pp 128 drivers/pci/controller/dwc/pcie-histb.c struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
pp 131 drivers/pci/controller/dwc/pcie-histb.c histb_pcie_dbi_r_mode(pp, true);
pp 133 drivers/pci/controller/dwc/pcie-histb.c histb_pcie_dbi_r_mode(pp, false);
pp 138 drivers/pci/controller/dwc/pcie-histb.c static int histb_pcie_wr_own_conf(struct pcie_port *pp, int where,
pp 141 drivers/pci/controller/dwc/pcie-histb.c struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
pp 144 drivers/pci/controller/dwc/pcie-histb.c histb_pcie_dbi_w_mode(pp, true);
pp 146 drivers/pci/controller/dwc/pcie-histb.c histb_pcie_dbi_w_mode(pp, false);
pp 167 drivers/pci/controller/dwc/pcie-histb.c static int histb_pcie_establish_link(struct pcie_port *pp)
pp 169 drivers/pci/controller/dwc/pcie-histb.c struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
pp 185 drivers/pci/controller/dwc/pcie-histb.c dw_pcie_setup_rc(pp);
pp 195 drivers/pci/controller/dwc/pcie-histb.c static int histb_pcie_host_init(struct pcie_port *pp)
pp 197 drivers/pci/controller/dwc/pcie-histb.c histb_pcie_establish_link(pp);
pp 200 drivers/pci/controller/dwc/pcie-histb.c dw_pcie_msi_init(pp);
pp 229 drivers/pci/controller/dwc/pcie-histb.c static int histb_pcie_host_enable(struct pcie_port *pp)
pp 231 drivers/pci/controller/dwc/pcie-histb.c struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
pp 306 drivers/pci/controller/dwc/pcie-histb.c struct pcie_port *pp;
pp 323 drivers/pci/controller/dwc/pcie-histb.c pp = &pci->pp;
pp 404 drivers/pci/controller/dwc/pcie-histb.c pp->msi_irq = platform_get_irq_byname(pdev, "msi");
pp 405 drivers/pci/controller/dwc/pcie-histb.c if (pp->msi_irq < 0) {
pp 407 drivers/pci/controller/dwc/pcie-histb.c return pp->msi_irq;
pp 423 drivers/pci/controller/dwc/pcie-histb.c pp->ops = &histb_pcie_host_ops;
pp 427 drivers/pci/controller/dwc/pcie-histb.c ret = histb_pcie_host_enable(pp);
pp 433 drivers/pci/controller/dwc/pcie-histb.c ret = dw_pcie_host_init(pp);
pp 338 drivers/pci/controller/dwc/pcie-kirin.c static int kirin_pcie_rd_own_conf(struct pcie_port *pp,
pp 341 drivers/pci/controller/dwc/pcie-kirin.c struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
pp 352 drivers/pci/controller/dwc/pcie-kirin.c static int kirin_pcie_wr_own_conf(struct pcie_port *pp,
pp 355 drivers/pci/controller/dwc/pcie-kirin.c struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
pp 400 drivers/pci/controller/dwc/pcie-kirin.c static int kirin_pcie_establish_link(struct pcie_port *pp)
pp 402 drivers/pci/controller/dwc/pcie-kirin.c struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
pp 410 drivers/pci/controller/dwc/pcie-kirin.c dw_pcie_setup_rc(pp);
pp 429 drivers/pci/controller/dwc/pcie-kirin.c static int kirin_pcie_host_init(struct pcie_port *pp)
pp 431 drivers/pci/controller/dwc/pcie-kirin.c kirin_pcie_establish_link(pp);
pp 434 drivers/pci/controller/dwc/pcie-kirin.c dw_pcie_msi_init(pp);
pp 464 drivers/pci/controller/dwc/pcie-kirin.c pci->pp.msi_irq = irq;
pp 479 drivers/pci/controller/dwc/pcie-kirin.c pci->pp.ops = &kirin_pcie_host_ops;
pp 481 drivers/pci/controller/dwc/pcie-kirin.c return dw_pcie_host_init(&pci->pp);
pp 1078 drivers/pci/controller/dwc/pcie-qcom.c static int qcom_pcie_host_init(struct pcie_port *pp)
pp 1080 drivers/pci/controller/dwc/pcie-qcom.c struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
pp 1100 drivers/pci/controller/dwc/pcie-qcom.c dw_pcie_setup_rc(pp);
pp 1103 drivers/pci/controller/dwc/pcie-qcom.c dw_pcie_msi_init(pp);
pp 1178 drivers/pci/controller/dwc/pcie-qcom.c struct pcie_port *pp;
pp 1200 drivers/pci/controller/dwc/pcie-qcom.c pp = &pci->pp;
pp 1243 drivers/pci/controller/dwc/pcie-qcom.c pp->ops = &qcom_pcie_dw_ops;
pp 1246 drivers/pci/controller/dwc/pcie-qcom.c pp->msi_irq = platform_get_irq_byname(pdev, "msi");
pp 1247 drivers/pci/controller/dwc/pcie-qcom.c if (pp->msi_irq < 0) {
pp 1248 drivers/pci/controller/dwc/pcie-qcom.c ret = pp->msi_irq;
pp 1261 drivers/pci/controller/dwc/pcie-qcom.c ret = dw_pcie_host_init(pp);
pp 75 drivers/pci/controller/dwc/pcie-spear13xx.c struct pcie_port *pp = &pci->pp;
pp 85 drivers/pci/controller/dwc/pcie-spear13xx.c dw_pcie_setup_rc(pp);
pp 137 drivers/pci/controller/dwc/pcie-spear13xx.c struct pcie_port *pp = &pci->pp;
pp 144 drivers/pci/controller/dwc/pcie-spear13xx.c dw_handle_msi_irq(pp);
pp 155 drivers/pci/controller/dwc/pcie-spear13xx.c struct pcie_port *pp = &pci->pp;
pp 160 drivers/pci/controller/dwc/pcie-spear13xx.c dw_pcie_msi_init(pp);
pp 177 drivers/pci/controller/dwc/pcie-spear13xx.c static int spear13xx_pcie_host_init(struct pcie_port *pp)
pp 179 drivers/pci/controller/dwc/pcie-spear13xx.c struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
pp 196 drivers/pci/controller/dwc/pcie-spear13xx.c struct pcie_port *pp = &pci->pp;
pp 200 drivers/pci/controller/dwc/pcie-spear13xx.c pp->irq = platform_get_irq(pdev, 0);
pp 201 drivers/pci/controller/dwc/pcie-spear13xx.c if (pp->irq < 0) {
pp 203 drivers/pci/controller/dwc/pcie-spear13xx.c return pp->irq;
pp 205 drivers/pci/controller/dwc/pcie-spear13xx.c ret = devm_request_irq(dev, pp->irq, spear13xx_pcie_irq_handler,
pp 209 drivers/pci/controller/dwc/pcie-spear13xx.c dev_err(dev, "failed to request irq %d\n", pp->irq);
pp 213 drivers/pci/controller/dwc/pcie-spear13xx.c pp->ops = &spear13xx_pcie_host_ops;
pp 215 drivers/pci/controller/dwc/pcie-spear13xx.c ret = dw_pcie_host_init(pp);
pp 310 drivers/pci/controller/dwc/pcie-tegra194.c static void apply_bad_link_workaround(struct pcie_port *pp)
pp 312 drivers/pci/controller/dwc/pcie-tegra194.c struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
pp 347 drivers/pci/controller/dwc/pcie-tegra194.c struct pcie_port *pp = &pci->pp;
pp 378 drivers/pci/controller/dwc/pcie-tegra194.c apply_bad_link_workaround(pp);
pp 423 drivers/pci/controller/dwc/pcie-tegra194.c static int tegra_pcie_dw_rd_own_conf(struct pcie_port *pp, int where, int size,
pp 426 drivers/pci/controller/dwc/pcie-tegra194.c struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
pp 442 drivers/pci/controller/dwc/pcie-tegra194.c static int tegra_pcie_dw_wr_own_conf(struct pcie_port *pp, int where, int size,
pp 445 drivers/pci/controller/dwc/pcie-tegra194.c struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
pp 573 drivers/pci/controller/dwc/pcie-tegra194.c static void tegra_pcie_enable_system_interrupts(struct pcie_port *pp)
pp 575 drivers/pci/controller/dwc/pcie-tegra194.c struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
pp 611 drivers/pci/controller/dwc/pcie-tegra194.c static void tegra_pcie_enable_legacy_interrupts(struct pcie_port *pp)
pp 613 drivers/pci/controller/dwc/pcie-tegra194.c struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
pp 632 drivers/pci/controller/dwc/pcie-tegra194.c static void tegra_pcie_enable_msi_interrupts(struct pcie_port *pp)
pp 634 drivers/pci/controller/dwc/pcie-tegra194.c struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
pp 638 drivers/pci/controller/dwc/pcie-tegra194.c dw_pcie_msi_init(pp);
pp 647 drivers/pci/controller/dwc/pcie-tegra194.c static void tegra_pcie_enable_interrupts(struct pcie_port *pp)
pp 649 drivers/pci/controller/dwc/pcie-tegra194.c struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
pp 669 drivers/pci/controller/dwc/pcie-tegra194.c tegra_pcie_enable_system_interrupts(pp);
pp 670 drivers/pci/controller/dwc/pcie-tegra194.c tegra_pcie_enable_legacy_interrupts(pp);
pp 672 drivers/pci/controller/dwc/pcie-tegra194.c tegra_pcie_enable_msi_interrupts(pp);
pp 730 drivers/pci/controller/dwc/pcie-tegra194.c static void tegra_pcie_prepare_host(struct pcie_port *pp)
pp 732 drivers/pci/controller/dwc/pcie-tegra194.c struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
pp 795 drivers/pci/controller/dwc/pcie-tegra194.c dw_pcie_setup_rc(pp);
pp 819 drivers/pci/controller/dwc/pcie-tegra194.c static int tegra_pcie_dw_host_init(struct pcie_port *pp)
pp 821 drivers/pci/controller/dwc/pcie-tegra194.c struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
pp 825 drivers/pci/controller/dwc/pcie-tegra194.c tegra_pcie_prepare_host(pp);
pp 861 drivers/pci/controller/dwc/pcie-tegra194.c tegra_pcie_prepare_host(pp);
pp 871 drivers/pci/controller/dwc/pcie-tegra194.c tegra_pcie_enable_interrupts(pp);
pp 884 drivers/pci/controller/dwc/pcie-tegra194.c static void tegra_pcie_set_msi_vec_num(struct pcie_port *pp)
pp 886 drivers/pci/controller/dwc/pcie-tegra194.c pp->num_vectors = MAX_MSI_IRQS;
pp 1024 drivers/pci/controller/dwc/pcie-tegra194.c struct pcie_port *pp = &pcie->pci.pp;
pp 1037 drivers/pci/controller/dwc/pcie-tegra194.c list_for_each_entry(child, &pp->root_bus->children, node) {
pp 1039 drivers/pci/controller/dwc/pcie-tegra194.c if (child->parent == pp->root_bus) {
pp 1275 drivers/pci/controller/dwc/pcie-tegra194.c struct pcie_port *pp = &pci->pp;
pp 1282 drivers/pci/controller/dwc/pcie-tegra194.c pp->ops = &tegra_pcie_dw_host_ops;
pp 1284 drivers/pci/controller/dwc/pcie-tegra194.c ret = dw_pcie_host_init(pp);
pp 1365 drivers/pci/controller/dwc/pcie-tegra194.c dw_pcie_host_deinit(&pcie->pci.pp);
pp 1373 drivers/pci/controller/dwc/pcie-tegra194.c struct pcie_port *pp = &pcie->pci.pp;
pp 1379 drivers/pci/controller/dwc/pcie-tegra194.c pp->msi_irq = of_irq_get_byname(dev->of_node, "msi");
pp 1380 drivers/pci/controller/dwc/pcie-tegra194.c if (!pp->msi_irq) {
pp 1438 drivers/pci/controller/dwc/pcie-tegra194.c struct pcie_port *pp;
pp 1452 drivers/pci/controller/dwc/pcie-tegra194.c pp = &pci->pp;
pp 1556 drivers/pci/controller/dwc/pcie-tegra194.c pp->irq = platform_get_irq_byname(pdev, "intr");
pp 1557 drivers/pci/controller/dwc/pcie-tegra194.c if (!pp->irq) {
pp 1562 drivers/pci/controller/dwc/pcie-tegra194.c ret = devm_request_irq(dev, pp->irq, tegra_pcie_irq_handler,
pp 1565 drivers/pci/controller/dwc/pcie-tegra194.c dev_err(dev, "Failed to request IRQ %d: %d\n", pp->irq, ret);
pp 1648 drivers/pci/controller/dwc/pcie-tegra194.c ret = tegra_pcie_dw_host_init(&pcie->pci.pp);
pp 1694 drivers/pci/controller/dwc/pcie-tegra194.c disable_irq(pcie->pci.pp.irq);
pp 1696 drivers/pci/controller/dwc/pcie-tegra194.c disable_irq(pcie->pci.pp.msi_irq);
pp 172 drivers/pci/controller/dwc/pcie-uniphier.c struct pcie_port *pp = irq_data_get_irq_chip_data(d);
pp 173 drivers/pci/controller/dwc/pcie-uniphier.c struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
pp 185 drivers/pci/controller/dwc/pcie-uniphier.c struct pcie_port *pp = irq_data_get_irq_chip_data(d);
pp 186 drivers/pci/controller/dwc/pcie-uniphier.c struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
pp 198 drivers/pci/controller/dwc/pcie-uniphier.c struct pcie_port *pp = irq_data_get_irq_chip_data(d);
pp 199 drivers/pci/controller/dwc/pcie-uniphier.c struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
pp 232 drivers/pci/controller/dwc/pcie-uniphier.c struct pcie_port *pp = irq_desc_get_handler_data(desc);
pp 233 drivers/pci/controller/dwc/pcie-uniphier.c struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
pp 267 drivers/pci/controller/dwc/pcie-uniphier.c static int uniphier_pcie_config_legacy_irq(struct pcie_port *pp)
pp 269 drivers/pci/controller/dwc/pcie-uniphier.c struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
pp 281 drivers/pci/controller/dwc/pcie-uniphier.c pp->irq = irq_of_parse_and_map(np_intc, 0);
pp 282 drivers/pci/controller/dwc/pcie-uniphier.c if (!pp->irq) {
pp 289 drivers/pci/controller/dwc/pcie-uniphier.c &uniphier_intx_domain_ops, pp);
pp 296 drivers/pci/controller/dwc/pcie-uniphier.c irq_set_chained_handler_and_data(pp->irq, uniphier_pcie_irq_handler,
pp 297 drivers/pci/controller/dwc/pcie-uniphier.c pp);
pp 304 drivers/pci/controller/dwc/pcie-uniphier.c static int uniphier_pcie_host_init(struct pcie_port *pp)
pp 306 drivers/pci/controller/dwc/pcie-uniphier.c struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
pp 310 drivers/pci/controller/dwc/pcie-uniphier.c ret = uniphier_pcie_config_legacy_irq(pp);
pp 316 drivers/pci/controller/dwc/pcie-uniphier.c dw_pcie_setup_rc(pp);
pp 322 drivers/pci/controller/dwc/pcie-uniphier.c dw_pcie_msi_init(pp);
pp 335 drivers/pci/controller/dwc/pcie-uniphier.c struct pcie_port *pp = &pci->pp;
pp 339 drivers/pci/controller/dwc/pcie-uniphier.c pp->ops = &uniphier_pcie_host_ops;
pp 342 drivers/pci/controller/dwc/pcie-uniphier.c pp->msi_irq = platform_get_irq_byname(pdev, "msi");
pp 343 drivers/pci/controller/dwc/pcie-uniphier.c if (pp->msi_irq < 0)
pp 344 drivers/pci/controller/dwc/pcie-uniphier.c return pp->msi_irq;
pp 347 drivers/pci/controller/dwc/pcie-uniphier.c ret = dw_pcie_host_init(pp);
pp 5369 drivers/pci/quirks.c int pp;
pp 5394 drivers/pci/quirks.c for (pp = 0; pp < (sizeof(partition_map) * 8); pp++) {
pp 5399 drivers/pci/quirks.c if (!(partition_map & (1ULL << pp)))
pp 5402 drivers/pci/quirks.c pci_dbg(pdev, "Processing partition %d\n", pp);
pp 5404 drivers/pci/quirks.c mmio_peer_ctrl = &mmio_ctrl[pp];
pp 5408 drivers/pci/quirks.c pci_warn(pdev, "Partition %d table_sz 0\n", pp);
pp 5415 drivers/pci/quirks.c pp, table_sz);
pp 5427 drivers/pci/quirks.c pp, PCI_SLOT(devfn), PCI_FUNC(devfn));
pp 1168 drivers/pinctrl/pinctrl-st.c struct property *pp;
pp 1177 drivers/pinctrl/pinctrl-st.c for_each_property_of_node(pins, pp) {
pp 1179 drivers/pinctrl/pinctrl-st.c if (!strcmp(pp->name, "name"))
pp 1182 drivers/pinctrl/pinctrl-st.c if (pp->length / sizeof(__be32) >= OF_GPIO_ARGS_MIN) {
pp 1203 drivers/pinctrl/pinctrl-st.c for_each_property_of_node(pins, pp) {
pp 1204 drivers/pinctrl/pinctrl-st.c if (!strcmp(pp->name, "name"))
pp 1206 drivers/pinctrl/pinctrl-st.c nr_props = pp->length/sizeof(u32);
pp 1207 drivers/pinctrl/pinctrl-st.c list = pp->value;
pp 1213 drivers/pinctrl/pinctrl-st.c conf->pin = of_get_named_gpio(pins, pp->name, 0);
pp 1214 drivers/pinctrl/pinctrl-st.c conf->name = pp->name;
pp 2340 drivers/scsi/fcoe/fcoe.c struct fcoe_percpu_s *pp;
pp 2344 drivers/scsi/fcoe/fcoe.c pp = &per_cpu(fcoe_percpu, cpu);
pp 2346 drivers/scsi/fcoe/fcoe.c flush_work(&pp->work);
pp 74 drivers/scsi/libfc/fc_disc.c struct fc_els_rscn_page *pp;
pp 94 drivers/scsi/libfc/fc_disc.c if (rp->rscn_page_len != sizeof(*pp))
pp 106 drivers/scsi/libfc/fc_disc.c if (len % sizeof(*pp))
pp 109 drivers/scsi/libfc/fc_disc.c for (pp = (void *)(rp + 1); len > 0; len -= sizeof(*pp), pp++) {
pp 110 drivers/scsi/libfc/fc_disc.c ev_qual = pp->rscn_page_flags >> ELS_RSCN_EV_QUAL_BIT;
pp 112 drivers/scsi/libfc/fc_disc.c fmt = pp->rscn_page_flags >> ELS_RSCN_ADDR_FMT_BIT;
pp 121 drivers/scsi/libfc/fc_disc.c "(%6.6x)\n", ntoh24(pp->rscn_fid));
pp 128 drivers/scsi/libfc/fc_disc.c dp->port_id = ntoh24(pp->rscn_fid);
pp 415 drivers/scsi/libfc/fc_lport.c void *pp;
pp 424 drivers/scsi/libfc/fc_lport.c pp = fc_frame_payload_get(in_fp, len);
pp 432 drivers/scsi/libfc/fc_lport.c memcpy(dp, pp, len);
pp 1943 drivers/scsi/libfc/fc_lport.c char *pp;
pp 1953 drivers/scsi/libfc/fc_lport.c pp = fc_frame_payload_get(fp, len);
pp 1957 drivers/scsi/libfc/fc_lport.c pp, len);
pp 1117 drivers/scsi/libfc/fc_rport.c } *pp;
pp 1151 drivers/scsi/libfc/fc_rport.c pp = fc_frame_payload_get(fp, sizeof(*pp));
pp 1152 drivers/scsi/libfc/fc_rport.c if (!pp) {
pp 1157 drivers/scsi/libfc/fc_rport.c resp_code = (pp->spp.spp_flags & FC_SPP_RESP_MASK);
pp 1159 drivers/scsi/libfc/fc_rport.c pp->spp.spp_flags, pp->spp.spp_type);
pp 1160 drivers/scsi/libfc/fc_rport.c rdata->spp_type = pp->spp.spp_type;
pp 1168 drivers/scsi/libfc/fc_rport.c if (pp->prli.prli_spp_len < sizeof(pp->spp)) {
pp 1173 drivers/scsi/libfc/fc_rport.c fcp_parm = ntohl(pp->spp.spp_params);
pp 1185 drivers/scsi/libfc/fc_rport.c prov->prli(rdata, pp->prli.prli_spp_len,
pp 1186 drivers/scsi/libfc/fc_rport.c &pp->spp, &temp_spp);
pp 1192 drivers/scsi/libfc/fc_rport.c !(pp->spp.spp_flags & FC_SPP_EST_IMG_PAIR)) {
pp 1243 drivers/scsi/libfc/fc_rport.c } *pp;
pp 1272 drivers/scsi/libfc/fc_rport.c fp = fc_frame_alloc(lport, sizeof(*pp));
pp 1282 drivers/scsi/libfc/fc_rport.c pp = fc_frame_payload_get(fp, sizeof(*pp));
pp 1283 drivers/scsi/libfc/fc_rport.c prov->prli(rdata, sizeof(pp->spp), NULL, &pp->spp);
pp 1965 drivers/scsi/libfc/fc_rport.c } *pp;
pp 1980 drivers/scsi/libfc/fc_rport.c pp = fc_frame_payload_get(rx_fp, sizeof(*pp));
pp 1981 drivers/scsi/libfc/fc_rport.c if (!pp)
pp 1983 drivers/scsi/libfc/fc_rport.c plen = ntohs(pp->prli.prli_len);
pp 1988 drivers/scsi/libfc/fc_rport.c plen = pp->prli.prli_spp_len;
pp 1990 drivers/scsi/libfc/fc_rport.c plen > len || len < sizeof(*pp) || plen < 12)
pp 1992 drivers/scsi/libfc/fc_rport.c rspp = &pp->spp;
pp 2000 drivers/scsi/libfc/fc_rport.c pp = fc_frame_payload_get(fp, len);
pp 2001 drivers/scsi/libfc/fc_rport.c WARN_ON(!pp);
pp 2002 drivers/scsi/libfc/fc_rport.c memset(pp, 0, len);
pp 2003 drivers/scsi/libfc/fc_rport.c pp->prli.prli_cmd = ELS_LS_ACC;
pp 2004 drivers/scsi/libfc/fc_rport.c pp->prli.prli_spp_len = plen;
pp 2005 drivers/scsi/libfc/fc_rport.c pp->prli.prli_len = htons(len);
pp 2013 drivers/scsi/libfc/fc_rport.c spp = &pp->spp;
pp 2081 drivers/scsi/libfc/fc_rport.c } *pp;
pp 2094 drivers/scsi/libfc/fc_rport.c pp = fc_frame_payload_get(rx_fp, sizeof(*pp));
pp 2095 drivers/scsi/libfc/fc_rport.c if (!pp)
pp 2097 drivers/scsi/libfc/fc_rport.c plen = ntohs(pp->prlo.prlo_len);
pp 2103 drivers/scsi/libfc/fc_rport.c rspp = &pp->spp;
pp 2112 drivers/scsi/libfc/fc_rport.c pp = fc_frame_payload_get(fp, len);
pp 2113 drivers/scsi/libfc/fc_rport.c WARN_ON(!pp);
pp 2114 drivers/scsi/libfc/fc_rport.c memset(pp, 0, len);
pp 2115 drivers/scsi/libfc/fc_rport.c pp->prlo.prlo_cmd = ELS_LS_ACC;
pp 2116 drivers/scsi/libfc/fc_rport.c pp->prlo.prlo_obs = 0x10;
pp 2117 drivers/scsi/libfc/fc_rport.c pp->prlo.prlo_len = htons(len);
pp 2118 drivers/scsi/libfc/fc_rport.c spp = &pp->spp;
pp 439 drivers/scsi/ncr53c8xx.c struct m_pool **pp = &mp0.next;
pp 441 drivers/scsi/ncr53c8xx.c while (*pp && *pp != p)
pp 442 drivers/scsi/ncr53c8xx.c pp = &(*pp)->next;
pp 443 drivers/scsi/ncr53c8xx.c if (*pp) {
pp 444 drivers/scsi/ncr53c8xx.c *pp = (*pp)->next;
pp 4149 drivers/scsi/scsi_debug.c struct partition *pp;
pp 4174 drivers/scsi/scsi_debug.c pp = (struct partition *)(ramp + 0x1be);
pp 4175 drivers/scsi/scsi_debug.c for (k = 0; starts[k + 1]; ++k, ++pp) {
pp 4178 drivers/scsi/scsi_debug.c pp->boot_ind = 0;
pp 4180 drivers/scsi/scsi_debug.c pp->cyl = start_sec / heads_by_sects;
pp 4181 drivers/scsi/scsi_debug.c pp->head = (start_sec - (pp->cyl * heads_by_sects))
pp 4183 drivers/scsi/scsi_debug.c pp->sector = (start_sec % sdebug_sectors_per) + 1;
pp 4185 drivers/scsi/scsi_debug.c pp->end_cyl = end_sec / heads_by_sects;
pp 4186 drivers/scsi/scsi_debug.c pp->end_head = (end_sec - (pp->end_cyl * heads_by_sects))
pp 4188 drivers/scsi/scsi_debug.c pp->end_sector = (end_sec % sdebug_sectors_per) + 1;
pp 4190 drivers/scsi/scsi_debug.c pp->start_sect = cpu_to_le32(start_sec);
pp 4191 drivers/scsi/scsi_debug.c pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
pp 4192 drivers/scsi/scsi_debug.c pp->sys_ind = 0x83; /* plain Linux partition */
pp 282 drivers/scsi/sym53c8xx_2/sym_malloc.c m_pool_p *pp = &mp0.next;
pp 284 drivers/scsi/sym53c8xx_2/sym_malloc.c while (*pp && *pp != p)
pp 285 drivers/scsi/sym53c8xx_2/sym_malloc.c pp = &(*pp)->next;
pp 286 drivers/scsi/sym53c8xx_2/sym_malloc.c if (*pp) {
pp 287 drivers/scsi/sym53c8xx_2/sym_malloc.c *pp = (*pp)->next;
pp 69 drivers/spi/spi-butterfly.c struct butterfly *pp = spidev_to_pp(spi);
pp 70 drivers/spi/spi-butterfly.c u8 bit, byte = pp->lastbyte;
pp 78 drivers/spi/spi-butterfly.c parport_write_data(pp->port, byte);
pp 79 drivers/spi/spi-butterfly.c pp->lastbyte = byte;
pp 85 drivers/spi/spi-butterfly.c struct butterfly *pp = spidev_to_pp(spi);
pp 86 drivers/spi/spi-butterfly.c u8 bit, byte = pp->lastbyte;
pp 94 drivers/spi/spi-butterfly.c parport_write_data(pp->port, byte);
pp 95 drivers/spi/spi-butterfly.c pp->lastbyte = byte;
pp 100 drivers/spi/spi-butterfly.c struct butterfly *pp = spidev_to_pp(spi);
pp 107 drivers/spi/spi-butterfly.c value = !(parport_read_status(pp->port) & bit);
pp 113 drivers/spi/spi-butterfly.c struct butterfly *pp = spidev_to_pp(spi);
pp 126 drivers/spi/spi-butterfly.c parport_frob_control(pp->port, spi_cs_bit, value ? spi_cs_bit : 0);
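The ncr53c8xx.c and sym_malloc.c hits just above are the classic pointer-to-pointer unlink: iterate over the address of each ->next field so that removing the head node needs no special case. A standalone sketch of the same idiom:

    #include <stdio.h>

    struct m_pool { int id; struct m_pool *next; };

    /* walk via the address of each ->next field; when *pp == p,
     * *pp = (*pp)->next splices the node out, head or not */
    static void unlink_pool(struct m_pool **head, struct m_pool *p)
    {
        struct m_pool **pp = head;

        while (*pp && *pp != p)
            pp = &(*pp)->next;
        if (*pp)
            *pp = (*pp)->next;
    }

    int main(void)
    {
        struct m_pool c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
        struct m_pool *head = &a, *it;

        unlink_pool(&head, &b);
        for (it = head; it; it = it->next)
            printf("pool %d\n", it->id);   /* prints 1 then 3 */
        return 0;
    }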
pp 180 drivers/spi/spi-butterfly.c struct butterfly *pp;
pp 192 drivers/spi/spi-butterfly.c master = spi_alloc_master(dev, sizeof(*pp));
pp 197 drivers/spi/spi-butterfly.c pp = spi_master_get_devdata(master);
pp 208 drivers/spi/spi-butterfly.c pp->bitbang.master = master;
pp 209 drivers/spi/spi-butterfly.c pp->bitbang.chipselect = butterfly_chipselect;
pp 210 drivers/spi/spi-butterfly.c pp->bitbang.txrx_word[SPI_MODE_0] = butterfly_txrx_word_mode0;
pp 215 drivers/spi/spi-butterfly.c pp->port = p;
pp 217 drivers/spi/spi-butterfly.c butterfly_cb.private = pp;
pp 223 drivers/spi/spi-butterfly.c pp->pd = pd;
pp 235 drivers/spi/spi-butterfly.c parport_frob_control(pp->port, spi_cs_bit, 0);
pp 240 drivers/spi/spi-butterfly.c pp->lastbyte |= vcc_bits;
pp 241 drivers/spi/spi-butterfly.c parport_write_data(pp->port, pp->lastbyte);
pp 245 drivers/spi/spi-butterfly.c pp->lastbyte |= butterfly_nreset;
pp 246 drivers/spi/spi-butterfly.c parport_write_data(pp->port, pp->lastbyte);
pp 252 drivers/spi/spi-butterfly.c status = spi_bitbang_start(&pp->bitbang);
pp 261 drivers/spi/spi-butterfly.c pp->info[0].max_speed_hz = 15 * 1000 * 1000;
pp 262 drivers/spi/spi-butterfly.c strcpy(pp->info[0].modalias, "mtd_dataflash");
pp 263 drivers/spi/spi-butterfly.c pp->info[0].platform_data = &flash;
pp 264 drivers/spi/spi-butterfly.c pp->info[0].chip_select = 1;
pp 265 drivers/spi/spi-butterfly.c pp->info[0].controller_data = pp;
pp 266 drivers/spi/spi-butterfly.c pp->dataflash = spi_new_device(pp->bitbang.master, &pp->info[0]);
pp 267 drivers/spi/spi-butterfly.c if (pp->dataflash)
pp 269 drivers/spi/spi-butterfly.c dev_name(&pp->dataflash->dev));
pp 272 drivers/spi/spi-butterfly.c butterfly = pp;
pp 277 drivers/spi/spi-butterfly.c parport_write_data(pp->port, 0);
pp 279 drivers/spi/spi-butterfly.c parport_release(pp->pd);
pp 283 drivers/spi/spi-butterfly.c spi_master_put(pp->bitbang.master);
pp 290 drivers/spi/spi-butterfly.c struct butterfly *pp;
pp 298 drivers/spi/spi-butterfly.c pp = butterfly;
pp 302 drivers/spi/spi-butterfly.c spi_bitbang_stop(&pp->bitbang);
pp 305 drivers/spi/spi-butterfly.c parport_write_data(pp->port, 0);
pp 308 drivers/spi/spi-butterfly.c parport_release(pp->pd);
pp 309 drivers/spi/spi-butterfly.c parport_unregister_device(pp->pd);
pp 311 drivers/spi/spi-butterfly.c spi_master_put(pp->bitbang.master);
pp 96 drivers/spi/spi-lm70llp.c static inline void deassertCS(struct spi_lm70llp *pp)
pp 98 drivers/spi/spi-lm70llp.c u8 data = parport_read_data(pp->port);
pp 101 drivers/spi/spi-lm70llp.c parport_write_data(pp->port, data | nCS);
pp 104 drivers/spi/spi-lm70llp.c static inline void assertCS(struct spi_lm70llp *pp)
pp 106 drivers/spi/spi-lm70llp.c u8 data = parport_read_data(pp->port);
pp 109 drivers/spi/spi-lm70llp.c parport_write_data(pp->port, data & ~nCS);
pp 112 drivers/spi/spi-lm70llp.c static inline void clkHigh(struct spi_lm70llp *pp)
pp 114 drivers/spi/spi-lm70llp.c u8 data = parport_read_data(pp->port);
pp 116 drivers/spi/spi-lm70llp.c parport_write_data(pp->port, data | SCLK);
pp 119 drivers/spi/spi-lm70llp.c static inline void clkLow(struct spi_lm70llp *pp)
pp 121 drivers/spi/spi-lm70llp.c u8 data = parport_read_data(pp->port);
pp 123 drivers/spi/spi-lm70llp.c parport_write_data(pp->port, data & ~SCLK);
pp 135 drivers/spi/spi-lm70llp.c struct spi_lm70llp *pp = spidev_to_pp(s);
pp 138 drivers/spi/spi-lm70llp.c clkHigh(pp);
pp 140 drivers/spi/spi-lm70llp.c clkLow(pp);
pp 160 drivers/spi/spi-lm70llp.c struct spi_lm70llp *pp = spidev_to_pp(s);
pp 162 drivers/spi/spi-lm70llp.c return ((SIO == (parport_read_status(pp->port) & SIO)) ? 0 : 1);
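The spi-lm70llp helpers above (assertCS/deassertCS, clkHigh/clkLow) are all one pattern: read the parallel-port data latch, flip one bit, write it back. A sketch against a fake latch; the bit values here are invented for illustration and are not the driver's actual assignments:

    #include <stdio.h>
    #include <stdint.h>

    #define SCLK 0x01   /* illustrative bit positions only */
    #define nCS  0x02

    static uint8_t data_latch;   /* stands in for the parport data register */

    static uint8_t read_data(void)       { return data_latch; }
    static void    write_data(uint8_t v) { data_latch = v; }

    /* read-modify-write, mirroring clkHigh()/clkLow() in spi-lm70llp.c */
    static void clk_high(void) { write_data(read_data() | SCLK); }
    static void clk_low(void)  { write_data(read_data() & ~SCLK); }

    int main(void)
    {
        clk_high();
        printf("latch = %#x\n", (unsigned)data_latch);
        clk_low();
        printf("latch = %#x\n", (unsigned)data_latch);
        return 0;
    }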
drivers/spi/spi-lm70llp.c return ((SIO == (parport_read_status(pp->port) & SIO)) ? 0 : 1); pp 171 drivers/spi/spi-lm70llp.c struct spi_lm70llp *pp = spidev_to_pp(spi); pp 174 drivers/spi/spi-lm70llp.c assertCS(pp); pp 176 drivers/spi/spi-lm70llp.c deassertCS(pp); pp 191 drivers/spi/spi-lm70llp.c struct spi_lm70llp *pp; pp 205 drivers/spi/spi-lm70llp.c master = spi_alloc_master(p->physport->dev, sizeof *pp); pp 210 drivers/spi/spi-lm70llp.c pp = spi_master_get_devdata(master); pp 215 drivers/spi/spi-lm70llp.c pp->bitbang.master = master; pp 216 drivers/spi/spi-lm70llp.c pp->bitbang.chipselect = lm70_chipselect; pp 217 drivers/spi/spi-lm70llp.c pp->bitbang.txrx_word[SPI_MODE_0] = lm70_txrx; pp 218 drivers/spi/spi-lm70llp.c pp->bitbang.flags = SPI_3WIRE; pp 223 drivers/spi/spi-lm70llp.c pp->port = p; pp 225 drivers/spi/spi-lm70llp.c lm70llp_cb.private = pp; pp 233 drivers/spi/spi-lm70llp.c pp->pd = pd; pp 242 drivers/spi/spi-lm70llp.c status = spi_bitbang_start(&pp->bitbang); pp 255 drivers/spi/spi-lm70llp.c strcpy(pp->info.modalias, "lm70"); pp 256 drivers/spi/spi-lm70llp.c pp->info.max_speed_hz = 6 * 1000 * 1000; pp 257 drivers/spi/spi-lm70llp.c pp->info.chip_select = 0; pp 258 drivers/spi/spi-lm70llp.c pp->info.mode = SPI_3WIRE | SPI_MODE_0; pp 261 drivers/spi/spi-lm70llp.c parport_write_data(pp->port, lm70_INIT); pp 266 drivers/spi/spi-lm70llp.c pp->info.controller_data = pp; pp 267 drivers/spi/spi-lm70llp.c pp->spidev_lm70 = spi_new_device(pp->bitbang.master, &pp->info); pp 268 drivers/spi/spi-lm70llp.c if (pp->spidev_lm70) pp 269 drivers/spi/spi-lm70llp.c dev_dbg(&pp->spidev_lm70->dev, "spidev_lm70 at %s\n", pp 270 drivers/spi/spi-lm70llp.c dev_name(&pp->spidev_lm70->dev)); pp 276 drivers/spi/spi-lm70llp.c pp->spidev_lm70->bits_per_word = 8; pp 278 drivers/spi/spi-lm70llp.c lm70llp = pp; pp 282 drivers/spi/spi-lm70llp.c spi_bitbang_stop(&pp->bitbang); pp 285 drivers/spi/spi-lm70llp.c parport_write_data(pp->port, 0); pp 287 drivers/spi/spi-lm70llp.c parport_release(pp->pd); pp 298 drivers/spi/spi-lm70llp.c struct spi_lm70llp *pp; pp 303 drivers/spi/spi-lm70llp.c pp = lm70llp; pp 304 drivers/spi/spi-lm70llp.c spi_bitbang_stop(&pp->bitbang); pp 307 drivers/spi/spi-lm70llp.c parport_write_data(pp->port, 0); pp 309 drivers/spi/spi-lm70llp.c parport_release(pp->pd); pp 310 drivers/spi/spi-lm70llp.c parport_unregister_device(pp->pd); pp 312 drivers/spi/spi-lm70llp.c spi_master_put(pp->bitbang.master); pp 52 drivers/staging/comedi/drivers/addi_apci_1500.c unsigned int pp[2]; /* Pattern Polarity */ pp 301 drivers/staging/comedi/drivers/addi_apci_1500.c z8536_write(dev, devpriv->pp[pa_trig] & 0xff, Z8536_PA_PP_REG); pp 306 drivers/staging/comedi/drivers/addi_apci_1500.c z8536_write(dev, (devpriv->pp[pb_trig] >> 8) & 0xff, Z8536_PB_PP_REG); pp 461 drivers/staging/comedi/drivers/addi_apci_1500.c unsigned int pp = devpriv->pp[trig] & old_mask; pp 479 drivers/staging/comedi/drivers/addi_apci_1500.c pp = 0; pp 484 drivers/staging/comedi/drivers/addi_apci_1500.c pp |= hi_mask; /* rising-edge channels */ pp 485 drivers/staging/comedi/drivers/addi_apci_1500.c pp &= ~lo_mask; /* falling-edge channels */ pp 490 drivers/staging/comedi/drivers/addi_apci_1500.c pp |= hi_mask; /* high level channels */ pp 491 drivers/staging/comedi/drivers/addi_apci_1500.c pp &= ~lo_mask; /* low level channels */ pp 523 drivers/staging/comedi/drivers/addi_apci_1500.c devpriv->pp[trig] = pp; pp 161 drivers/staging/isdn/avm/b1dma.c static inline void _put_byte(void **pp, u8 val) pp 163 drivers/staging/isdn/avm/b1dma.c u8 *s = 
*pp; pp 165 drivers/staging/isdn/avm/b1dma.c *pp = s; pp 168 drivers/staging/isdn/avm/b1dma.c static inline void _put_word(void **pp, u32 val) pp 170 drivers/staging/isdn/avm/b1dma.c u8 *s = *pp; pp 175 drivers/staging/isdn/avm/b1dma.c *pp = s; pp 178 drivers/staging/isdn/avm/b1dma.c static inline void _put_slice(void **pp, unsigned char *dp, unsigned int len) pp 181 drivers/staging/isdn/avm/b1dma.c _put_word(pp, i); pp 183 drivers/staging/isdn/avm/b1dma.c _put_byte(pp, *dp++); pp 186 drivers/staging/isdn/avm/b1dma.c static inline u8 _get_byte(void **pp) pp 188 drivers/staging/isdn/avm/b1dma.c u8 *s = *pp; pp 191 drivers/staging/isdn/avm/b1dma.c *pp = s; pp 195 drivers/staging/isdn/avm/b1dma.c static inline u32 _get_word(void **pp) pp 197 drivers/staging/isdn/avm/b1dma.c u8 *s = *pp; pp 203 drivers/staging/isdn/avm/b1dma.c *pp = s; pp 207 drivers/staging/isdn/avm/b1dma.c static inline u32 _get_slice(void **pp, unsigned char *dp) pp 211 drivers/staging/isdn/avm/b1dma.c len = i = _get_word(pp); pp 212 drivers/staging/isdn/avm/b1dma.c while (i-- > 0) *dp++ = _get_byte(pp); pp 239 drivers/staging/isdn/avm/c4.c static inline void _put_byte(void **pp, u8 val) pp 241 drivers/staging/isdn/avm/c4.c u8 *s = *pp; pp 243 drivers/staging/isdn/avm/c4.c *pp = s; pp 246 drivers/staging/isdn/avm/c4.c static inline void _put_word(void **pp, u32 val) pp 248 drivers/staging/isdn/avm/c4.c u8 *s = *pp; pp 253 drivers/staging/isdn/avm/c4.c *pp = s; pp 256 drivers/staging/isdn/avm/c4.c static inline void _put_slice(void **pp, unsigned char *dp, unsigned int len) pp 259 drivers/staging/isdn/avm/c4.c _put_word(pp, i); pp 261 drivers/staging/isdn/avm/c4.c _put_byte(pp, *dp++); pp 264 drivers/staging/isdn/avm/c4.c static inline u8 _get_byte(void **pp) pp 266 drivers/staging/isdn/avm/c4.c u8 *s = *pp; pp 269 drivers/staging/isdn/avm/c4.c *pp = s; pp 273 drivers/staging/isdn/avm/c4.c static inline u32 _get_word(void **pp) pp 275 drivers/staging/isdn/avm/c4.c u8 *s = *pp; pp 281 drivers/staging/isdn/avm/c4.c *pp = s; pp 285 drivers/staging/isdn/avm/c4.c static inline u32 _get_slice(void **pp, unsigned char *dp) pp 289 drivers/staging/isdn/avm/c4.c len = i = _get_word(pp); pp 290 drivers/staging/isdn/avm/c4.c while (i-- > 0) *dp++ = _get_byte(pp); pp 1334 drivers/staging/isdn/gigaset/capi.c u8 *pp; pp 1375 drivers/staging/isdn/gigaset/capi.c pp = cmsg->CalledPartyNumber; pp 1376 drivers/staging/isdn/gigaset/capi.c if (pp == NULL || *pp == 0) { pp 1382 drivers/staging/isdn/gigaset/capi.c l = *pp++; pp 1384 drivers/staging/isdn/gigaset/capi.c switch (*pp) { pp 1390 drivers/staging/isdn/gigaset/capi.c "CONNECT_REQ", "Called party number", *pp); pp 1392 drivers/staging/isdn/gigaset/capi.c pp++; pp 1395 drivers/staging/isdn/gigaset/capi.c if (l >= 2 && pp[0] == '*' && pp[1] == '*') { pp 1397 drivers/staging/isdn/gigaset/capi.c pp += 2; pp 1408 drivers/staging/isdn/gigaset/capi.c snprintf(commands[AT_DIAL], l + 3, "D%.*s\r", l, pp); pp 1411 drivers/staging/isdn/gigaset/capi.c pp = cmsg->CallingPartyNumber; pp 1412 drivers/staging/isdn/gigaset/capi.c if (pp != NULL && *pp > 0) { pp 1413 drivers/staging/isdn/gigaset/capi.c l = *pp++; pp 1417 drivers/staging/isdn/gigaset/capi.c switch (*pp) { pp 1424 drivers/staging/isdn/gigaset/capi.c "CONNECT_REQ", "Calling party number", *pp); pp 1426 drivers/staging/isdn/gigaset/capi.c pp++; pp 1436 drivers/staging/isdn/gigaset/capi.c switch (*pp & 0xfc) { /* ignore Screening indicator */ pp 1447 drivers/staging/isdn/gigaset/capi.c *pp); pp 1453 drivers/staging/isdn/gigaset/capi.c pp++; pp 
1461 drivers/staging/isdn/gigaset/capi.c snprintf(commands[AT_MSN], l + 8, "^SMSN=%*s\r", l, pp); pp 1053 drivers/staging/ks7010/ks7010_sdio.c struct hostif_stop_request *pp; pp 1059 drivers/staging/ks7010/ks7010_sdio.c pp = kzalloc(hif_align_size(sizeof(*pp)), GFP_KERNEL); pp 1060 drivers/staging/ks7010/ks7010_sdio.c if (!pp) pp 1063 drivers/staging/ks7010/ks7010_sdio.c size = sizeof(*pp) - sizeof(pp->header.size); pp 1064 drivers/staging/ks7010/ks7010_sdio.c pp->header.size = cpu_to_le16(size); pp 1065 drivers/staging/ks7010/ks7010_sdio.c pp->header.event = cpu_to_le16(HIF_STOP_REQ); pp 1068 drivers/staging/ks7010/ks7010_sdio.c write_to_device(card->priv, (u8 *)pp, hif_align_size(sizeof(*pp))); pp 1071 drivers/staging/ks7010/ks7010_sdio.c kfree(pp); pp 1068 drivers/staging/ks7010/ks_hostif.c struct hostif_data_request *pp; pp 1103 drivers/staging/ks7010/ks_hostif.c size = sizeof(*pp) + 6 + skb_len + 8; pp 1104 drivers/staging/ks7010/ks_hostif.c pp = kmalloc(hif_align_size(size), GFP_ATOMIC); pp 1105 drivers/staging/ks7010/ks_hostif.c if (!pp) { pp 1110 drivers/staging/ks7010/ks_hostif.c p = (unsigned char *)pp->data; pp 1156 drivers/staging/ks7010/ks_hostif.c eth_hdr = (struct ether_hdr *)&pp->data[0]; pp 1175 drivers/staging/ks7010/ks_hostif.c pp->auth_type = cpu_to_le16(TYPE_AUTH); pp 1181 drivers/staging/ks7010/ks_hostif.c &pp->data[0], skb_len, pp 1190 drivers/staging/ks7010/ks_hostif.c pp->auth_type = pp 1194 drivers/staging/ks7010/ks_hostif.c pp->auth_type = pp 1200 drivers/staging/ks7010/ks_hostif.c pp->auth_type = cpu_to_le16(TYPE_AUTH); pp 1202 drivers/staging/ks7010/ks_hostif.c pp->auth_type = cpu_to_le16(TYPE_DATA); pp 1206 drivers/staging/ks7010/ks_hostif.c pp->header.size = pp 1207 drivers/staging/ks7010/ks_hostif.c cpu_to_le16((sizeof(*pp) - sizeof(pp->header.size) + skb_len)); pp 1208 drivers/staging/ks7010/ks_hostif.c pp->header.event = cpu_to_le16(HIF_DATA_REQ); pp 1211 drivers/staging/ks7010/ks_hostif.c ret = ks_wlan_hw_tx(priv, pp, hif_align_size(sizeof(*pp) + skb_len), pp 1230 drivers/staging/ks7010/ks_hostif.c kfree(pp); pp 1253 drivers/staging/ks7010/ks_hostif.c struct hostif_mib_get_request *pp; pp 1255 drivers/staging/ks7010/ks_hostif.c pp = hostif_generic_request(sizeof(*pp), HIF_MIB_GET_REQ); pp 1256 drivers/staging/ks7010/ks_hostif.c if (!pp) pp 1259 drivers/staging/ks7010/ks_hostif.c pp->mib_attribute = cpu_to_le32(mib_attribute); pp 1261 drivers/staging/ks7010/ks_hostif.c send_request_to_device(priv, pp, hif_align_size(sizeof(*pp))); pp 1269 drivers/staging/ks7010/ks_hostif.c struct hostif_mib_set_request_t *pp; pp 1274 drivers/staging/ks7010/ks_hostif.c pp = hostif_generic_request(sizeof(*pp), HIF_MIB_SET_REQ); pp 1275 drivers/staging/ks7010/ks_hostif.c if (!pp) pp 1278 drivers/staging/ks7010/ks_hostif.c pp->mib_attribute = cpu_to_le32(attr); pp 1279 drivers/staging/ks7010/ks_hostif.c pp->mib_value.size = cpu_to_le16(size); pp 1280 drivers/staging/ks7010/ks_hostif.c pp->mib_value.type = cpu_to_le16(type); pp 1281 drivers/staging/ks7010/ks_hostif.c memcpy(&pp->mib_value.body, data, size); pp 1283 drivers/staging/ks7010/ks_hostif.c send_request_to_device(priv, pp, hif_align_size(sizeof(*pp) + size)); pp 1315 drivers/staging/ks7010/ks_hostif.c struct hostif_start_request *pp; pp 1317 drivers/staging/ks7010/ks_hostif.c pp = hostif_generic_request(sizeof(*pp), HIF_START_REQ); pp 1318 drivers/staging/ks7010/ks_hostif.c if (!pp) pp 1321 drivers/staging/ks7010/ks_hostif.c pp->mode = cpu_to_le16(mode); pp 1323 drivers/staging/ks7010/ks_hostif.c 
send_request_to_device(priv, pp, hif_align_size(sizeof(*pp))); pp 1361 drivers/staging/ks7010/ks_hostif.c struct hostif_ps_adhoc_set_request *pp; pp 1363 drivers/staging/ks7010/ks_hostif.c pp = hostif_generic_request(sizeof(*pp), HIF_PS_ADH_SET_REQ); pp 1364 drivers/staging/ks7010/ks_hostif.c if (!pp) pp 1367 drivers/staging/ks7010/ks_hostif.c init_request(priv, &pp->request); pp 1368 drivers/staging/ks7010/ks_hostif.c pp->channel = cpu_to_le16(priv->reg.channel); pp 1370 drivers/staging/ks7010/ks_hostif.c send_request_to_device(priv, pp, hif_align_size(sizeof(*pp))); pp 1376 drivers/staging/ks7010/ks_hostif.c struct hostif_infrastructure_set_request *pp; pp 1378 drivers/staging/ks7010/ks_hostif.c pp = hostif_generic_request(sizeof(*pp), event); pp 1379 drivers/staging/ks7010/ks_hostif.c if (!pp) pp 1382 drivers/staging/ks7010/ks_hostif.c init_request(priv, &pp->request); pp 1383 drivers/staging/ks7010/ks_hostif.c pp->ssid.size = priv->reg.ssid.size; pp 1384 drivers/staging/ks7010/ks_hostif.c memcpy(&pp->ssid.body[0], &priv->reg.ssid.body[0], priv->reg.ssid.size); pp 1385 drivers/staging/ks7010/ks_hostif.c pp->beacon_lost_count = pp 1387 drivers/staging/ks7010/ks_hostif.c pp->auth_type = cpu_to_le16(priv->reg.authenticate_type); pp 1389 drivers/staging/ks7010/ks_hostif.c pp->channel_list.body[0] = 1; pp 1390 drivers/staging/ks7010/ks_hostif.c pp->channel_list.body[1] = 8; pp 1391 drivers/staging/ks7010/ks_hostif.c pp->channel_list.body[2] = 2; pp 1392 drivers/staging/ks7010/ks_hostif.c pp->channel_list.body[3] = 9; pp 1393 drivers/staging/ks7010/ks_hostif.c pp->channel_list.body[4] = 3; pp 1394 drivers/staging/ks7010/ks_hostif.c pp->channel_list.body[5] = 10; pp 1395 drivers/staging/ks7010/ks_hostif.c pp->channel_list.body[6] = 4; pp 1396 drivers/staging/ks7010/ks_hostif.c pp->channel_list.body[7] = 11; pp 1397 drivers/staging/ks7010/ks_hostif.c pp->channel_list.body[8] = 5; pp 1398 drivers/staging/ks7010/ks_hostif.c pp->channel_list.body[9] = 12; pp 1399 drivers/staging/ks7010/ks_hostif.c pp->channel_list.body[10] = 6; pp 1400 drivers/staging/ks7010/ks_hostif.c pp->channel_list.body[11] = 13; pp 1401 drivers/staging/ks7010/ks_hostif.c pp->channel_list.body[12] = 7; pp 1403 drivers/staging/ks7010/ks_hostif.c pp->channel_list.size = 13; pp 1405 drivers/staging/ks7010/ks_hostif.c pp->channel_list.body[13] = 14; pp 1406 drivers/staging/ks7010/ks_hostif.c pp->channel_list.size = 14; pp 1409 drivers/staging/ks7010/ks_hostif.c send_request_to_device(priv, pp, hif_align_size(sizeof(*pp))); pp 1415 drivers/staging/ks7010/ks_hostif.c struct hostif_adhoc_set_request *pp; pp 1417 drivers/staging/ks7010/ks_hostif.c pp = hostif_generic_request(sizeof(*pp), HIF_ADH_SET_REQ); pp 1418 drivers/staging/ks7010/ks_hostif.c if (!pp) pp 1421 drivers/staging/ks7010/ks_hostif.c init_request(priv, &pp->request); pp 1422 drivers/staging/ks7010/ks_hostif.c pp->channel = cpu_to_le16(priv->reg.channel); pp 1423 drivers/staging/ks7010/ks_hostif.c pp->ssid.size = priv->reg.ssid.size; pp 1424 drivers/staging/ks7010/ks_hostif.c memcpy(&pp->ssid.body[0], &priv->reg.ssid.body[0], priv->reg.ssid.size); pp 1426 drivers/staging/ks7010/ks_hostif.c send_request_to_device(priv, pp, hif_align_size(sizeof(*pp))); pp 1432 drivers/staging/ks7010/ks_hostif.c struct hostif_adhoc_set2_request *pp; pp 1434 drivers/staging/ks7010/ks_hostif.c pp = hostif_generic_request(sizeof(*pp), HIF_ADH_SET_REQ); pp 1435 drivers/staging/ks7010/ks_hostif.c if (!pp) pp 1438 drivers/staging/ks7010/ks_hostif.c init_request(priv, &pp->request); pp 1439 
drivers/staging/ks7010/ks_hostif.c pp->ssid.size = priv->reg.ssid.size; pp 1440 drivers/staging/ks7010/ks_hostif.c memcpy(&pp->ssid.body[0], &priv->reg.ssid.body[0], priv->reg.ssid.size); pp 1442 drivers/staging/ks7010/ks_hostif.c pp->channel_list.body[0] = priv->reg.channel; pp 1443 drivers/staging/ks7010/ks_hostif.c pp->channel_list.size = 1; pp 1444 drivers/staging/ks7010/ks_hostif.c memcpy(pp->bssid, priv->reg.bssid, ETH_ALEN); pp 1446 drivers/staging/ks7010/ks_hostif.c send_request_to_device(priv, pp, hif_align_size(sizeof(*pp))); pp 1452 drivers/staging/ks7010/ks_hostif.c struct hostif_stop_request *pp; pp 1454 drivers/staging/ks7010/ks_hostif.c pp = hostif_generic_request(sizeof(*pp), HIF_STOP_REQ); pp 1455 drivers/staging/ks7010/ks_hostif.c if (!pp) pp 1458 drivers/staging/ks7010/ks_hostif.c send_request_to_device(priv, pp, hif_align_size(sizeof(*pp))); pp 1464 drivers/staging/ks7010/ks_hostif.c struct hostif_phy_information_request *pp; pp 1466 drivers/staging/ks7010/ks_hostif.c pp = hostif_generic_request(sizeof(*pp), HIF_PHY_INFO_REQ); pp 1467 drivers/staging/ks7010/ks_hostif.c if (!pp) pp 1471 drivers/staging/ks7010/ks_hostif.c pp->type = cpu_to_le16(TIME_TYPE); pp 1472 drivers/staging/ks7010/ks_hostif.c pp->time = cpu_to_le16(priv->reg.phy_info_timer); pp 1474 drivers/staging/ks7010/ks_hostif.c pp->type = cpu_to_le16(NORMAL_TYPE); pp 1475 drivers/staging/ks7010/ks_hostif.c pp->time = cpu_to_le16(0); pp 1478 drivers/staging/ks7010/ks_hostif.c send_request_to_device(priv, pp, hif_align_size(sizeof(*pp))); pp 1485 drivers/staging/ks7010/ks_hostif.c struct hostif_power_mgmt_request *pp; pp 1487 drivers/staging/ks7010/ks_hostif.c pp = hostif_generic_request(sizeof(*pp), HIF_POWER_MGMT_REQ); pp 1488 drivers/staging/ks7010/ks_hostif.c if (!pp) pp 1491 drivers/staging/ks7010/ks_hostif.c pp->mode = cpu_to_le32(mode); pp 1492 drivers/staging/ks7010/ks_hostif.c pp->wake_up = cpu_to_le32(wake_up); pp 1493 drivers/staging/ks7010/ks_hostif.c pp->receive_dtims = cpu_to_le32(receive_dtims); pp 1495 drivers/staging/ks7010/ks_hostif.c send_request_to_device(priv, pp, hif_align_size(sizeof(*pp))); pp 1502 drivers/staging/ks7010/ks_hostif.c struct hostif_sleep_request *pp; pp 1505 drivers/staging/ks7010/ks_hostif.c pp = hostif_generic_request(sizeof(*pp), HIF_SLEEP_REQ); pp 1506 drivers/staging/ks7010/ks_hostif.c if (!pp) pp 1509 drivers/staging/ks7010/ks_hostif.c send_request_to_device(priv, pp, hif_align_size(sizeof(*pp))); pp 1524 drivers/staging/ks7010/ks_hostif.c struct hostif_bss_scan_request *pp; pp 1526 drivers/staging/ks7010/ks_hostif.c pp = hostif_generic_request(sizeof(*pp), HIF_SCAN_REQ); pp 1527 drivers/staging/ks7010/ks_hostif.c if (!pp) pp 1530 drivers/staging/ks7010/ks_hostif.c pp->scan_type = scan_type; pp 1532 drivers/staging/ks7010/ks_hostif.c pp->ch_time_min = cpu_to_le32(110); /* default value */ pp 1533 drivers/staging/ks7010/ks_hostif.c pp->ch_time_max = cpu_to_le32(130); /* default value */ pp 1534 drivers/staging/ks7010/ks_hostif.c pp->channel_list.body[0] = 1; pp 1535 drivers/staging/ks7010/ks_hostif.c pp->channel_list.body[1] = 8; pp 1536 drivers/staging/ks7010/ks_hostif.c pp->channel_list.body[2] = 2; pp 1537 drivers/staging/ks7010/ks_hostif.c pp->channel_list.body[3] = 9; pp 1538 drivers/staging/ks7010/ks_hostif.c pp->channel_list.body[4] = 3; pp 1539 drivers/staging/ks7010/ks_hostif.c pp->channel_list.body[5] = 10; pp 1540 drivers/staging/ks7010/ks_hostif.c pp->channel_list.body[6] = 4; pp 1541 drivers/staging/ks7010/ks_hostif.c pp->channel_list.body[7] = 11; pp 1542 
drivers/staging/ks7010/ks_hostif.c pp->channel_list.body[8] = 5; pp 1543 drivers/staging/ks7010/ks_hostif.c pp->channel_list.body[9] = 12; pp 1544 drivers/staging/ks7010/ks_hostif.c pp->channel_list.body[10] = 6; pp 1545 drivers/staging/ks7010/ks_hostif.c pp->channel_list.body[11] = 13; pp 1546 drivers/staging/ks7010/ks_hostif.c pp->channel_list.body[12] = 7; pp 1548 drivers/staging/ks7010/ks_hostif.c pp->channel_list.size = 13; pp 1550 drivers/staging/ks7010/ks_hostif.c pp->channel_list.body[13] = 14; pp 1551 drivers/staging/ks7010/ks_hostif.c pp->channel_list.size = 14; pp 1553 drivers/staging/ks7010/ks_hostif.c pp->ssid.size = 0; pp 1557 drivers/staging/ks7010/ks_hostif.c pp->ssid.size = scan_ssid_len; pp 1558 drivers/staging/ks7010/ks_hostif.c memcpy(&pp->ssid.body[0], scan_ssid, scan_ssid_len); pp 1561 drivers/staging/ks7010/ks_hostif.c send_request_to_device(priv, pp, hif_align_size(sizeof(*pp))); pp 1571 drivers/staging/ks7010/ks_hostif.c struct hostif_mic_failure_request *pp; pp 1573 drivers/staging/ks7010/ks_hostif.c pp = hostif_generic_request(sizeof(*pp), HIF_MIC_FAILURE_REQ); pp 1574 drivers/staging/ks7010/ks_hostif.c if (!pp) pp 1577 drivers/staging/ks7010/ks_hostif.c pp->failure_count = cpu_to_le16(failure_count); pp 1578 drivers/staging/ks7010/ks_hostif.c pp->timer = cpu_to_le16(timer); pp 1580 drivers/staging/ks7010/ks_hostif.c send_request_to_device(priv, pp, hif_align_size(sizeof(*pp))); pp 2187 drivers/staging/rtl8723bs/core/rtw_mlme_ext.c int pp; pp 2189 drivers/staging/rtl8723bs/core/rtw_mlme_ext.c for (pp = 0; pp < pattrib->pkt_len; pp++) pp 2190 drivers/staging/rtl8723bs/core/rtw_mlme_ext.c printk(" %02x ", pframe[pp]); pp 79 drivers/tty/serial/altera_jtaguart.c struct altera_jtaguart *pp = pp 82 drivers/tty/serial/altera_jtaguart.c pp->imr |= ALTERA_JTAGUART_CONTROL_WE_MSK; pp 83 drivers/tty/serial/altera_jtaguart.c writel(pp->imr, port->membase + ALTERA_JTAGUART_CONTROL_REG); pp 88 drivers/tty/serial/altera_jtaguart.c struct altera_jtaguart *pp = pp 91 drivers/tty/serial/altera_jtaguart.c pp->imr &= ~ALTERA_JTAGUART_CONTROL_WE_MSK; pp 92 drivers/tty/serial/altera_jtaguart.c writel(pp->imr, port->membase + ALTERA_JTAGUART_CONTROL_REG); pp 97 drivers/tty/serial/altera_jtaguart.c struct altera_jtaguart *pp = pp 100 drivers/tty/serial/altera_jtaguart.c pp->imr &= ~ALTERA_JTAGUART_CONTROL_RE_MSK; pp 101 drivers/tty/serial/altera_jtaguart.c writel(pp->imr, port->membase + ALTERA_JTAGUART_CONTROL_REG); pp 117 drivers/tty/serial/altera_jtaguart.c static void altera_jtaguart_rx_chars(struct altera_jtaguart *pp) pp 119 drivers/tty/serial/altera_jtaguart.c struct uart_port *port = &pp->port; pp 139 drivers/tty/serial/altera_jtaguart.c static void altera_jtaguart_tx_chars(struct altera_jtaguart *pp) pp 141 drivers/tty/serial/altera_jtaguart.c struct uart_port *port = &pp->port; pp 174 drivers/tty/serial/altera_jtaguart.c pp->imr &= ~ALTERA_JTAGUART_CONTROL_WE_MSK; pp 175 drivers/tty/serial/altera_jtaguart.c writel(pp->imr, port->membase + ALTERA_JTAGUART_CONTROL_REG); pp 182 drivers/tty/serial/altera_jtaguart.c struct altera_jtaguart *pp = pp 187 drivers/tty/serial/altera_jtaguart.c ALTERA_JTAGUART_CONTROL_RI_OFF) & pp->imr; pp 192 drivers/tty/serial/altera_jtaguart.c altera_jtaguart_rx_chars(pp); pp 194 drivers/tty/serial/altera_jtaguart.c altera_jtaguart_tx_chars(pp); pp 211 drivers/tty/serial/altera_jtaguart.c struct altera_jtaguart *pp = pp 227 drivers/tty/serial/altera_jtaguart.c pp->imr = ALTERA_JTAGUART_CONTROL_RE_MSK; pp 228 drivers/tty/serial/altera_jtaguart.c 
writel(pp->imr, port->membase + ALTERA_JTAGUART_CONTROL_REG); pp 237 drivers/tty/serial/altera_jtaguart.c struct altera_jtaguart *pp = pp 244 drivers/tty/serial/altera_jtaguart.c pp->imr = 0; pp 245 drivers/tty/serial/altera_jtaguart.c writel(pp->imr, port->membase + ALTERA_JTAGUART_CONTROL_REG); pp 102 drivers/tty/serial/altera_uart.c struct altera_uart *pp = container_of(port, struct altera_uart, port); pp 107 drivers/tty/serial/altera_uart.c sigs |= (pp->sigs & TIOCM_RTS); pp 112 drivers/tty/serial/altera_uart.c static void altera_uart_update_ctrl_reg(struct altera_uart *pp) pp 114 drivers/tty/serial/altera_uart.c unsigned short imr = pp->imr; pp 120 drivers/tty/serial/altera_uart.c if (!pp->port.irq) pp 123 drivers/tty/serial/altera_uart.c altera_uart_writel(&pp->port, imr, ALTERA_UART_CONTROL_REG); pp 128 drivers/tty/serial/altera_uart.c struct altera_uart *pp = container_of(port, struct altera_uart, port); pp 130 drivers/tty/serial/altera_uart.c pp->sigs = sigs; pp 132 drivers/tty/serial/altera_uart.c pp->imr |= ALTERA_UART_CONTROL_RTS_MSK; pp 134 drivers/tty/serial/altera_uart.c pp->imr &= ~ALTERA_UART_CONTROL_RTS_MSK; pp 135 drivers/tty/serial/altera_uart.c altera_uart_update_ctrl_reg(pp); pp 140 drivers/tty/serial/altera_uart.c struct altera_uart *pp = container_of(port, struct altera_uart, port); pp 142 drivers/tty/serial/altera_uart.c pp->imr |= ALTERA_UART_CONTROL_TRDY_MSK; pp 143 drivers/tty/serial/altera_uart.c altera_uart_update_ctrl_reg(pp); pp 148 drivers/tty/serial/altera_uart.c struct altera_uart *pp = container_of(port, struct altera_uart, port); pp 150 drivers/tty/serial/altera_uart.c pp->imr &= ~ALTERA_UART_CONTROL_TRDY_MSK; pp 151 drivers/tty/serial/altera_uart.c altera_uart_update_ctrl_reg(pp); pp 156 drivers/tty/serial/altera_uart.c struct altera_uart *pp = container_of(port, struct altera_uart, port); pp 158 drivers/tty/serial/altera_uart.c pp->imr &= ~ALTERA_UART_CONTROL_RRDY_MSK; pp 159 drivers/tty/serial/altera_uart.c altera_uart_update_ctrl_reg(pp); pp 164 drivers/tty/serial/altera_uart.c struct altera_uart *pp = container_of(port, struct altera_uart, port); pp 169 drivers/tty/serial/altera_uart.c pp->imr |= ALTERA_UART_CONTROL_TRBK_MSK; pp 171 drivers/tty/serial/altera_uart.c pp->imr &= ~ALTERA_UART_CONTROL_TRBK_MSK; pp 172 drivers/tty/serial/altera_uart.c altera_uart_update_ctrl_reg(pp); pp 202 drivers/tty/serial/altera_uart.c static void altera_uart_rx_chars(struct altera_uart *pp) pp 204 drivers/tty/serial/altera_uart.c struct uart_port *port = &pp->port; pp 251 drivers/tty/serial/altera_uart.c static void altera_uart_tx_chars(struct altera_uart *pp) pp 253 drivers/tty/serial/altera_uart.c struct uart_port *port = &pp->port; pp 278 drivers/tty/serial/altera_uart.c pp->imr &= ~ALTERA_UART_CONTROL_TRDY_MSK; pp 279 drivers/tty/serial/altera_uart.c altera_uart_update_ctrl_reg(pp); pp 286 drivers/tty/serial/altera_uart.c struct altera_uart *pp = container_of(port, struct altera_uart, port); pp 289 drivers/tty/serial/altera_uart.c isr = altera_uart_readl(port, ALTERA_UART_STATUS_REG) & pp->imr; pp 293 drivers/tty/serial/altera_uart.c altera_uart_rx_chars(pp); pp 295 drivers/tty/serial/altera_uart.c altera_uart_tx_chars(pp); pp 303 drivers/tty/serial/altera_uart.c struct altera_uart *pp = from_timer(pp, t, tmr); pp 304 drivers/tty/serial/altera_uart.c struct uart_port *port = &pp->port; pp 307 drivers/tty/serial/altera_uart.c mod_timer(&pp->tmr, jiffies + uart_poll_timeout(port)); pp 322 drivers/tty/serial/altera_uart.c struct altera_uart *pp = container_of(port, 
struct altera_uart, port); pp 326 drivers/tty/serial/altera_uart.c timer_setup(&pp->tmr, altera_uart_timer, 0); pp 327 drivers/tty/serial/altera_uart.c mod_timer(&pp->tmr, jiffies + uart_poll_timeout(port)); pp 343 drivers/tty/serial/altera_uart.c pp->imr = ALTERA_UART_CONTROL_RRDY_MSK; pp 344 drivers/tty/serial/altera_uart.c altera_uart_update_ctrl_reg(pp); pp 353 drivers/tty/serial/altera_uart.c struct altera_uart *pp = container_of(port, struct altera_uart, port); pp 359 drivers/tty/serial/altera_uart.c pp->imr = 0; pp 360 drivers/tty/serial/altera_uart.c altera_uart_update_ctrl_reg(pp); pp 367 drivers/tty/serial/altera_uart.c del_timer_sync(&pp->tmr); pp 70 drivers/tty/serial/mcf.c struct mcf_uart *pp = container_of(port, struct mcf_uart, port); pp 75 drivers/tty/serial/mcf.c sigs |= (pp->sigs & TIOCM_RTS); pp 86 drivers/tty/serial/mcf.c struct mcf_uart *pp = container_of(port, struct mcf_uart, port); pp 88 drivers/tty/serial/mcf.c pp->sigs = sigs; pp 100 drivers/tty/serial/mcf.c struct mcf_uart *pp = container_of(port, struct mcf_uart, port); pp 108 drivers/tty/serial/mcf.c pp->imr |= MCFUART_UIR_TXREADY; pp 109 drivers/tty/serial/mcf.c writeb(pp->imr, port->membase + MCFUART_UIMR); pp 116 drivers/tty/serial/mcf.c struct mcf_uart *pp = container_of(port, struct mcf_uart, port); pp 118 drivers/tty/serial/mcf.c pp->imr &= ~MCFUART_UIR_TXREADY; pp 119 drivers/tty/serial/mcf.c writeb(pp->imr, port->membase + MCFUART_UIMR); pp 126 drivers/tty/serial/mcf.c struct mcf_uart *pp = container_of(port, struct mcf_uart, port); pp 128 drivers/tty/serial/mcf.c pp->imr &= ~MCFUART_UIR_RXREADY; pp 129 drivers/tty/serial/mcf.c writeb(pp->imr, port->membase + MCFUART_UIMR); pp 150 drivers/tty/serial/mcf.c struct mcf_uart *pp = container_of(port, struct mcf_uart, port); pp 164 drivers/tty/serial/mcf.c pp->imr = MCFUART_UIR_RXREADY; pp 165 drivers/tty/serial/mcf.c writeb(pp->imr, port->membase + MCFUART_UIMR); pp 176 drivers/tty/serial/mcf.c struct mcf_uart *pp = container_of(port, struct mcf_uart, port); pp 182 drivers/tty/serial/mcf.c pp->imr = 0; pp 183 drivers/tty/serial/mcf.c writeb(pp->imr, port->membase + MCFUART_UIMR); pp 281 drivers/tty/serial/mcf.c static void mcf_rx_chars(struct mcf_uart *pp) pp 283 drivers/tty/serial/mcf.c struct uart_port *port = &pp->port; pp 329 drivers/tty/serial/mcf.c static void mcf_tx_chars(struct mcf_uart *pp) pp 331 drivers/tty/serial/mcf.c struct uart_port *port = &pp->port; pp 354 drivers/tty/serial/mcf.c pp->imr &= ~MCFUART_UIR_TXREADY; pp 355 drivers/tty/serial/mcf.c writeb(pp->imr, port->membase + MCFUART_UIMR); pp 368 drivers/tty/serial/mcf.c struct mcf_uart *pp = container_of(port, struct mcf_uart, port); pp 372 drivers/tty/serial/mcf.c isr = readb(port->membase + MCFUART_UISR) & pp->imr; pp 376 drivers/tty/serial/mcf.c mcf_rx_chars(pp); pp 380 drivers/tty/serial/mcf.c mcf_tx_chars(pp); pp 727 drivers/usb/atm/ueagle-atm.c unsigned int i, j, p, pp; pp 748 drivers/usb/atm/ueagle-atm.c pp = pageoffset; pp 749 drivers/usb/atm/ueagle-atm.c blockcount = FW_GET_BYTE(dsp + pp); pp 750 drivers/usb/atm/ueagle-atm.c pp += 1; pp 755 drivers/usb/atm/ueagle-atm.c if (pp + 4 > len) pp 758 drivers/usb/atm/ueagle-atm.c pp += 2; /* skip blockaddr */ pp 759 drivers/usb/atm/ueagle-atm.c blocksize = get_unaligned_le16(dsp + pp); pp 760 drivers/usb/atm/ueagle-atm.c pp += 2; pp 763 drivers/usb/atm/ueagle-atm.c if (pp + blocksize > len) pp 766 drivers/usb/atm/ueagle-atm.c pp += blocksize; pp 27 drivers/usb/gadget/function/u_audio.c struct uac_rtd_params *pp; /* parent param */ pp 88 
drivers/usb/gadget/function/u_audio.c struct uac_rtd_params *prm = ur->pp; pp 391 drivers/usb/gadget/function/u_audio.c prm->ureq[i].pp = prm; pp 467 drivers/usb/gadget/function/u_audio.c prm->ureq[i].pp = prm; pp 2060 drivers/usb/gadget/udc/atmel_usba_udc.c struct device_node *pp; pp 2083 drivers/usb/gadget/udc/atmel_usba_udc.c pp = NULL; pp 2084 drivers/usb/gadget/udc/atmel_usba_udc.c while ((pp = of_get_next_child(np, pp))) pp 2100 drivers/usb/gadget/udc/atmel_usba_udc.c pp = NULL; pp 2102 drivers/usb/gadget/udc/atmel_usba_udc.c while ((pp = of_get_next_child(np, pp)) && i < udc->num_ep) { pp 2105 drivers/usb/gadget/udc/atmel_usba_udc.c ret = of_property_read_u32(pp, "reg", &val); pp 2112 drivers/usb/gadget/udc/atmel_usba_udc.c ret = of_property_read_u32(pp, "atmel,fifo-size", &val); pp 2129 drivers/usb/gadget/udc/atmel_usba_udc.c ret = of_property_read_u32(pp, "atmel,nb-banks", &val); pp 2146 drivers/usb/gadget/udc/atmel_usba_udc.c ep->can_dma = of_property_read_bool(pp, "atmel,can-dma"); pp 2147 drivers/usb/gadget/udc/atmel_usba_udc.c ep->can_isoc = of_property_read_bool(pp, "atmel,can-isoc"); pp 49 drivers/usb/misc/uss720.c struct parport *pp; pp 98 drivers/usb/misc/uss720.c struct parport *pp; pp 104 drivers/usb/misc/uss720.c pp = priv->pp; pp 115 drivers/usb/misc/uss720.c if (rq->reg[2] & rq->reg[1] & 0x10 && pp) pp 116 drivers/usb/misc/uss720.c parport_generic_irq(pp); pp 192 drivers/usb/misc/uss720.c static int get_1284_register(struct parport *pp, unsigned char reg, unsigned char *val, gfp_t mem_flags) pp 201 drivers/usb/misc/uss720.c if (!pp) pp 203 drivers/usb/misc/uss720.c priv = pp->private_data; pp 228 drivers/usb/misc/uss720.c static int set_1284_register(struct parport *pp, unsigned char reg, unsigned char val, gfp_t mem_flags) pp 233 drivers/usb/misc/uss720.c if (!pp) pp 235 drivers/usb/misc/uss720.c priv = pp->private_data; pp 256 drivers/usb/misc/uss720.c static int change_mode(struct parport *pp, int m) pp 258 drivers/usb/misc/uss720.c struct parport_uss720_private *priv = pp->private_data; pp 262 drivers/usb/misc/uss720.c if (get_1284_register(pp, 6, &reg, GFP_KERNEL)) pp 270 drivers/usb/misc/uss720.c if (change_mode(pp, ECR_PS2)) pp 276 drivers/usb/misc/uss720.c unsigned long expire = jiffies + pp->physport->cad->timeout; pp 282 drivers/usb/misc/uss720.c if (get_1284_register(pp, 6, &reg, GFP_KERNEL)) pp 296 drivers/usb/misc/uss720.c if (set_1284_register(pp, 6, m << 5, GFP_KERNEL)) pp 298 drivers/usb/misc/uss720.c if (get_1284_register(pp, 6, &reg, GFP_KERNEL)) pp 306 drivers/usb/misc/uss720.c static int clear_epp_timeout(struct parport *pp) pp 310 drivers/usb/misc/uss720.c if (get_1284_register(pp, 1, &stat, GFP_KERNEL)) pp 321 drivers/usb/misc/uss720.c struct parport *pp = (struct parport *)dev_id; pp 322 drivers/usb/misc/uss720.c struct parport_uss720_private *priv = pp->private_data; pp 329 drivers/usb/misc/uss720.c parport_generic_irq(pp); pp 334 drivers/usb/misc/uss720.c static void parport_uss720_write_data(struct parport *pp, unsigned char d) pp 336 drivers/usb/misc/uss720.c set_1284_register(pp, 0, d, GFP_KERNEL); pp 339 drivers/usb/misc/uss720.c static unsigned char parport_uss720_read_data(struct parport *pp) pp 343 drivers/usb/misc/uss720.c if (get_1284_register(pp, 0, &ret, GFP_KERNEL)) pp 348 drivers/usb/misc/uss720.c static void parport_uss720_write_control(struct parport *pp, unsigned char d) pp 350 drivers/usb/misc/uss720.c struct parport_uss720_private *priv = pp->private_data; pp 353 drivers/usb/misc/uss720.c if (set_1284_register(pp, 2, d, GFP_KERNEL)) 
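
The drivers/usb/misc/uss720.c entries above and below show that driver's central pattern: every parport operation funnels through get_1284_register()/set_1284_register(), which turn ordinary register accesses into USB control transfers, while frob_control does its read-modify-write against a cached copy of the control register so that only the write has to cross the bus. A minimal sketch of that frob idiom follows; apart from set_1284_register(), whose signature appears in the entries above, the struct and function names are hypothetical, not the kernel's code.

	/*
	 * Sketch only, with a hypothetical private struct: rewrite the
	 * masked control bits from a software copy of register 2 (CTR),
	 * so no USB read is needed on the frob path.
	 */
	struct sketch_uss720_priv {
		unsigned char ctr;	/* cached 1284 control register */
	};

	static unsigned char sketch_frob_control(struct parport *pp,
						 unsigned char mask,
						 unsigned char val)
	{
		struct sketch_uss720_priv *priv = pp->private_data;
		/* standard parport frob semantics: bits in mask take val */
		unsigned char d = (priv->ctr & ~mask) | (val & mask);

		if (set_1284_register(pp, 2, d, GFP_ATOMIC))
			return priv->ctr;	/* USB write failed; keep old cache */
		priv->ctr = d;
		return d;
	}
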
pp 358 drivers/usb/misc/uss720.c static unsigned char parport_uss720_read_control(struct parport *pp) pp 360 drivers/usb/misc/uss720.c struct parport_uss720_private *priv = pp->private_data; pp 364 drivers/usb/misc/uss720.c static unsigned char parport_uss720_frob_control(struct parport *pp, unsigned char mask, unsigned char val) pp 366 drivers/usb/misc/uss720.c struct parport_uss720_private *priv = pp->private_data; pp 372 drivers/usb/misc/uss720.c if (set_1284_register(pp, 2, d, GFP_ATOMIC)) pp 378 drivers/usb/misc/uss720.c static unsigned char parport_uss720_read_status(struct parport *pp) pp 382 drivers/usb/misc/uss720.c if (get_1284_register(pp, 1, &ret, GFP_ATOMIC)) pp 387 drivers/usb/misc/uss720.c static void parport_uss720_disable_irq(struct parport *pp) pp 389 drivers/usb/misc/uss720.c struct parport_uss720_private *priv = pp->private_data; pp 393 drivers/usb/misc/uss720.c if (set_1284_register(pp, 2, d, GFP_KERNEL)) pp 398 drivers/usb/misc/uss720.c static void parport_uss720_enable_irq(struct parport *pp) pp 400 drivers/usb/misc/uss720.c struct parport_uss720_private *priv = pp->private_data; pp 404 drivers/usb/misc/uss720.c if (set_1284_register(pp, 2, d, GFP_KERNEL)) pp 409 drivers/usb/misc/uss720.c static void parport_uss720_data_forward (struct parport *pp) pp 411 drivers/usb/misc/uss720.c struct parport_uss720_private *priv = pp->private_data; pp 415 drivers/usb/misc/uss720.c if (set_1284_register(pp, 2, d, GFP_KERNEL)) pp 420 drivers/usb/misc/uss720.c static void parport_uss720_data_reverse (struct parport *pp) pp 422 drivers/usb/misc/uss720.c struct parport_uss720_private *priv = pp->private_data; pp 426 drivers/usb/misc/uss720.c if (set_1284_register(pp, 2, d, GFP_KERNEL)) pp 437 drivers/usb/misc/uss720.c static void parport_uss720_save_state(struct parport *pp, struct parport_state *s) pp 439 drivers/usb/misc/uss720.c struct parport_uss720_private *priv = pp->private_data; pp 442 drivers/usb/misc/uss720.c if (get_1284_register(pp, 2, NULL, GFP_ATOMIC)) pp 449 drivers/usb/misc/uss720.c static void parport_uss720_restore_state(struct parport *pp, struct parport_state *s) pp 451 drivers/usb/misc/uss720.c struct parport_uss720_private *priv = pp->private_data; pp 453 drivers/usb/misc/uss720.c set_1284_register(pp, 2, s->u.pc.ctr, GFP_ATOMIC); pp 454 drivers/usb/misc/uss720.c set_1284_register(pp, 6, s->u.pc.ecr, GFP_ATOMIC); pp 455 drivers/usb/misc/uss720.c get_1284_register(pp, 2, NULL, GFP_ATOMIC); pp 460 drivers/usb/misc/uss720.c static size_t parport_uss720_epp_read_data(struct parport *pp, void *buf, size_t length, int flags) pp 462 drivers/usb/misc/uss720.c struct parport_uss720_private *priv = pp->private_data; pp 465 drivers/usb/misc/uss720.c if (change_mode(pp, ECR_EPP)) pp 468 drivers/usb/misc/uss720.c if (get_1284_register(pp, 4, (char *)buf, GFP_KERNEL)) pp 472 drivers/usb/misc/uss720.c clear_epp_timeout(pp); pp 476 drivers/usb/misc/uss720.c change_mode(pp, ECR_PS2); pp 480 drivers/usb/misc/uss720.c static size_t parport_uss720_epp_write_data(struct parport *pp, const void *buf, size_t length, int flags) pp 483 drivers/usb/misc/uss720.c struct parport_uss720_private *priv = pp->private_data; pp 486 drivers/usb/misc/uss720.c if (change_mode(pp, ECR_EPP)) pp 489 drivers/usb/misc/uss720.c if (set_1284_register(pp, 4, (char *)buf, GFP_KERNEL)) pp 492 drivers/usb/misc/uss720.c if (get_1284_register(pp, 1, NULL, GFP_KERNEL)) pp 495 drivers/usb/misc/uss720.c clear_epp_timeout(pp); pp 499 drivers/usb/misc/uss720.c change_mode(pp, ECR_PS2); pp 502 
drivers/usb/misc/uss720.c struct parport_uss720_private *priv = pp->private_data; pp 509 drivers/usb/misc/uss720.c if (change_mode(pp, ECR_EPP)) pp 514 drivers/usb/misc/uss720.c change_mode(pp, ECR_PS2); pp 519 drivers/usb/misc/uss720.c static size_t parport_uss720_epp_read_addr(struct parport *pp, void *buf, size_t length, int flags) pp 521 drivers/usb/misc/uss720.c struct parport_uss720_private *priv = pp->private_data; pp 524 drivers/usb/misc/uss720.c if (change_mode(pp, ECR_EPP)) pp 527 drivers/usb/misc/uss720.c if (get_1284_register(pp, 3, (char *)buf, GFP_KERNEL)) pp 531 drivers/usb/misc/uss720.c clear_epp_timeout(pp); pp 535 drivers/usb/misc/uss720.c change_mode(pp, ECR_PS2); pp 539 drivers/usb/misc/uss720.c static size_t parport_uss720_epp_write_addr(struct parport *pp, const void *buf, size_t length, int flags) pp 541 drivers/usb/misc/uss720.c struct parport_uss720_private *priv = pp->private_data; pp 544 drivers/usb/misc/uss720.c if (change_mode(pp, ECR_EPP)) pp 547 drivers/usb/misc/uss720.c if (set_1284_register(pp, 3, *(char *)buf, GFP_KERNEL)) pp 550 drivers/usb/misc/uss720.c if (get_1284_register(pp, 1, NULL, GFP_KERNEL)) pp 553 drivers/usb/misc/uss720.c clear_epp_timeout(pp); pp 557 drivers/usb/misc/uss720.c change_mode(pp, ECR_PS2); pp 561 drivers/usb/misc/uss720.c static size_t parport_uss720_ecp_write_data(struct parport *pp, const void *buffer, size_t len, int flags) pp 563 drivers/usb/misc/uss720.c struct parport_uss720_private *priv = pp->private_data; pp 570 drivers/usb/misc/uss720.c if (change_mode(pp, ECR_ECP)) pp 575 drivers/usb/misc/uss720.c change_mode(pp, ECR_PS2); pp 579 drivers/usb/misc/uss720.c static size_t parport_uss720_ecp_read_data(struct parport *pp, void *buffer, size_t len, int flags) pp 581 drivers/usb/misc/uss720.c struct parport_uss720_private *priv = pp->private_data; pp 588 drivers/usb/misc/uss720.c if (change_mode(pp, ECR_ECP)) pp 593 drivers/usb/misc/uss720.c change_mode(pp, ECR_PS2); pp 597 drivers/usb/misc/uss720.c static size_t parport_uss720_ecp_write_addr(struct parport *pp, const void *buffer, size_t len, int flags) pp 601 drivers/usb/misc/uss720.c if (change_mode(pp, ECR_ECP)) pp 604 drivers/usb/misc/uss720.c if (set_1284_register(pp, 5, *(char *)buffer, GFP_KERNEL)) pp 608 drivers/usb/misc/uss720.c change_mode(pp, ECR_PS2); pp 612 drivers/usb/misc/uss720.c static size_t parport_uss720_write_compat(struct parport *pp, const void *buffer, size_t len, int flags) pp 614 drivers/usb/misc/uss720.c struct parport_uss720_private *priv = pp->private_data; pp 621 drivers/usb/misc/uss720.c if (change_mode(pp, ECR_PPF)) pp 626 drivers/usb/misc/uss720.c change_mode(pp, ECR_PS2); pp 677 drivers/usb/misc/uss720.c struct parport *pp; pp 708 drivers/usb/misc/uss720.c priv->pp = NULL; pp 713 drivers/usb/misc/uss720.c pp = parport_register_port(0, PARPORT_IRQ_NONE, PARPORT_DMA_NONE, &parport_uss720_ops); pp 714 drivers/usb/misc/uss720.c if (!pp) { pp 719 drivers/usb/misc/uss720.c priv->pp = pp; pp 720 drivers/usb/misc/uss720.c pp->private_data = priv; pp 721 drivers/usb/misc/uss720.c pp->modes = PARPORT_MODE_PCSPP | PARPORT_MODE_TRISTATE | PARPORT_MODE_EPP | PARPORT_MODE_ECP | PARPORT_MODE_COMPAT; pp 724 drivers/usb/misc/uss720.c set_1284_register(pp, 7, 0x00, GFP_KERNEL); pp 725 drivers/usb/misc/uss720.c set_1284_register(pp, 6, 0x30, GFP_KERNEL); /* PS/2 mode */ pp 726 drivers/usb/misc/uss720.c set_1284_register(pp, 2, 0x0c, GFP_KERNEL); pp 728 drivers/usb/misc/uss720.c get_1284_register(pp, 0, &reg, GFP_KERNEL); pp 736 drivers/usb/misc/uss720.c 
parport_announce_port(pp); pp 738 drivers/usb/misc/uss720.c usb_set_intfdata(intf, pp); pp 749 drivers/usb/misc/uss720.c struct parport *pp = usb_get_intfdata(intf); pp 754 drivers/usb/misc/uss720.c if (pp) { pp 755 drivers/usb/misc/uss720.c priv = pp->private_data; pp 757 drivers/usb/misc/uss720.c priv->pp = NULL; pp 759 drivers/usb/misc/uss720.c parport_remove_port(pp); pp 760 drivers/usb/misc/uss720.c parport_put_port(pp); pp 97 drivers/usb/serial/mos7720.c struct parport *pp; /* back to containing struct */ pp 445 drivers/usb/serial/mos7720.c static int parport_prologue(struct parport *pp) pp 450 drivers/usb/serial/mos7720.c mos_parport = pp->private_data; pp 476 drivers/usb/serial/mos7720.c static inline void parport_epilogue(struct parport *pp) pp 478 drivers/usb/serial/mos7720.c struct mos7715_parport *mos_parport = pp->private_data; pp 484 drivers/usb/serial/mos7720.c static void parport_mos7715_write_data(struct parport *pp, unsigned char d) pp 486 drivers/usb/serial/mos7720.c struct mos7715_parport *mos_parport = pp->private_data; pp 488 drivers/usb/serial/mos7720.c if (parport_prologue(pp) < 0) pp 492 drivers/usb/serial/mos7720.c parport_epilogue(pp); pp 495 drivers/usb/serial/mos7720.c static unsigned char parport_mos7715_read_data(struct parport *pp) pp 497 drivers/usb/serial/mos7720.c struct mos7715_parport *mos_parport = pp->private_data; pp 500 drivers/usb/serial/mos7720.c if (parport_prologue(pp) < 0) pp 503 drivers/usb/serial/mos7720.c parport_epilogue(pp); pp 507 drivers/usb/serial/mos7720.c static void parport_mos7715_write_control(struct parport *pp, unsigned char d) pp 509 drivers/usb/serial/mos7720.c struct mos7715_parport *mos_parport = pp->private_data; pp 512 drivers/usb/serial/mos7720.c if (parport_prologue(pp) < 0) pp 517 drivers/usb/serial/mos7720.c parport_epilogue(pp); pp 520 drivers/usb/serial/mos7720.c static unsigned char parport_mos7715_read_control(struct parport *pp) pp 526 drivers/usb/serial/mos7720.c mos_parport = pp->private_data; pp 536 drivers/usb/serial/mos7720.c static unsigned char parport_mos7715_frob_control(struct parport *pp, pp 540 drivers/usb/serial/mos7720.c struct mos7715_parport *mos_parport = pp->private_data; pp 545 drivers/usb/serial/mos7720.c if (parport_prologue(pp) < 0) pp 551 drivers/usb/serial/mos7720.c parport_epilogue(pp); pp 555 drivers/usb/serial/mos7720.c static unsigned char parport_mos7715_read_status(struct parport *pp) pp 561 drivers/usb/serial/mos7720.c mos_parport = pp->private_data; pp 571 drivers/usb/serial/mos7720.c static void parport_mos7715_enable_irq(struct parport *pp) pp 575 drivers/usb/serial/mos7720.c static void parport_mos7715_disable_irq(struct parport *pp) pp 579 drivers/usb/serial/mos7720.c static void parport_mos7715_data_forward(struct parport *pp) pp 581 drivers/usb/serial/mos7720.c struct mos7715_parport *mos_parport = pp->private_data; pp 583 drivers/usb/serial/mos7720.c if (parport_prologue(pp) < 0) pp 589 drivers/usb/serial/mos7720.c parport_epilogue(pp); pp 592 drivers/usb/serial/mos7720.c static void parport_mos7715_data_reverse(struct parport *pp) pp 594 drivers/usb/serial/mos7720.c struct mos7715_parport *mos_parport = pp->private_data; pp 596 drivers/usb/serial/mos7720.c if (parport_prologue(pp) < 0) pp 602 drivers/usb/serial/mos7720.c parport_epilogue(pp); pp 613 drivers/usb/serial/mos7720.c static void parport_mos7715_save_state(struct parport *pp, pp 619 drivers/usb/serial/mos7720.c mos_parport = pp->private_data; pp 630 drivers/usb/serial/mos7720.c static void 
parport_mos7715_restore_state(struct parport *pp, pp 636 drivers/usb/serial/mos7720.c mos_parport = pp->private_data; pp 648 drivers/usb/serial/mos7720.c static size_t parport_mos7715_write_compat(struct parport *pp, pp 653 drivers/usb/serial/mos7720.c struct mos7715_parport *mos_parport = pp->private_data; pp 656 drivers/usb/serial/mos7720.c if (parport_prologue(pp) < 0) pp 663 drivers/usb/serial/mos7720.c parport_epilogue(pp); pp 736 drivers/usb/serial/mos7720.c mos_parport->pp = parport_register_port(0, PARPORT_IRQ_NONE, pp 739 drivers/usb/serial/mos7720.c if (mos_parport->pp == NULL) { pp 745 drivers/usb/serial/mos7720.c mos_parport->pp->private_data = mos_parport; pp 746 drivers/usb/serial/mos7720.c mos_parport->pp->modes = PARPORT_MODE_COMPAT | PARPORT_MODE_PCSPP; pp 747 drivers/usb/serial/mos7720.c mos_parport->pp->dev = &serial->interface->dev; pp 748 drivers/usb/serial/mos7720.c parport_announce_port(mos_parport->pp); pp 1877 drivers/usb/serial/mos7720.c mos_parport->pp->private_data = NULL; pp 1885 drivers/usb/serial/mos7720.c parport_remove_port(mos_parport->pp); pp 1899 drivers/usb/serial/mos7720.c parport_del_port(mos_parport->pp); pp 545 drivers/video/fbdev/offb.c const __be32 *pp, *addrp, *up; pp 557 drivers/video/fbdev/offb.c pp = of_get_property(dp, "linux,bootx-depth", &len); pp 558 drivers/video/fbdev/offb.c if (pp == NULL) pp 559 drivers/video/fbdev/offb.c pp = of_get_property(dp, "depth", &len); pp 560 drivers/video/fbdev/offb.c if (pp && len == sizeof(u32)) pp 561 drivers/video/fbdev/offb.c depth = be32_to_cpup(pp); pp 563 drivers/video/fbdev/offb.c pp = of_get_property(dp, "linux,bootx-width", &len); pp 564 drivers/video/fbdev/offb.c if (pp == NULL) pp 565 drivers/video/fbdev/offb.c pp = of_get_property(dp, "width", &len); pp 566 drivers/video/fbdev/offb.c if (pp && len == sizeof(u32)) pp 567 drivers/video/fbdev/offb.c width = be32_to_cpup(pp); pp 569 drivers/video/fbdev/offb.c pp = of_get_property(dp, "linux,bootx-height", &len); pp 570 drivers/video/fbdev/offb.c if (pp == NULL) pp 571 drivers/video/fbdev/offb.c pp = of_get_property(dp, "height", &len); pp 572 drivers/video/fbdev/offb.c if (pp && len == sizeof(u32)) pp 573 drivers/video/fbdev/offb.c height = be32_to_cpup(pp); pp 575 drivers/video/fbdev/offb.c pp = of_get_property(dp, "linux,bootx-linebytes", &len); pp 576 drivers/video/fbdev/offb.c if (pp == NULL) pp 577 drivers/video/fbdev/offb.c pp = of_get_property(dp, "linebytes", &len); pp 578 drivers/video/fbdev/offb.c if (pp && len == sizeof(u32) && (*pp != 0xffffffffu)) pp 579 drivers/video/fbdev/offb.c pitch = be32_to_cpup(pp); pp 200 drivers/video/fbdev/pm2fb.c u16 pp; pp 228 drivers/video/fbdev/pm2fb.c return pp_table[i].pp; pp 251 drivers/video/fbdev/pm2fb.c unsigned char *pp) pp 260 drivers/video/fbdev/pm2fb.c *mm = *nn = *pp = 0; pp 271 drivers/video/fbdev/pm2fb.c *pp = p; pp 280 drivers/video/fbdev/pm2fb.c unsigned char *pp) pp 288 drivers/video/fbdev/pm2fb.c *mm = *nn = *pp = 0; pp 297 drivers/video/fbdev/pm2fb.c *pp = p; pp 31 fs/afs/callback.c struct hlist_node **pp; pp 56 fs/afs/callback.c for (pp = &server->cb_volumes.first; *pp; pp = &(*pp)->next) { pp 57 fs/afs/callback.c vi = hlist_entry(*pp, struct afs_vol_interest, srv_link); pp 66 fs/afs/callback.c new_vi->srv_link.pprev = pp; pp 67 fs/afs/callback.c new_vi->srv_link.next = *pp; pp 68 fs/afs/callback.c if (*pp) pp 69 fs/afs/callback.c (*pp)->pprev = &new_vi->srv_link.next; pp 70 fs/afs/callback.c *pp = &new_vi->srv_link; pp 229 fs/afs/cell.c struct rb_node *parent, **pp; pp 261 
fs/afs/cell.c pp = &net->cells.rb_node; pp 263 fs/afs/cell.c while (*pp) { pp 264 fs/afs/cell.c parent = *pp; pp 272 fs/afs/cell.c pp = &(*pp)->rb_left; pp 274 fs/afs/cell.c pp = &(*pp)->rb_right; pp 281 fs/afs/cell.c rb_link_node_rcu(&cell->net_node, parent, pp); pp 148 fs/afs/server.c struct rb_node **pp, *p; pp 156 fs/afs/server.c pp = &net->fs_servers.rb_node; pp 158 fs/afs/server.c while (*pp) { pp 159 fs/afs/server.c p = *pp; pp 164 fs/afs/server.c pp = &(*pp)->rb_left; pp 166 fs/afs/server.c pp = &(*pp)->rb_right; pp 172 fs/afs/server.c rb_link_node(&server->uuid_rb, p, pp); pp 66 fs/freevxfs/vxfs_immed.c vxfs_immed_readpage(struct file *fp, struct page *pp) pp 68 fs/freevxfs/vxfs_immed.c struct vxfs_inode_info *vip = VXFS_INO(pp->mapping->host); pp 69 fs/freevxfs/vxfs_immed.c u_int64_t offset = (u_int64_t)pp->index << PAGE_SHIFT; pp 72 fs/freevxfs/vxfs_immed.c kaddr = kmap(pp); pp 74 fs/freevxfs/vxfs_immed.c kunmap(pp); pp 76 fs/freevxfs/vxfs_immed.c flush_dcache_page(pp); pp 77 fs/freevxfs/vxfs_immed.c SetPageUptodate(pp); pp 78 fs/freevxfs/vxfs_immed.c unlock_page(pp); pp 213 fs/freevxfs/vxfs_inode.c struct page *pp; pp 217 fs/freevxfs/vxfs_inode.c pp = vxfs_get_page(ilistp->i_mapping, ino * VXFS_ISIZE / PAGE_SIZE); pp 219 fs/freevxfs/vxfs_inode.c if (!IS_ERR(pp)) { pp 221 fs/freevxfs/vxfs_inode.c caddr_t kaddr = (char *)page_address(pp); pp 229 fs/freevxfs/vxfs_inode.c vxfs_put_page(pp); pp 234 fs/freevxfs/vxfs_inode.c pp, (unsigned long)ino); pp 235 fs/freevxfs/vxfs_inode.c return PTR_ERR(pp); pp 92 fs/freevxfs/vxfs_lookup.c struct page *pp; pp 96 fs/freevxfs/vxfs_lookup.c pp = vxfs_get_page(ip->i_mapping, pos >> PAGE_SHIFT); pp 97 fs/freevxfs/vxfs_lookup.c if (IS_ERR(pp)) pp 99 fs/freevxfs/vxfs_lookup.c kaddr = (char *)page_address(pp); pp 129 fs/freevxfs/vxfs_lookup.c *ppp = pp; pp 135 fs/freevxfs/vxfs_lookup.c vxfs_put_page(pp); pp 159 fs/freevxfs/vxfs_lookup.c struct page *pp; pp 162 fs/freevxfs/vxfs_lookup.c de = vxfs_find_entry(dip, dp, &pp); pp 165 fs/freevxfs/vxfs_lookup.c kunmap(pp); pp 166 fs/freevxfs/vxfs_lookup.c put_page(pp); pp 241 fs/freevxfs/vxfs_lookup.c struct page *pp; pp 246 fs/freevxfs/vxfs_lookup.c pp = vxfs_get_page(ip->i_mapping, pos >> PAGE_SHIFT); pp 247 fs/freevxfs/vxfs_lookup.c if (IS_ERR(pp)) pp 250 fs/freevxfs/vxfs_lookup.c kaddr = (char *)page_address(pp); pp 287 fs/freevxfs/vxfs_lookup.c vxfs_put_page(pp); pp 50 fs/freevxfs/vxfs_subr.c vxfs_put_page(struct page *pp) pp 52 fs/freevxfs/vxfs_subr.c kunmap(pp); pp 53 fs/freevxfs/vxfs_subr.c put_page(pp); pp 70 fs/freevxfs/vxfs_subr.c struct page * pp; pp 72 fs/freevxfs/vxfs_subr.c pp = read_mapping_page(mapping, n, NULL); pp 74 fs/freevxfs/vxfs_subr.c if (!IS_ERR(pp)) { pp 75 fs/freevxfs/vxfs_subr.c kmap(pp); pp 78 fs/freevxfs/vxfs_subr.c if (PageError(pp)) pp 82 fs/freevxfs/vxfs_subr.c return (pp); pp 85 fs/freevxfs/vxfs_subr.c vxfs_put_page(pp); pp 217 fs/fuse/cuse.c static int cuse_parse_one(char **pp, char *end, char **keyp, char **valp) pp 219 fs/fuse/cuse.c char *p = *pp; pp 249 fs/fuse/cuse.c *pp = p; pp 269 fs/hpfs/map.c unsigned p, pp = 0; pp 287 fs/hpfs/map.c hpfs_error(s, "bad dirent size in dnode %08x, dirent %03x, last %03x", secno, p, pp); pp 292 fs/hpfs/map.c hpfs_error(s, "namelen does not match dirent size in dnode %08x, dirent %03x, last %03x", secno, p, pp); pp 298 fs/hpfs/map.c hpfs_error(s, "bad down pointer in dnode %08x, dirent %03x, last %03x", secno, p, pp); pp 301 fs/hpfs/map.c pp = p; pp 308 fs/hpfs/map.c if (d[pp + 30] != 1 || d[pp + 31] != 255) { pp 42 
fs/jffs2/compr_rubin.c struct pushpull pp; pp 47 fs/jffs2/compr_rubin.c static inline void init_pushpull(struct pushpull *pp, char *buf, pp 51 fs/jffs2/compr_rubin.c pp->buf = buf; pp 52 fs/jffs2/compr_rubin.c pp->buflen = buflen; pp 53 fs/jffs2/compr_rubin.c pp->ofs = ofs; pp 54 fs/jffs2/compr_rubin.c pp->reserve = reserve; pp 57 fs/jffs2/compr_rubin.c static inline int pushbit(struct pushpull *pp, int bit, int use_reserved) pp 59 fs/jffs2/compr_rubin.c if (pp->ofs >= pp->buflen - (use_reserved?0:pp->reserve)) pp 63 fs/jffs2/compr_rubin.c pp->buf[pp->ofs >> 3] |= (1<<(7-(pp->ofs & 7))); pp 65 fs/jffs2/compr_rubin.c pp->buf[pp->ofs >> 3] &= ~(1<<(7-(pp->ofs & 7))); pp 67 fs/jffs2/compr_rubin.c pp->ofs++; pp 72 fs/jffs2/compr_rubin.c static inline int pushedbits(struct pushpull *pp) pp 74 fs/jffs2/compr_rubin.c return pp->ofs; pp 77 fs/jffs2/compr_rubin.c static inline int pullbit(struct pushpull *pp) pp 81 fs/jffs2/compr_rubin.c bit = (pp->buf[pp->ofs >> 3] >> (7-(pp->ofs & 7))) & 1; pp 83 fs/jffs2/compr_rubin.c pp->ofs++; pp 112 fs/jffs2/compr_rubin.c ret = pushbit(&rs->pp, (rs->q & UPPER_BIT_RUBIN) ? 1 : 0, 0); pp 144 fs/jffs2/compr_rubin.c pushbit(&rs->pp, (UPPER_BIT_RUBIN & rs->q) ? 1 : 0, 1); pp 159 fs/jffs2/compr_rubin.c rs->rec_q = rs->rec_q * 2 + (long) (pullbit(&rs->pp))) pp 192 fs/jffs2/compr_rubin.c c = pullbit(&rs->pp); pp 270 fs/jffs2/compr_rubin.c init_pushpull(&rs.pp, cpage_out, *dstlen * 8, 0, 32); pp 287 fs/jffs2/compr_rubin.c outpos = (pushedbits(&rs.pp)+7)/8; pp 376 fs/jffs2/compr_rubin.c init_pushpull(&rs.pp, cdata_in, srclen, 0, 0); pp 2887 fs/jfs/jfs_dmap.c int lp, pp, k; pp 2914 fs/jfs/jfs_dmap.c pp = (lp - 1) >> 2; pp 2923 fs/jfs/jfs_dmap.c if (tp->dmt_stree[pp] == max) pp 2928 fs/jfs/jfs_dmap.c tp->dmt_stree[pp] = max; pp 2932 fs/jfs/jfs_dmap.c lp = pp; pp 1636 fs/jfs/jfs_dtree.c dtpage_t *sp, *pp; pp 1661 fs/jfs/jfs_dtree.c DT_GETPAGE(ip, parent->bn, pmp, PSIZE, pp, rc); pp 1837 fs/jfs/jfs_dtree.c tpxd = (pxd_t *) & pp->slot[1]; pp 2438 fs/jfs/jfs_dtree.c dtpage_t *p, *pp, *rp = 0, *lp= 0; pp 2467 fs/jfs/jfs_dtree.c DT_GETSEARCH(ip, btstack.top, bn, pmp, pp, index); pp 2606 fs/jfs/jfs_dtree.c stbl = DT_GETSTBL(pp); pp 2607 fs/jfs/jfs_dtree.c pxd = (pxd_t *) & pp->slot[stbl[index]]; pp 2555 fs/jfs/jfs_xtree.c xtpage_t *p, *pp, *rp, *lp; /* base B+-tree index page */ pp 2595 fs/jfs/jfs_xtree.c XT_GETSEARCH(ip, btstack.top, bn, pmp, pp, index); pp 2603 fs/jfs/jfs_xtree.c xad = &pp->xad[index]; pp 2616 fs/jfs/jfs_xtree.c XT_GETSEARCH(ip, btstack.top, bn, pmp, pp, index); pp 2625 fs/jfs/jfs_xtree.c xad = &pp->xad[index]; pp 2699 fs/jfs/jfs_xtree.c XT_GETSEARCH(ip, btstack.top, bn, pmp, pp, index); pp 2845 fs/jfs/jfs_xtree.c xad = &pp->xad[index]; pp 2850 fs/jfs/jfs_xtree.c xtlck->lwm.length = le16_to_cpu(pp->header.nextindex) - pp 822 fs/nfsd/vfs.c struct page **pp = rqstp->rq_next_page; pp 834 fs/nfsd/vfs.c } else if (page != pp[-1]) { pp 265 fs/xfs/libxfs/xfs_bmap.c __be64 *pp, *thispa; /* pointer to block address */ pp 285 fs/xfs/libxfs/xfs_bmap.c pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, i, sz); pp 287 fs/xfs/libxfs/xfs_bmap.c pp = XFS_BMBT_PTR_ADDR(mp, block, i, dmxr); pp 294 fs/xfs/libxfs/xfs_bmap.c if (*thispa == *pp) { pp 327 fs/xfs/libxfs/xfs_bmap.c __be64 *pp; /* pointer to block address */ pp 351 fs/xfs/libxfs/xfs_bmap.c pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes); pp 352 fs/xfs/libxfs/xfs_bmap.c bno = be64_to_cpu(*pp); pp 384 fs/xfs/libxfs/xfs_bmap.c pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]); pp 385 fs/xfs/libxfs/xfs_bmap.c 
bno = be64_to_cpu(*pp); pp 596 fs/xfs/libxfs/xfs_bmap.c __be64 *pp; /* ptr to block address */ pp 611 fs/xfs/libxfs/xfs_bmap.c pp = XFS_BMAP_BROOT_PTR_ADDR(mp, rblock, 1, ifp->if_broot_bytes); pp 612 fs/xfs/libxfs/xfs_bmap.c cbno = be64_to_cpu(*pp); pp 662 fs/xfs/libxfs/xfs_bmap.c xfs_bmbt_ptr_t *pp; /* root block address pointer */ pp 761 fs/xfs/libxfs/xfs_bmap.c pp = XFS_BMBT_PTR_ADDR(mp, block, 1, xfs_bmbt_get_maxrecs(cur, pp 763 fs/xfs/libxfs/xfs_bmap.c *pp = cpu_to_be64(args.fsbno); pp 1177 fs/xfs/libxfs/xfs_bmap.c __be64 *pp; pp 1195 fs/xfs/libxfs/xfs_bmap.c pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes); pp 1196 fs/xfs/libxfs/xfs_bmap.c bno = be64_to_cpu(*pp); pp 1210 fs/xfs/libxfs/xfs_bmap.c pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]); pp 1211 fs/xfs/libxfs/xfs_bmap.c bno = be64_to_cpu(*pp); pp 1769 fs/xfs/libxfs/xfs_btree.c union xfs_btree_ptr *pp, /* ptr to btree block */ pp 1790 fs/xfs/libxfs/xfs_btree.c error = xfs_btree_ptr_to_daddr(cur, pp, &daddr); pp 1798 fs/xfs/libxfs/xfs_btree.c error = xfs_btree_read_buf_block(cur, pp, 0, blkp, &bp); pp 1864 fs/xfs/libxfs/xfs_btree.c union xfs_btree_ptr *pp; /* ptr to btree block */ pp 1878 fs/xfs/libxfs/xfs_btree.c pp = &ptr; pp 1888 fs/xfs/libxfs/xfs_btree.c error = xfs_btree_lookup_get_block(cur, level, pp, &block); pp 1963 fs/xfs/libxfs/xfs_btree.c pp = xfs_btree_ptr_addr(cur, keyno, block); pp 1965 fs/xfs/libxfs/xfs_btree.c error = xfs_btree_debug_check_ptr(cur, pp, 0, level); pp 2928 fs/xfs/libxfs/xfs_btree.c union xfs_btree_ptr *pp; /* pointer to block addr */ pp 2941 fs/xfs/libxfs/xfs_btree.c pp = xfs_btree_ptr_addr(cur, 1, block); pp 2944 fs/xfs/libxfs/xfs_btree.c error = cur->bc_ops->alloc_block(cur, pp, &nptr, stat); pp 2980 fs/xfs/libxfs/xfs_btree.c error = xfs_btree_debug_check_ptr(cur, pp, i, level); pp 2985 fs/xfs/libxfs/xfs_btree.c xfs_btree_copy_ptrs(cur, cpp, pp, xfs_btree_get_numrecs(cblock)); pp 2991 fs/xfs/libxfs/xfs_btree.c xfs_btree_copy_ptrs(cur, pp, &nptr, 1); pp 3319 fs/xfs/libxfs/xfs_btree.c union xfs_btree_ptr *pp; pp 3322 fs/xfs/libxfs/xfs_btree.c pp = xfs_btree_ptr_addr(cur, ptr, block); pp 3325 fs/xfs/libxfs/xfs_btree.c error = xfs_btree_debug_check_ptr(cur, pp, i, level); pp 3331 fs/xfs/libxfs/xfs_btree.c xfs_btree_shift_ptrs(cur, pp, 1, numrecs - ptr + 1); pp 3339 fs/xfs/libxfs/xfs_btree.c xfs_btree_copy_ptrs(cur, pp, ptrp, 1); pp 3514 fs/xfs/libxfs/xfs_btree.c union xfs_btree_ptr *pp; pp 3578 fs/xfs/libxfs/xfs_btree.c pp = xfs_btree_ptr_addr(cur, 1, block); pp 3587 fs/xfs/libxfs/xfs_btree.c xfs_btree_copy_ptrs(cur, pp, cpp, numrecs); pp 3784 fs/xfs/libxfs/xfs_btree.c union xfs_btree_ptr *pp; pp 3789 fs/xfs/libxfs/xfs_btree.c pp = xfs_btree_ptr_addr(cur, 1, block); pp 3790 fs/xfs/libxfs/xfs_btree.c error = xfs_btree_kill_root(cur, bp, level, pp); pp 4643 fs/xfs/libxfs/xfs_btree.c union xfs_btree_ptr *pp; pp 4716 fs/xfs/libxfs/xfs_btree.c pp = xfs_btree_ptr_addr(cur, cur->bc_ptrs[level], block); pp 4728 fs/xfs/libxfs/xfs_btree.c error = xfs_btree_lookup_get_block(cur, level, pp, pp 499 fs/xfs/libxfs/xfs_btree.h union xfs_btree_ptr *pp, struct xfs_btree_block **blkp); pp 255 fs/xfs/scrub/btree.c union xfs_btree_ptr *pp; pp 293 fs/xfs/scrub/btree.c pp = xfs_btree_ptr_addr(ncur, ncur->bc_ptrs[level + 1], pblock); pp 294 fs/xfs/scrub/btree.c if (!xchk_btree_ptr_ok(bs, level + 1, pp)) pp 299 fs/xfs/scrub/btree.c if (xfs_btree_diff_two_ptrs(cur, pp, sibling)) pp 491 fs/xfs/scrub/btree.c union xfs_btree_ptr *pp, pp 501 fs/xfs/scrub/btree.c error = xfs_btree_lookup_get_block(bs->cur, 
level, pp, pblock); pp 603 fs/xfs/scrub/btree.c union xfs_btree_ptr *pp; pp 681 fs/xfs/scrub/btree.c pp = xfs_btree_ptr_addr(cur, cur->bc_ptrs[level], block); pp 682 fs/xfs/scrub/btree.c if (!xchk_btree_ptr_ok(&bs, level, pp)) { pp 687 fs/xfs/scrub/btree.c error = xchk_btree_get_block(&bs, level, pp, &block, &bp); pp 268 fs/xfs/xfs_bmap_util.c __be64 *pp; pp 297 fs/xfs/xfs_bmap_util.c pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]); pp 298 fs/xfs/xfs_bmap_util.c bno = be64_to_cpu(*pp); pp 344 fs/xfs/xfs_bmap_util.c __be64 *pp; /* pointer to block address */ pp 376 fs/xfs/xfs_bmap_util.c pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes); pp 377 fs/xfs/xfs_bmap_util.c bno = be64_to_cpu(*pp); pp 79 include/linux/if_pppox.h extern int register_pppox_proto(int proto_num, const struct pppox_proto *pp); pp 149 include/linux/mm.h #define mm_zero_struct_page(pp) __mm_zero_struct_page(pp) pp 177 include/linux/mm.h #define mm_zero_struct_page(pp) ((void)memset((pp), 0, sizeof(struct page))) pp 2875 include/linux/netdevice.h static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff *pp, int flush) pp 2877 include/linux/netdevice.h if (PTR_ERR(pp) != -EINPROGRESS) pp 2881 include/linux/netdevice.h struct sk_buff *pp, pp 2885 include/linux/netdevice.h if (PTR_ERR(pp) != -EINPROGRESS) { pp 2892 include/linux/netdevice.h static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff *pp, int flush) pp 2897 include/linux/netdevice.h struct sk_buff *pp, pp 355 include/linux/of.h #define for_each_property_of_node(dn, pp) \ pp 356 include/linux/of.h for (pp = dn->properties; pp != NULL; pp = pp->next) pp 482 include/linux/parport.h extern int parport_proc_register(struct parport *pp); pp 483 include/linux/parport.h extern int parport_proc_unregister(struct parport *pp); pp 1169 include/linux/phy.h struct ethtool_pauseparam *pp); pp 365 include/linux/sunrpc/svc.h struct page **pp = --rqstp->rq_next_page; pp 366 include/linux/sunrpc/svc.h if (*pp) { pp 367 include/linux/sunrpc/svc.h put_page(*pp); pp 368 include/linux/sunrpc/svc.h *pp = NULL; pp 240 include/net/ip_vs.h #define IP_VS_DBG_PKT(level, af, pp, skb, ofs, msg) \ pp 243 include/net/ip_vs.h pp->debug_packet(af, pp, skb, ofs, msg); \ pp 245 include/net/ip_vs.h #define IP_VS_DBG_RL_PKT(level, af, pp, skb, ofs, msg) \ pp 249 include/net/ip_vs.h pp->debug_packet(af, pp, skb, ofs, msg); \ pp 256 include/net/ip_vs.h #define IP_VS_DBG_PKT(level, af, pp, skb, ofs, msg) do {} while (0) pp 257 include/net/ip_vs.h #define IP_VS_DBG_RL_PKT(level, af, pp, skb, ofs, msg) do {} while (0) pp 424 include/net/ip_vs.h void (*init)(struct ip_vs_protocol *pp); pp 426 include/net/ip_vs.h void (*exit)(struct ip_vs_protocol *pp); pp 450 include/net/ip_vs.h int (*snat_handler)(struct sk_buff *skb, struct ip_vs_protocol *pp, pp 453 include/net/ip_vs.h int (*dnat_handler)(struct sk_buff *skb, struct ip_vs_protocol *pp, pp 468 include/net/ip_vs.h void (*debug_packet)(int af, struct ip_vs_protocol *pp, pp 479 include/net/ip_vs.h struct ip_vs_protocol *pp; pp 546 include/net/ip_vs.h struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph); pp 1336 include/net/ip_vs.h int ip_vs_bind_app(struct ip_vs_conn *cp, struct ip_vs_protocol *pp); pp 1367 include/net/ip_vs.h void ip_vs_tcpudp_debug_packet(int af, struct ip_vs_protocol *pp, pp 1462 include/net/ip_vs.h struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph); pp 1464 include/net/ip_vs.h struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph); pp 1466 include/net/ip_vs.h struct ip_vs_protocol 
pp 1468 include/net/ip_vs.h struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
pp 1470 include/net/ip_vs.h struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
pp 1472 include/net/ip_vs.h struct ip_vs_protocol *pp, int offset,
pp 1478 include/net/ip_vs.h struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
pp 1480 include/net/ip_vs.h struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
pp 1482 include/net/ip_vs.h struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
pp 1484 include/net/ip_vs.h struct ip_vs_protocol *pp, struct ip_vs_iphdr *iph);
pp 1486 include/net/ip_vs.h struct ip_vs_protocol *pp, int offset,
pp 1535 include/net/ip_vs.h void ip_vs_nat_icmp(struct sk_buff *skb, struct ip_vs_protocol *pp,
pp 1539 include/net/ip_vs.h void ip_vs_nat_icmp_v6(struct sk_buff *skb, struct ip_vs_protocol *pp,
pp 99 include/net/phonet/phonet.h const struct phonet_protocol *pp);
pp 101 include/net/phonet/phonet.h const struct phonet_protocol *pp);
pp 649 include/scsi/fc_encode.h } *pp;
pp 651 include/scsi/fc_encode.h pp = fc_frame_payload_get(fp, sizeof(*pp));
pp 652 include/scsi/fc_encode.h memset(pp, 0, sizeof(*pp));
pp 653 include/scsi/fc_encode.h pp->prli.prli_cmd = ELS_PRLI;
pp 654 include/scsi/fc_encode.h pp->prli.prli_spp_len = sizeof(struct fc_els_spp);
pp 655 include/scsi/fc_encode.h pp->prli.prli_len = htons(sizeof(*pp));
pp 656 include/scsi/fc_encode.h pp->spp.spp_type = FC_TYPE_FCP;
pp 657 include/scsi/fc_encode.h pp->spp.spp_flags = FC_SPP_EST_IMG_PAIR;
pp 658 include/scsi/fc_encode.h pp->spp.spp_params = htonl(lport->service_params);
pp 194 include/scsi/fc_frame.h void *pp = NULL;
pp 197 include/scsi/fc_frame.h pp = fc_frame_header_get(fp) + 1;
pp 198 include/scsi/fc_frame.h return pp;
pp 2329 kernel/kprobes.c const char *sym, int offset, char *modname, struct kprobe *pp)
pp 2350 kernel/kprobes.c if (!pp)
pp 2351 kernel/kprobes.c pp = p;
pp 2355 kernel/kprobes.c (kprobe_optimized(pp) ? "[OPTIMIZED]" : ""),
pp 2356 kernel/kprobes.c (kprobe_ftrace(pp) ? "[FTRACE]" : ""));
pp 296 kernel/locking/qspinlock_paravirt.h struct pv_node *pp = (struct pv_node *)prev;
pp 304 kernel/locking/qspinlock_paravirt.h if (pv_wait_early(pp, loop)) {
pp 720 kernel/printk/printk.c static void append_char(char **pp, char *e, char c)
pp 722 kernel/printk/printk.c if (*pp < e)
pp 723 kernel/printk/printk.c *(*pp)++ = c;
pp 395 lib/bch.c int k, pp = -1;
pp 408 lib/bch.c k = 2*i-pp;
pp 424 lib/bch.c pp = 2*i;
pp 236 lib/decompress_bunzip2.c int minLen, maxLen, pp;
pp 307 lib/decompress_bunzip2.c pp = 0;
pp 312 lib/decompress_bunzip2.c hufGroup->permute[pp++] = t;
pp 323 lib/decompress_bunzip2.c pp = t = 0;
pp 325 lib/decompress_bunzip2.c pp += temp[i];
pp 336 lib/decompress_bunzip2.c limit[i] = (pp << (maxLen - i)) - 1;
pp 337 lib/decompress_bunzip2.c pp <<= 1;
pp 338 lib/decompress_bunzip2.c base[i+1] = pp-(t += temp[i]);
pp 342 lib/decompress_bunzip2.c limit[maxLen] = pp+temp[maxLen]-1;
pp 462 net/8021q/vlan_core.c struct sk_buff *pp = NULL;
pp 499 net/8021q/vlan_core.c pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
pp 504 net/8021q/vlan_core.c skb_gro_flush_final(skb, pp, flush);
pp 506 net/8021q/vlan_core.c return pp;
pp 144 net/bridge/br_mdb.c struct net_bridge_port_group __rcu **pp;
pp 163 net/bridge/br_mdb.c for (pp = &mp->ports; (p = rcu_dereference(*pp)) != NULL;
pp 164 net/bridge/br_mdb.c pp = &p->next) {
pp 317 net/bridge/br_mdb.c struct net_bridge_port_group __rcu **pp;
pp 330 net/bridge/br_mdb.c for (pp = &mp->ports; (p = mlock_dereference(*pp, br)) != NULL;
pp 331 net/bridge/br_mdb.c pp = &p->next) {
pp 607 net/bridge/br_mdb.c struct net_bridge_port_group __rcu **pp;
pp 632 net/bridge/br_mdb.c for (pp = &mp->ports;
pp 633 net/bridge/br_mdb.c (p = mlock_dereference(*pp, br)) != NULL;
pp 634 net/bridge/br_mdb.c pp = &p->next) {
pp 641 net/bridge/br_mdb.c p = br_multicast_new_port_group(port, group, *pp, state, NULL);
pp 644 net/bridge/br_mdb.c rcu_assign_pointer(*pp, p);
pp 735 net/bridge/br_mdb.c struct net_bridge_port_group __rcu **pp;
pp 758 net/bridge/br_mdb.c for (pp = &mp->ports;
pp 759 net/bridge/br_mdb.c (p = mlock_dereference(*pp, br)) != NULL;
pp 760 net/bridge/br_mdb.c pp = &p->next) {
pp 768 net/bridge/br_mdb.c rcu_assign_pointer(*pp, p->next);
pp 171 net/bridge/br_multicast.c struct net_bridge_port_group __rcu **pp;
pp 177 net/bridge/br_multicast.c for (pp = &mp->ports;
pp 178 net/bridge/br_multicast.c (p = mlock_dereference(*pp, br)) != NULL;
pp 179 net/bridge/br_multicast.c pp = &p->next) {
pp 183 net/bridge/br_multicast.c rcu_assign_pointer(*pp, p->next);
pp 540 net/bridge/br_multicast.c struct net_bridge_port_group __rcu **pp;
pp 561 net/bridge/br_multicast.c for (pp = &mp->ports;
pp 562 net/bridge/br_multicast.c (p = mlock_dereference(*pp, br)) != NULL;
pp 563 net/bridge/br_multicast.c pp = &p->next) {
pp 570 net/bridge/br_multicast.c p = br_multicast_new_port_group(port, group, *pp, 0, src);
pp 573 net/bridge/br_multicast.c rcu_assign_pointer(*pp, p);
pp 1226 net/bridge/br_multicast.c struct net_bridge_port_group __rcu **pp;
pp 1278 net/bridge/br_multicast.c for (pp = &mp->ports;
pp 1279 net/bridge/br_multicast.c (p = mlock_dereference(*pp, br)) != NULL;
pp 1280 net/bridge/br_multicast.c pp = &p->next) {
pp 1302 net/bridge/br_multicast.c struct net_bridge_port_group __rcu **pp;
pp 1361 net/bridge/br_multicast.c for (pp = &mp->ports;
pp 1362 net/bridge/br_multicast.c (p = mlock_dereference(*pp, br)) != NULL;
pp 1363 net/bridge/br_multicast.c pp = &p->next) {
pp 1399 net/bridge/br_multicast.c struct net_bridge_port_group __rcu **pp;
pp 1401 net/bridge/br_multicast.c for (pp = &mp->ports;
pp 1402 net/bridge/br_multicast.c (p = mlock_dereference(*pp, br)) != NULL;
pp 1403 net/bridge/br_multicast.c pp = &p->next) {
pp 1410 net/bridge/br_multicast.c rcu_assign_pointer(*pp, p->next);
pp 337 net/ceph/debugfs.c static int osdc_show(struct seq_file *s, void *pp)
pp 5469 net/core/dev.c struct sk_buff *pp = NULL;
pp 5511 net/core/dev.c pp = INDIRECT_CALL_INET(ptype->callbacks.gro_receive,
pp 5521 net/core/dev.c if (IS_ERR(pp) && PTR_ERR(pp) == -EINPROGRESS) {
pp 5529 net/core/dev.c if (pp) {
pp 5530 net/core/dev.c skb_list_del_init(pp);
pp 5531 net/core/dev.c napi_gro_complete(napi, pp);
pp 457 net/ethernet/eth.c struct sk_buff *pp = NULL;
pp 496 net/ethernet/eth.c pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
pp 501 net/ethernet/eth.c skb_gro_flush_final(skb, pp, flush);
pp 503 net/ethernet/eth.c return pp;
pp 1410 net/ipv4/af_inet.c struct sk_buff *pp = NULL;
pp 1516 net/ipv4/af_inet.c pp = indirect_call_gro_receive(tcp4_gro_receive, udp4_gro_receive,
pp 1523 net/ipv4/af_inet.c skb_gro_flush_final(skb, pp, flush);
pp 1525 net/ipv4/af_inet.c return pp;
pp 236 net/ipv4/fou.c struct sk_buff *pp = NULL;
pp 255 net/ipv4/fou.c pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
pp 260 net/ipv4/fou.c return pp;
pp 316 net/ipv4/fou.c struct sk_buff *pp = NULL;
pp 447 net/ipv4/fou.c pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
pp 453 net/ipv4/fou.c skb_gro_flush_final_remcsum(skb, pp, flush, &grc);
pp 455 net/ipv4/fou.c return pp;
pp 110 net/ipv4/gre_offload.c struct sk_buff *pp = NULL;
pp 216 net/ipv4/gre_offload.c pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
pp 222 net/ipv4/gre_offload.c skb_gro_flush_final(skb, pp, flush);
pp 224 net/ipv4/gre_offload.c return pp;
pp 106 net/ipv4/inetpeer.c struct rb_node **pp, *parent, *next;
pp 109 net/ipv4/inetpeer.c pp = &base->rb_root.rb_node;
pp 114 net/ipv4/inetpeer.c next = rcu_dereference_raw(*pp);
pp 132 net/ipv4/inetpeer.c pp = &next->rb_left;
pp 134 net/ipv4/inetpeer.c pp = &next->rb_right;
pp 137 net/ipv4/inetpeer.c *pp_p = pp;
pp 187 net/ipv4/inetpeer.c struct rb_node **pp, *parent;
pp 196 net/ipv4/inetpeer.c p = lookup(daddr, base, seq, NULL, &gc_cnt, &parent, &pp);
pp 214 net/ipv4/inetpeer.c p = lookup(daddr, base, seq, gc_stack, &gc_cnt, &parent, &pp);
pp 230 net/ipv4/inetpeer.c rb_link_node(&p->rb_node, parent, pp);
pp 145 net/ipv4/nexthop.c struct rb_node **pp, *parent = NULL, *next;
pp 147 net/ipv4/nexthop.c pp = &net->nexthop.rb_root.rb_node;
pp 151 net/ipv4/nexthop.c next = rcu_dereference_raw(*pp);
pp 158 net/ipv4/nexthop.c pp = &next->rb_left;
pp 160 net/ipv4/nexthop.c pp = &next->rb_right;
pp 1010 net/ipv4/nexthop.c struct rb_node **pp, *parent = NULL, *next;
pp 1018 net/ipv4/nexthop.c pp = &root->rb_node;
pp 1022 net/ipv4/nexthop.c next = rtnl_dereference(*pp);
pp 1030 net/ipv4/nexthop.c pp = &next->rb_left;
pp 1032 net/ipv4/nexthop.c pp = &next->rb_right;
pp 1052 net/ipv4/nexthop.c rb_link_node_rcu(&new_nh->rb_node, parent, pp);
pp 878 net/ipv4/tcp_metrics.c struct tcp_metrics_block __rcu **pp;
pp 882 net/ipv4/tcp_metrics.c pp = &hb->chain;
pp 883 net/ipv4/tcp_metrics.c for (tm = deref_locked(*pp); tm; tm = deref_locked(*pp)) {
pp 887 net/ipv4/tcp_metrics.c *pp = tm->tcpm_next;
pp 890 net/ipv4/tcp_metrics.c pp = &tm->tcpm_next;
pp 901 net/ipv4/tcp_metrics.c struct tcp_metrics_block __rcu **pp;
pp 922 net/ipv4/tcp_metrics.c pp = &hb->chain;
pp 924 net/ipv4/tcp_metrics.c for (tm = deref_locked(*pp); tm; tm = deref_locked(*pp)) {
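
In the GRO path above (net/core/dev.c pp 5521), the sk_buff pointer pp can carry an errno instead of an object: a gro_receive callback reports "held, still in progress" by returning ERR_PTR(-EINPROGRESS), and callers test it with IS_ERR()/PTR_ERR(). A simplified userspace rendition of that pointer-encoded-error idiom; the real macros live in include/linux/err.h and these are hand-rolled stand-ins:

#include <stdio.h>
#include <errno.h>
#include <stdint.h>

/* Simplified ERR_PTR()/PTR_ERR()/IS_ERR(): small negative errno values
 * are stashed in the top page of the address space. */
#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(intptr_t)(err))
#define PTR_ERR(ptr)	((long)(intptr_t)(ptr))
#define IS_ERR(ptr)	((uintptr_t)(ptr) >= (uintptr_t)-MAX_ERRNO)

static int value = 42;

static void *lookup(int want_busy)
{
	if (want_busy)
		return ERR_PTR(-EINPROGRESS);	/* error travels in the pointer */
	return &value;
}

int main(void)
{
	void *pp = lookup(1);

	if (IS_ERR(pp) && PTR_ERR(pp) == -EINPROGRESS)
		printf("still in progress, keep holding\n");

	pp = lookup(0);
	if (!IS_ERR(pp))
		printf("got %d\n", *(int *)pp);
	return 0;
}
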
pp 928 net/ipv4/tcp_metrics.c *pp = tm->tcpm_next;
pp 932 net/ipv4/tcp_metrics.c pp = &tm->tcpm_next;
pp 182 net/ipv4/tcp_offload.c struct sk_buff *pp = NULL;
pp 280 net/ipv4/tcp_offload.c pp = p;
pp 285 net/ipv4/tcp_offload.c return pp;
pp 353 net/ipv4/udp_offload.c struct sk_buff *pp = NULL;
pp 395 net/ipv4/udp_offload.c pp = p;
pp 397 net/ipv4/udp_offload.c return pp;
pp 409 net/ipv4/udp_offload.c struct sk_buff *pp = NULL;
pp 423 net/ipv4/udp_offload.c pp = call_gro_receive(udp_gro_receive_segment, head, skb);
pp 425 net/ipv4/udp_offload.c return pp;
pp 458 net/ipv4/udp_offload.c pp = call_gro_receive_sk(udp_sk(sk)->gro_receive, sk, head, skb);
pp 462 net/ipv4/udp_offload.c skb_gro_flush_final(skb, pp, flush);
pp 463 net/ipv4/udp_offload.c return pp;
pp 188 net/ipv6/ip6_offload.c struct sk_buff *pp = NULL;
pp 280 net/ipv6/ip6_offload.c pp = indirect_call_gro_receive_l4(tcp6_gro_receive, udp6_gro_receive,
pp 287 net/ipv6/ip6_offload.c skb_gro_flush_final(skb, pp, flush);
pp 289 net/ipv6/ip6_offload.c return pp;
pp 185 net/netfilter/core.c int nf_hook_entries_insert_raw(struct nf_hook_entries __rcu **pp,
pp 191 net/netfilter/core.c p = rcu_dereference_raw(*pp);
pp 198 net/netfilter/core.c rcu_assign_pointer(*pp, new_hooks);
pp 223 net/netfilter/core.c struct nf_hook_entries __rcu **pp)
pp 262 net/netfilter/core.c rcu_assign_pointer(*pp, new);
pp 318 net/netfilter/core.c struct nf_hook_entries __rcu **pp;
pp 330 net/netfilter/core.c pp = nf_hook_entry_head(net, pf, reg->hooknum, reg->dev);
pp 331 net/netfilter/core.c if (!pp)
pp 336 net/netfilter/core.c p = nf_entry_dereference(*pp);
pp 340 net/netfilter/core.c rcu_assign_pointer(*pp, new_hooks);
pp 389 net/netfilter/core.c struct nf_hook_entries __rcu **pp;
pp 392 net/netfilter/core.c pp = nf_hook_entry_head(net, pf, reg->hooknum, reg->dev);
pp 393 net/netfilter/core.c if (!pp)
pp 398 net/netfilter/core.c p = nf_entry_dereference(*pp);
pp 416 net/netfilter/core.c p = __nf_hook_entries_try_shrink(p, pp);
pp 436 net/netfilter/core.c void nf_hook_entries_delete_raw(struct nf_hook_entries __rcu **pp,
pp 441 net/netfilter/core.c p = rcu_dereference_raw(*pp);
pp 443 net/netfilter/core.c p = __nf_hook_entries_try_shrink(p, pp);
pp 76 net/netfilter/ipvs/ip_vs_app.c struct ip_vs_protocol *pp;
pp 80 net/netfilter/ipvs/ip_vs_app.c if (!(pp = ip_vs_proto_get(proto)))
pp 83 net/netfilter/ipvs/ip_vs_app.c if (!pp->unregister_app)
pp 105 net/netfilter/ipvs/ip_vs_app.c ret = pp->register_app(ipvs, inc);
pp 111 net/netfilter/ipvs/ip_vs_app.c pp->name, inc->name, ntohs(inc->port));
pp 127 net/netfilter/ipvs/ip_vs_app.c struct ip_vs_protocol *pp;
pp 129 net/netfilter/ipvs/ip_vs_app.c if (!(pp = ip_vs_proto_get(inc->protocol)))
pp 132 net/netfilter/ipvs/ip_vs_app.c if (pp->unregister_app)
pp 133 net/netfilter/ipvs/ip_vs_app.c pp->unregister_app(ipvs, inc);
pp 136 net/netfilter/ipvs/ip_vs_app.c pp->name, inc->name, ntohs(inc->port));
pp 260 net/netfilter/ipvs/ip_vs_app.c struct ip_vs_protocol *pp)
pp 262 net/netfilter/ipvs/ip_vs_app.c return pp->app_conn_bind(cp);
pp 682 net/netfilter/ipvs/ip_vs_conn.c ip_vs_bind_app(cp, pd->pp);
pp 985 net/netfilter/ipvs/ip_vs_conn.c ip_vs_bind_app(cp, pd->pp);
pp 73 net/netfilter/ipvs/ip_vs_core.c tcp_snat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
pp 79 net/netfilter/ipvs/ip_vs_core.c udp_snat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
pp 237 net/netfilter/ipvs/ip_vs_core.c if (likely(pd->pp->state_transition))
pp 238 net/netfilter/ipvs/ip_vs_core.c pd->pp->state_transition(cp, direction, skb, pd);
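
tcp_metrics above (*pp = tm->tcpm_next) and the bridge port-group loops before it all walk a singly linked list through a pointer to the link field itself, so removing a node, head or middle alike, is one store through *pp. A standalone sketch of the idiom with an invented port_group type; the kernel versions publish the store with rcu_assign_pointer() for concurrent readers, which plain C omits:

#include <stdio.h>
#include <stdlib.h>

struct port_group {		/* stand-in for net_bridge_port_group */
	int port;
	struct port_group *next;
};

/* Walk via a pointer to the link field; unlinking needs no head special
 * case. The bridge code does this walk with rcu_assign_pointer(*pp, ...). */
static void del_port(struct port_group **head, int port)
{
	struct port_group **pp, *p;

	for (pp = head; (p = *pp) != NULL; pp = &p->next) {
		if (p->port == port) {
			*pp = p->next;	/* splice out, head or middle alike */
			free(p);
			return;
		}
	}
}

int main(void)
{
	struct port_group *head = NULL, **pp = &head;

	for (int i = 1; i <= 3; i++) {
		*pp = calloc(1, sizeof(**pp));
		(*pp)->port = i;
		pp = &(*pp)->next;
	}
	del_port(&head, 1);	/* removing the head is the same code path */
	for (struct port_group *p = head; p; p = p->next)
		printf("port %d\n", p->port);
	return 0;
}
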
pp 457 net/netfilter/ipvs/ip_vs_core.c struct ip_vs_protocol *pp = pd->pp;
pp 492 net/netfilter/ipvs/ip_vs_core.c IP_VS_DBG_PKT(12, svc->af, pp, skb, iph->off,
pp 502 net/netfilter/ipvs/ip_vs_core.c cp = INDIRECT_CALL_1(pp->conn_in_get,
pp 508 net/netfilter/ipvs/ip_vs_core.c IP_VS_DBG_PKT(12, svc->af, pp, skb, iph->off,
pp 643 net/netfilter/ipvs/ip_vs_core.c ret = cp->packet_xmit(skb, cp, pd->pp, iph);
pp 766 net/netfilter/ipvs/ip_vs_core.c void ip_vs_nat_icmp(struct sk_buff *skb, struct ip_vs_protocol *pp,
pp 804 net/netfilter/ipvs/ip_vs_core.c IP_VS_DBG_PKT(11, AF_INET, pp, skb, (void *)ciph - (void *)iph,
pp 807 net/netfilter/ipvs/ip_vs_core.c IP_VS_DBG_PKT(11, AF_INET, pp, skb, (void *)ciph - (void *)iph,
pp 812 net/netfilter/ipvs/ip_vs_core.c void ip_vs_nat_icmp_v6(struct sk_buff *skb, struct ip_vs_protocol *pp,
pp 861 net/netfilter/ipvs/ip_vs_core.c IP_VS_DBG_PKT(11, AF_INET6, pp, skb,
pp 865 net/netfilter/ipvs/ip_vs_core.c IP_VS_DBG_PKT(11, AF_INET6, pp, skb,
pp 877 net/netfilter/ipvs/ip_vs_core.c struct ip_vs_protocol *pp,
pp 902 net/netfilter/ipvs/ip_vs_core.c ip_vs_nat_icmp_v6(skb, pp, cp, 1);
pp 905 net/netfilter/ipvs/ip_vs_core.c ip_vs_nat_icmp(skb, pp, cp, 1);
pp 941 net/netfilter/ipvs/ip_vs_core.c struct ip_vs_protocol *pp;
pp 983 net/netfilter/ipvs/ip_vs_core.c pp = ip_vs_proto_get(cih->protocol);
pp 984 net/netfilter/ipvs/ip_vs_core.c if (!pp)
pp 989 net/netfilter/ipvs/ip_vs_core.c pp->dont_defrag))
pp 992 net/netfilter/ipvs/ip_vs_core.c IP_VS_DBG_PKT(11, AF_INET, pp, skb, offset,
pp 998 net/netfilter/ipvs/ip_vs_core.c cp = INDIRECT_CALL_1(pp->conn_out_get, ip_vs_conn_out_get_proto,
pp 1005 net/netfilter/ipvs/ip_vs_core.c pp, ciph.len, ihl, hooknum);
pp 1016 net/netfilter/ipvs/ip_vs_core.c struct ip_vs_protocol *pp;
pp 1050 net/netfilter/ipvs/ip_vs_core.c pp = ip_vs_proto_get(ciph.protocol);
pp 1051 net/netfilter/ipvs/ip_vs_core.c if (!pp)
pp 1055 net/netfilter/ipvs/ip_vs_core.c cp = INDIRECT_CALL_1(pp->conn_out_get, ip_vs_conn_out_get_proto,
pp 1063 net/netfilter/ipvs/ip_vs_core.c pp, offset, sizeof(struct ipv6hdr),
pp 1283 net/netfilter/ipvs/ip_vs_core.c struct ip_vs_protocol *pp = pd->pp;
pp 1285 net/netfilter/ipvs/ip_vs_core.c IP_VS_DBG_PKT(11, af, pp, skb, iph->off, "Outgoing packet");
pp 1291 net/netfilter/ipvs/ip_vs_core.c if (pp->snat_handler &&
pp 1292 net/netfilter/ipvs/ip_vs_core.c !SNAT_CALL(pp->snat_handler, skb, pp, cp, iph))
pp 1323 net/netfilter/ipvs/ip_vs_core.c IP_VS_DBG_PKT(10, af, pp, skb, iph->off, "After SNAT");
pp 1351 net/netfilter/ipvs/ip_vs_core.c struct ip_vs_protocol *pp;
pp 1401 net/netfilter/ipvs/ip_vs_core.c pp = pd->pp;
pp 1407 net/netfilter/ipvs/ip_vs_core.c if (unlikely(ip_is_fragment(ip_hdr(skb)) && !pp->dont_defrag)) {
pp 1418 net/netfilter/ipvs/ip_vs_core.c cp = INDIRECT_CALL_1(pp->conn_out_get, ip_vs_conn_out_get_proto,
pp 1434 net/netfilter/ipvs/ip_vs_core.c if (pp->protocol == IPPROTO_UDP) {
pp 1443 net/netfilter/ipvs/ip_vs_core.c (pp->protocol == IPPROTO_TCP ||
pp 1444 net/netfilter/ipvs/ip_vs_core.c pp->protocol == IPPROTO_UDP ||
pp 1445 net/netfilter/ipvs/ip_vs_core.c pp->protocol == IPPROTO_SCTP)) {
pp 1485 net/netfilter/ipvs/ip_vs_core.c IP_VS_DBG_PKT(12, af, pp, skb, iph.off,
pp 1550 net/netfilter/ipvs/ip_vs_core.c struct ip_vs_protocol *pp = pd->pp;
pp 1558 net/netfilter/ipvs/ip_vs_core.c if (!pp->conn_schedule(ipvs, af, skb, pd, verdict, cpp, iph))
pp 1564 net/netfilter/ipvs/ip_vs_core.c IP_VS_DBG_PKT(12, af, pp, skb, iph->off,
pp 1569 net/netfilter/ipvs/ip_vs_core.c IP_VS_DBG_PKT(7, af, pp, skb, iph->off,
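
ip_vs_core drives packet handling through the per-protocol ops behind pd->pp, and every hook is optional: note the guards "if (likely(pd->pp->state_transition))" and "if (pp->snat_handler && ...)" above. A hedged sketch of that ops-table-with-optional-hooks pattern, using invented stand-in types rather than struct ip_vs_protocol:

#include <stdio.h>

struct pkt { int len; };

/* Stand-in for struct ip_vs_protocol: a name plus optional hooks. */
struct proto_ops {
	const char *name;
	int  (*snat_handler)(struct pkt *p);      /* may be NULL */
	void (*state_transition)(struct pkt *p);  /* may be NULL */
};

static int tcp_snat(struct pkt *p)
{
	printf("tcp snat, len %d\n", p->len);
	return 1;	/* nonzero: translation succeeded */
}

static const struct proto_ops tcp_ops = {
	.name = "TCP",
	.snat_handler = tcp_snat,
	/* .state_transition intentionally absent */
};

/* Callers test each hook before dispatching, as ip_vs_core does. */
static int handle(const struct proto_ops *pp, struct pkt *p)
{
	if (pp->snat_handler && !pp->snat_handler(p))
		return -1;	/* handler ran and rejected the packet */
	if (pp->state_transition)
		pp->state_transition(p);
	return 0;
}

int main(void)
{
	struct pkt p = { .len = 64 };

	return handle(&tcp_ops, &p);
}
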
pp 1661 net/netfilter/ipvs/ip_vs_core.c struct ip_vs_protocol *pp;
pp 1759 net/netfilter/ipvs/ip_vs_core.c pp = pd->pp;
pp 1763 net/netfilter/ipvs/ip_vs_core.c pp->dont_defrag))
pp 1766 net/netfilter/ipvs/ip_vs_core.c IP_VS_DBG_PKT(11, AF_INET, pp, skb, offset,
pp 1776 net/netfilter/ipvs/ip_vs_core.c cp = INDIRECT_CALL_1(pp->conn_in_get, ip_vs_conn_in_get_proto,
pp 1859 net/netfilter/ipvs/ip_vs_core.c verdict = ip_vs_icmp_xmit(skb, cp, pp, offset, hooknum, &ciph);
pp 1878 net/netfilter/ipvs/ip_vs_core.c struct ip_vs_protocol *pp;
pp 1917 net/netfilter/ipvs/ip_vs_core.c pp = pd->pp;
pp 1923 net/netfilter/ipvs/ip_vs_core.c IP_VS_DBG_PKT(11, AF_INET6, pp, skb, offset,
pp 1929 net/netfilter/ipvs/ip_vs_core.c cp = INDIRECT_CALL_1(pp->conn_in_get, ip_vs_conn_in_get_proto,
pp 1960 net/netfilter/ipvs/ip_vs_core.c verdict = ip_vs_icmp_xmit_v6(skb, cp, pp, offset, hooknum, &ciph);
pp 1981 net/netfilter/ipvs/ip_vs_core.c struct ip_vs_protocol *pp;
pp 2055 net/netfilter/ipvs/ip_vs_core.c pp = pd->pp;
pp 2059 net/netfilter/ipvs/ip_vs_core.c cp = INDIRECT_CALL_1(pp->conn_in_get, ip_vs_conn_in_get_proto,
pp 2100 net/netfilter/ipvs/ip_vs_core.c IP_VS_DBG_PKT(11, af, pp, skb, iph.off, "Incoming packet");
pp 2126 net/netfilter/ipvs/ip_vs_core.c ret = cp->packet_xmit(skb, cp, pp, &iph);
pp 49 net/netfilter/ipvs/ip_vs_proto.c static int __used __init register_ip_vs_protocol(struct ip_vs_protocol *pp)
pp 51 net/netfilter/ipvs/ip_vs_proto.c unsigned int hash = IP_VS_PROTO_HASH(pp->protocol);
pp 53 net/netfilter/ipvs/ip_vs_proto.c pp->next = ip_vs_proto_table[hash];
pp 54 net/netfilter/ipvs/ip_vs_proto.c ip_vs_proto_table[hash] = pp;
pp 56 net/netfilter/ipvs/ip_vs_proto.c if (pp->init != NULL)
pp 57 net/netfilter/ipvs/ip_vs_proto.c pp->init(pp);
pp 66 net/netfilter/ipvs/ip_vs_proto.c register_ip_vs_proto_netns(struct netns_ipvs *ipvs, struct ip_vs_protocol *pp)
pp 68 net/netfilter/ipvs/ip_vs_proto.c unsigned int hash = IP_VS_PROTO_HASH(pp->protocol);
pp 75 net/netfilter/ipvs/ip_vs_proto.c pd->pp = pp; /* For speed issues */
pp 80 net/netfilter/ipvs/ip_vs_proto.c if (pp->init_netns != NULL) {
pp 81 net/netfilter/ipvs/ip_vs_proto.c int ret = pp->init_netns(ipvs, pd);
pp 96 net/netfilter/ipvs/ip_vs_proto.c static int unregister_ip_vs_protocol(struct ip_vs_protocol *pp)
pp 99 net/netfilter/ipvs/ip_vs_proto.c unsigned int hash = IP_VS_PROTO_HASH(pp->protocol);
pp 103 net/netfilter/ipvs/ip_vs_proto.c if (*pp_p == pp) {
pp 104 net/netfilter/ipvs/ip_vs_proto.c *pp_p = pp->next;
pp 105 net/netfilter/ipvs/ip_vs_proto.c if (pp->exit != NULL)
pp 106 net/netfilter/ipvs/ip_vs_proto.c pp->exit(pp);
pp 121 net/netfilter/ipvs/ip_vs_proto.c unsigned int hash = IP_VS_PROTO_HASH(pd->pp->protocol);
pp 127 net/netfilter/ipvs/ip_vs_proto.c if (pd->pp->exit_netns != NULL)
pp 128 net/netfilter/ipvs/ip_vs_proto.c pd->pp->exit_netns(ipvs, pd);
pp 142 net/netfilter/ipvs/ip_vs_proto.c struct ip_vs_protocol *pp;
pp 145 net/netfilter/ipvs/ip_vs_proto.c for (pp = ip_vs_proto_table[hash]; pp; pp = pp->next) {
pp 146 net/netfilter/ipvs/ip_vs_proto.c if (pp->protocol == proto)
pp 147 net/netfilter/ipvs/ip_vs_proto.c return pp;
pp 164 net/netfilter/ipvs/ip_vs_proto.c if (pd->pp->protocol == proto)
pp 182 net/netfilter/ipvs/ip_vs_proto.c if (pd->pp->timeout_change)
pp 183 net/netfilter/ipvs/ip_vs_proto.c pd->pp->timeout_change(pd, flags);
pp 199 net/netfilter/ipvs/ip_vs_proto.c struct ip_vs_protocol *pp;
pp 207 net/netfilter/ipvs/ip_vs_proto.c pp = ip_vs_proto_get(cp->protocol);
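
register_ip_vs_protocol() above (pp 53-54) threads each protocol onto the front of a hash-bucket chain through its embedded next pointer, and ip_vs_proto_get() (pp 145-147) walks that chain. The same intrusive chained hash table in miniature, with an invented bucket count and stand-in type:

#include <stdio.h>

#define PROTO_TAB_SIZE	32
#define PROTO_HASH(proto)	((proto) & (PROTO_TAB_SIZE - 1))

struct protocol {		/* stand-in for struct ip_vs_protocol */
	int proto;
	const char *name;
	struct protocol *next;	/* intrusive chain link */
};

static struct protocol *proto_table[PROTO_TAB_SIZE];

static void register_protocol(struct protocol *pp)
{
	unsigned int hash = PROTO_HASH(pp->proto);

	pp->next = proto_table[hash];	/* push-front onto the bucket */
	proto_table[hash] = pp;
}

static struct protocol *proto_get(int proto)
{
	struct protocol *pp;

	for (pp = proto_table[PROTO_HASH(proto)]; pp; pp = pp->next)
		if (pp->proto == proto)
			return pp;
	return NULL;
}

int main(void)
{
	static struct protocol tcp = { 6, "TCP", NULL };
	static struct protocol udp = { 17, "UDP", NULL };

	register_protocol(&tcp);
	register_protocol(&udp);
	printf("%s\n", proto_get(17) ? proto_get(17)->name : "?");
	return 0;
}
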
pp 208 net/netfilter/ipvs/ip_vs_proto.c if (pp == NULL || pp->state_name == NULL)
pp 210 net/netfilter/ipvs/ip_vs_proto.c return pp->state_name(state);
pp 215 net/netfilter/ipvs/ip_vs_proto.c ip_vs_tcpudp_debug_packet_v4(struct ip_vs_protocol *pp,
pp 242 net/netfilter/ipvs/ip_vs_proto.c pr_debug("%s: %s %s\n", msg, pp->name, buf);
pp 247 net/netfilter/ipvs/ip_vs_proto.c ip_vs_tcpudp_debug_packet_v6(struct ip_vs_protocol *pp,
pp 274 net/netfilter/ipvs/ip_vs_proto.c pr_debug("%s: %s %s\n", msg, pp->name, buf);
pp 280 net/netfilter/ipvs/ip_vs_proto.c ip_vs_tcpudp_debug_packet(int af, struct ip_vs_protocol *pp,
pp 287 net/netfilter/ipvs/ip_vs_proto.c ip_vs_tcpudp_debug_packet_v6(pp, skb, offset, msg);
pp 290 net/netfilter/ipvs/ip_vs_proto.c ip_vs_tcpudp_debug_packet_v4(pp, skb, offset, msg);
pp 376 net/netfilter/ipvs/ip_vs_proto.c struct ip_vs_protocol *pp;
pp 381 net/netfilter/ipvs/ip_vs_proto.c while ((pp = ip_vs_proto_table[i]) != NULL)
pp 382 net/netfilter/ipvs/ip_vs_proto.c unregister_ip_vs_protocol(pp);
pp 13 net/netfilter/ipvs/ip_vs_proto_sctp.c sctp_csum_check(int af, struct sk_buff *skb, struct ip_vs_protocol *pp);
pp 91 net/netfilter/ipvs/ip_vs_proto_sctp.c sctp_snat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
pp 111 net/netfilter/ipvs/ip_vs_proto_sctp.c if (!sctp_csum_check(cp->af, skb, pp))
pp 138 net/netfilter/ipvs/ip_vs_proto_sctp.c sctp_dnat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
pp 158 net/netfilter/ipvs/ip_vs_proto_sctp.c if (!sctp_csum_check(cp->af, skb, pp))
pp 186 net/netfilter/ipvs/ip_vs_proto_sctp.c sctp_csum_check(int af, struct sk_buff *skb, struct ip_vs_protocol *pp)
pp 205 net/netfilter/ipvs/ip_vs_proto_sctp.c IP_VS_DBG_RL_PKT(0, af, pp, skb, 0,
pp 441 net/netfilter/ipvs/ip_vs_proto_sctp.c pd->pp->name,
pp 32 net/netfilter/ipvs/ip_vs_proto_tcp.c tcp_csum_check(int af, struct sk_buff *skb, struct ip_vs_protocol *pp);
pp 147 net/netfilter/ipvs/ip_vs_proto_tcp.c tcp_snat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
pp 169 net/netfilter/ipvs/ip_vs_proto_tcp.c if (!tcp_csum_check(cp->af, skb, pp))
pp 217 net/netfilter/ipvs/ip_vs_proto_tcp.c pp->name, tcph->check,
pp 225 net/netfilter/ipvs/ip_vs_proto_tcp.c tcp_dnat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
pp 247 net/netfilter/ipvs/ip_vs_proto_tcp.c if (!tcp_csum_check(cp->af, skb, pp))
pp 304 net/netfilter/ipvs/ip_vs_proto_tcp.c tcp_csum_check(int af, struct sk_buff *skb, struct ip_vs_protocol *pp)
pp 327 net/netfilter/ipvs/ip_vs_proto_tcp.c IP_VS_DBG_RL_PKT(0, af, pp, skb, 0,
pp 338 net/netfilter/ipvs/ip_vs_proto_tcp.c IP_VS_DBG_RL_PKT(0, af, pp, skb, 0,
pp 544 net/netfilter/ipvs/ip_vs_proto_tcp.c pd->pp->name,
pp 28 net/netfilter/ipvs/ip_vs_proto_udp.c udp_csum_check(int af, struct sk_buff *skb, struct ip_vs_protocol *pp);
pp 136 net/netfilter/ipvs/ip_vs_proto_udp.c udp_snat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
pp 158 net/netfilter/ipvs/ip_vs_proto_udp.c if (!udp_csum_check(cp->af, skb, pp))
pp 211 net/netfilter/ipvs/ip_vs_proto_udp.c pp->name, udph->check,
pp 219 net/netfilter/ipvs/ip_vs_proto_udp.c udp_dnat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
pp 241 net/netfilter/ipvs/ip_vs_proto_udp.c if (!udp_csum_check(cp->af, skb, pp))
pp 300 net/netfilter/ipvs/ip_vs_proto_udp.c udp_csum_check(int af, struct sk_buff *skb, struct ip_vs_protocol *pp)
pp 330 net/netfilter/ipvs/ip_vs_proto_udp.c IP_VS_DBG_RL_PKT(0, af, pp, skb, 0,
pp 341 net/netfilter/ipvs/ip_vs_proto_udp.c IP_VS_DBG_RL_PKT(0, af, pp, skb, 0,
pp 968 net/netfilter/ipvs/ip_vs_sync.c struct ip_vs_protocol *pp;
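
The IP_VS_DBG_RL_PKT(0, af, pp, skb, 0, ...) call sites above stay unconditional because the macro itself (see include/net/ip_vs.h pp 240-257 earlier in this listing) expands either to a guarded debug call or to do {} while (0). A tiny standalone sketch of that compile-away debug-macro pattern:

#include <stdio.h>

#define DEBUG_LEVEL 10

/* With debugging configured in, the macro expands to a guarded call;
 * otherwise to do {} while (0), so call sites need no #ifdefs. */
#ifdef DEBUG_LEVEL
#define DBG_PKT(level, msg)				\
	do {						\
		if ((level) <= DEBUG_LEVEL)		\
			printf("dbg: %s\n", msg);	\
	} while (0)
#else
#define DBG_PKT(level, msg) do {} while (0)
#endif

int main(void)
{
	DBG_PKT(0, "checksum failed");	/* printed: 0 <= 10 */
	DBG_PKT(12, "verbose trace");	/* suppressed: 12 > 10 */
	return 0;
}
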
pp 998 net/netfilter/ipvs/ip_vs_sync.c pp = ip_vs_proto_get(s->protocol);
pp 999 net/netfilter/ipvs/ip_vs_sync.c if (!pp) {
pp 1004 net/netfilter/ipvs/ip_vs_sync.c if (state >= pp->num_states) {
pp 1006 net/netfilter/ipvs/ip_vs_sync.c pp->name, state);
pp 1077 net/netfilter/ipvs/ip_vs_sync.c struct ip_vs_protocol *pp;
pp 1156 net/netfilter/ipvs/ip_vs_sync.c pp = ip_vs_proto_get(s->v4.protocol);
pp 1157 net/netfilter/ipvs/ip_vs_sync.c if (!pp) {
pp 1163 net/netfilter/ipvs/ip_vs_sync.c if (state >= pp->num_states) {
pp 1165 net/netfilter/ipvs/ip_vs_sync.c pp->name, state);
pp 691 net/netfilter/ipvs/ip_vs_xmit.c struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh)
pp 705 net/netfilter/ipvs/ip_vs_xmit.c struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh)
pp 734 net/netfilter/ipvs/ip_vs_xmit.c struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh)
pp 766 net/netfilter/ipvs/ip_vs_xmit.c struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh)
pp 802 net/netfilter/ipvs/ip_vs_xmit.c IP_VS_DBG_RL_PKT(10, AF_INET, pp, skb, ipvsh->off,
pp 812 net/netfilter/ipvs/ip_vs_xmit.c IP_VS_DBG_RL_PKT(1, AF_INET, pp, skb, ipvsh->off,
pp 826 net/netfilter/ipvs/ip_vs_xmit.c if (pp->dnat_handler && !pp->dnat_handler(skb, pp, cp, ipvsh))
pp 831 net/netfilter/ipvs/ip_vs_xmit.c IP_VS_DBG_PKT(10, AF_INET, pp, skb, ipvsh->off, "After DNAT");
pp 854 net/netfilter/ipvs/ip_vs_xmit.c struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh)
pp 890 net/netfilter/ipvs/ip_vs_xmit.c IP_VS_DBG_RL_PKT(10, AF_INET6, pp, skb, ipvsh->off,
pp 901 net/netfilter/ipvs/ip_vs_xmit.c IP_VS_DBG_RL_PKT(1, AF_INET6, pp, skb, ipvsh->off,
pp 915 net/netfilter/ipvs/ip_vs_xmit.c if (pp->dnat_handler && !pp->dnat_handler(skb, pp, cp, ipvsh))
pp 919 net/netfilter/ipvs/ip_vs_xmit.c IP_VS_DBG_PKT(10, AF_INET6, pp, skb, ipvsh->off, "After DNAT");
pp 1134 net/netfilter/ipvs/ip_vs_xmit.c struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh)
pp 1284 net/netfilter/ipvs/ip_vs_xmit.c struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh)
pp 1436 net/netfilter/ipvs/ip_vs_xmit.c struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh)
pp 1470 net/netfilter/ipvs/ip_vs_xmit.c struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh)
pp 1509 net/netfilter/ipvs/ip_vs_xmit.c struct ip_vs_protocol *pp, int offset, unsigned int hooknum,
pp 1524 net/netfilter/ipvs/ip_vs_xmit.c rc = cp->packet_xmit(skb, cp, pp, iph);
pp 1580 net/netfilter/ipvs/ip_vs_xmit.c ip_vs_nat_icmp(skb, pp, cp, 0);
pp 1599 net/netfilter/ipvs/ip_vs_xmit.c struct ip_vs_protocol *pp, int offset, unsigned int hooknum,
pp 1614 net/netfilter/ipvs/ip_vs_xmit.c rc = cp->packet_xmit(skb, cp, pp, ipvsh);
pp 1669 net/netfilter/ipvs/ip_vs_xmit.c ip_vs_nat_icmp_v6(skb, pp, cp, 0);
pp 16 net/netfilter/nf_internals.h void nf_hook_entries_delete_raw(struct nf_hook_entries __rcu **pp,
pp 18 net/netfilter/nf_internals.h int nf_hook_entries_insert_raw(struct nf_hook_entries __rcu **pp,
pp 56 net/netfilter/xt_ipvs.c struct ip_vs_protocol *pp;
pp 81 net/netfilter/xt_ipvs.c pp = ip_vs_proto_get(iph.protocol);
pp 82 net/netfilter/xt_ipvs.c if (unlikely(!pp)) {
pp 90 net/netfilter/xt_ipvs.c cp = pp->conn_out_get(ipvs, family, skb, &iph);
pp 73 net/nfc/digital_dep.c u8 pp;
pp 85 net/nfc/digital_dep.c u8 pp;
pp 443 net/nfc/digital_dep.c payload_bits = DIGITAL_PAYLOAD_PP_TO_BITS(atr_res->pp);
pp 513 net/nfc/digital_dep.c atr_req->pp = DIGITAL_PAYLOAD_BITS_TO_PP(payload_bits);
pp 516 net/nfc/digital_dep.c atr_req->pp |= DIGITAL_GB_BIT;
pp 1519 net/nfc/digital_dep.c atr_res->pp = DIGITAL_PAYLOAD_BITS_TO_PP(payload_bits);
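
ip_vs_sync.c above validates a state index that arrived from the network against pp->num_states before it is ever used to index the protocol's state table. A minimal sketch of that validate-untrusted-index step, with an invented state table:

#include <stdio.h>

struct protocol {
	const char *name;
	unsigned int num_states;
	const char *const *state_names;
};

static const char *const tcp_states[] = { "NONE", "ESTABLISHED", "SYN_SENT" };
static const struct protocol tcp = { "TCP", 3, tcp_states };

static const char *state_name(const struct protocol *pp, unsigned int state)
{
	if (state >= pp->num_states)	/* reject out-of-range wire input */
		return "ERR!";
	return pp->state_names[state];
}

int main(void)
{
	printf("%s\n", state_name(&tcp, 1));	/* ESTABLISHED */
	printf("%s\n", state_name(&tcp, 7));	/* ERR! */
	return 0;
}
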
pp 1524 net/nfc/digital_dep.c atr_res->pp |= DIGITAL_GB_BIT;
pp 1597 net/nfc/digital_dep.c payload_bits = DIGITAL_PAYLOAD_PP_TO_BITS(atr_req->pp);
pp 29 net/phonet/af_phonet.c const struct phonet_protocol *pp;
pp 35 net/phonet/af_phonet.c pp = rcu_dereference(proto_tab[protocol]);
pp 36 net/phonet/af_phonet.c if (pp && !try_module_get(pp->prot->owner))
pp 37 net/phonet/af_phonet.c pp = NULL;
pp 40 net/phonet/af_phonet.c return pp;
pp 43 net/phonet/af_phonet.c static inline void phonet_proto_put(const struct phonet_protocol *pp)
pp 45 net/phonet/af_phonet.c module_put(pp->prot->owner);
pp 460 net/phonet/af_phonet.c const struct phonet_protocol *pp)
pp 467 net/phonet/af_phonet.c err = proto_register(pp->prot, 1);
pp 475 net/phonet/af_phonet.c rcu_assign_pointer(proto_tab[protocol], pp);
pp 483 net/phonet/af_phonet.c const struct phonet_protocol *pp)
pp 486 net/phonet/af_phonet.c BUG_ON(proto_tab[protocol] != pp);
pp 490 net/phonet/af_phonet.c proto_unregister(pp->prot);
pp 107 net/rxrpc/call_accept.c struct rb_node *parent, **pp;
pp 110 net/rxrpc/call_accept.c pp = &rx->calls.rb_node;
pp 112 net/rxrpc/call_accept.c while (*pp) {
pp 113 net/rxrpc/call_accept.c parent = *pp;
pp 116 net/rxrpc/call_accept.c pp = &(*pp)->rb_left;
pp 118 net/rxrpc/call_accept.c pp = &(*pp)->rb_right;
pp 128 net/rxrpc/call_accept.c rb_link_node(&call->sock_node, parent, pp);
pp 467 net/rxrpc/call_accept.c struct rb_node *parent, **pp;
pp 484 net/rxrpc/call_accept.c pp = &rx->calls.rb_node;
pp 486 net/rxrpc/call_accept.c while (*pp) {
pp 487 net/rxrpc/call_accept.c parent = *pp;
pp 491 net/rxrpc/call_accept.c pp = &(*pp)->rb_left;
pp 493 net/rxrpc/call_accept.c pp = &(*pp)->rb_right;
pp 522 net/rxrpc/call_accept.c pp = &rx->calls.rb_node;
pp 524 net/rxrpc/call_accept.c while (*pp) {
pp 525 net/rxrpc/call_accept.c parent = *pp;
pp 529 net/rxrpc/call_accept.c pp = &(*pp)->rb_left;
pp 531 net/rxrpc/call_accept.c pp = &(*pp)->rb_right;
pp 552 net/rxrpc/call_accept.c rb_link_node(&call->sock_node, parent, pp);
pp 227 net/rxrpc/call_object.c struct rb_node *parent, **pp;
pp 254 net/rxrpc/call_object.c pp = &rx->calls.rb_node;
pp 256 net/rxrpc/call_object.c while (*pp) {
pp 257 net/rxrpc/call_object.c parent = *pp;
pp 261 net/rxrpc/call_object.c pp = &(*pp)->rb_left;
pp 263 net/rxrpc/call_object.c pp = &(*pp)->rb_right;
pp 272 net/rxrpc/call_object.c rb_link_node(&call->sock_node, parent, pp);
pp 283 net/rxrpc/conn_client.c struct rb_node *p, **pp, *parent;
pp 370 net/rxrpc/conn_client.c pp = &local->client_conns.rb_node;
pp 372 net/rxrpc/conn_client.c while (*pp) {
pp 373 net/rxrpc/conn_client.c parent = *pp;
pp 383 net/rxrpc/conn_client.c pp = &(*pp)->rb_left;
pp 385 net/rxrpc/conn_client.c pp = &(*pp)->rb_right;
pp 402 net/rxrpc/conn_client.c rb_link_node(&candidate->client_node, parent, pp);
pp 68 net/rxrpc/conn_service.c struct rb_node **pp, *parent;
pp 72 net/rxrpc/conn_service.c pp = &peer->service_conns.rb_node;
pp 74 net/rxrpc/conn_service.c while (*pp) {
pp 75 net/rxrpc/conn_service.c parent = *pp;
pp 80 net/rxrpc/conn_service.c pp = &(*pp)->rb_left;
pp 82 net/rxrpc/conn_service.c pp = &(*pp)->rb_right;
pp 87 net/rxrpc/conn_service.c rb_link_node_rcu(&conn->service_node, parent, pp);
pp 695 net/rxrpc/key.c struct rxrpc_key_token *token, **pp;
pp 779 net/rxrpc/key.c pp = (struct rxrpc_key_token **)&prep->payload.data[0];
pp 780 net/rxrpc/key.c while (*pp)
pp 781 net/rxrpc/key.c pp = &(*pp)->next;
pp 782 net/rxrpc/key.c *pp = token;
pp 8171 net/sctp/socket.c struct sctp_bind_bucket *pp;
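
The rxrpc walks above descend a tree while keeping parent and pp, the address of the NULL child slot where the new node belongs, so rb_link_node(..., parent, pp) can attach it without a second search. A plain binary-search-tree sketch of the same two-phase insert; the kernel's rbtree additionally rebalances, which is omitted here:

#include <stdio.h>
#include <stdlib.h>

struct node {
	int key;
	struct node *left, *right;
};

/* Descend keeping pp aimed at the child slot to fill; the kernel's
 * rb_link_node(node, parent, pp) consumes exactly this slot pointer. */
static void insert(struct node **root, int key)
{
	struct node **pp = root, *next;

	while ((next = *pp) != NULL) {
		if (key < next->key)
			pp = &next->left;
		else
			pp = &next->right;
	}
	*pp = calloc(1, sizeof(**pp));
	(*pp)->key = key;
}

static void inorder(const struct node *n)
{
	if (!n)
		return;
	inorder(n->left);
	printf("%d ", n->key);
	inorder(n->right);
}

int main(void)
{
	struct node *root = NULL;
	int keys[] = { 5, 2, 8, 1 };

	for (unsigned int i = 0; i < sizeof(keys) / sizeof(keys[0]); i++)
		insert(&root, keys[i]);
	inorder(root);
	printf("\n");
	return 0;
}

net/rxrpc/key.c (pp 780-782) uses the same slot-pointer trick linearly: chase pp to the terminating NULL link, then store the new token through *pp, appending without a tail pointer.
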
pp 8200 net/sctp/socket.c sctp_for_each_hentry(pp, &head->chain)
pp 8201 net/sctp/socket.c if ((pp->port == rover) &&
pp 8202 net/sctp/socket.c net_eq(sock_net(sk), pp->net))
pp 8228 net/sctp/socket.c sctp_for_each_hentry(pp, &head->chain) {
pp 8229 net/sctp/socket.c if ((pp->port == snum) && net_eq(pp->net, sock_net(sk)))
pp 8233 net/sctp/socket.c pp = NULL;
pp 8236 net/sctp/socket.c if (!hlist_empty(&pp->owner)) {
pp 8246 net/sctp/socket.c if ((pp->fastreuse && reuse &&
pp 8248 net/sctp/socket.c (pp->fastreuseport && sk->sk_reuseport &&
pp 8249 net/sctp/socket.c uid_eq(pp->fastuid, uid)))
pp 8262 net/sctp/socket.c sk_for_each_bound(sk2, &pp->owner) {
pp 8285 net/sctp/socket.c if (!pp && !(pp = sctp_bucket_create(head, sock_net(sk), snum)))
pp 8292 net/sctp/socket.c if (hlist_empty(&pp->owner)) {
pp 8294 net/sctp/socket.c pp->fastreuse = 1;
pp 8296 net/sctp/socket.c pp->fastreuse = 0;
pp 8299 net/sctp/socket.c pp->fastreuseport = 1;
pp 8300 net/sctp/socket.c pp->fastuid = uid;
pp 8302 net/sctp/socket.c pp->fastreuseport = 0;
pp 8305 net/sctp/socket.c if (pp->fastreuse &&
pp 8307 net/sctp/socket.c pp->fastreuse = 0;
pp 8309 net/sctp/socket.c if (pp->fastreuseport &&
pp 8310 net/sctp/socket.c (!sk->sk_reuseport || !uid_eq(pp->fastuid, uid)))
pp 8311 net/sctp/socket.c pp->fastreuseport = 0;
pp 8321 net/sctp/socket.c sk_add_bind_node(sk, &pp->owner);
pp 8322 net/sctp/socket.c sp->bind_hash = pp;
pp 8536 net/sctp/socket.c struct sctp_bind_bucket *pp;
pp 8538 net/sctp/socket.c pp = kmem_cache_alloc(sctp_bucket_cachep, GFP_ATOMIC);
pp 8539 net/sctp/socket.c if (pp) {
pp 8541 net/sctp/socket.c pp->port = snum;
pp 8542 net/sctp/socket.c pp->fastreuse = 0;
pp 8543 net/sctp/socket.c INIT_HLIST_HEAD(&pp->owner);
pp 8544 net/sctp/socket.c pp->net = net;
pp 8545 net/sctp/socket.c hlist_add_head(&pp->node, &head->chain);
pp 8547 net/sctp/socket.c return pp;
pp 8551 net/sctp/socket.c static void sctp_bucket_destroy(struct sctp_bind_bucket *pp)
pp 8553 net/sctp/socket.c if (pp && hlist_empty(&pp->owner)) {
pp 8554 net/sctp/socket.c __hlist_del(&pp->node);
pp 8555 net/sctp/socket.c kmem_cache_free(sctp_bucket_cachep, pp);
pp 8566 net/sctp/socket.c struct sctp_bind_bucket *pp;
pp 8569 net/sctp/socket.c pp = sctp_sk(sk)->bind_hash;
pp 8573 net/sctp/socket.c sctp_bucket_destroy(pp);
pp 9363 net/sctp/socket.c struct sctp_bind_bucket *pp; /* hash list port iterator */
pp 9388 net/sctp/socket.c pp = sctp_sk(oldsk)->bind_hash;
pp 9389 net/sctp/socket.c sk_add_bind_node(newsk, &pp->owner);
pp 9390 net/sctp/socket.c sctp_sk(newsk)->bind_hash = pp;
pp 454 net/xfrm/xfrm_user.c struct xfrm_replay_state_esn *p, *pp, *up;
pp 468 net/xfrm/xfrm_user.c pp = kzalloc(klen, GFP_KERNEL);
pp 469 net/xfrm/xfrm_user.c if (!pp) {
pp 475 net/xfrm/xfrm_user.c memcpy(pp, up, ulen);
pp 478 net/xfrm/xfrm_user.c *preplay_esn = pp;
pp 548 scripts/dtc/checks.c struct property **pp, *prop = NULL;
pp 550 scripts/dtc/checks.c for (pp = &node->proplist; *pp; pp = &((*pp)->next))
pp 551 scripts/dtc/checks.c if (streq((*pp)->name, "name")) {
pp 552 scripts/dtc/checks.c prop = *pp;
pp 566 scripts/dtc/checks.c *pp = prop->next;
pp 1167 sound/pci/au88x0/au88x0_core.c int page, p, pp, delta, i;
pp 1190 sound/pci/au88x0/au88x0_core.c pp = dma->period_real + i;
pp 1191 sound/pci/au88x0/au88x0_core.c if (pp >= 4)
pp 1192 sound/pci/au88x0/au88x0_core.c pp -= 4;
pp 1195 sound/pci/au88x0/au88x0_core.c VORTEX_ADBDMA_BUFBASE + (((adbdma << 2) + pp) << 2),
pp 1200 sound/pci/au88x0/au88x0_core.c (((adbdma << 2) + pp) << 2));
pp 1218 sound/pci/au88x0/au88x0_core.c int p, pp, i;
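
sctp_get_port_local() above searches the port's hash chain for an existing bind bucket, creates one when nothing matched (pp 8285), and sctp_bucket_destroy() frees it once the owner list empties. A compressed find-or-create/destroy-when-idle sketch, with an owner count standing in for the kernel's hlist of owning sockets and the hashing omitted:

#include <stdio.h>
#include <stdlib.h>

struct bind_bucket {		/* stand-in for struct sctp_bind_bucket */
	unsigned short port;
	int owners;		/* the kernel keeps an hlist of owning socks */
	struct bind_bucket *next;
};

static struct bind_bucket *chain;

static struct bind_bucket *get_port(unsigned short snum)
{
	struct bind_bucket *pp;

	for (pp = chain; pp; pp = pp->next)	/* find... */
		if (pp->port == snum)
			break;
	if (!pp) {				/* ...or create */
		pp = calloc(1, sizeof(*pp));
		if (!pp)
			return NULL;
		pp->port = snum;
		pp->next = chain;
		chain = pp;
	}
	pp->owners++;
	return pp;
}

static void put_port(struct bind_bucket *pp)
{
	if (--pp->owners)
		return;
	/* last owner gone: unlink and free, as sctp_bucket_destroy() does */
	for (struct bind_bucket **link = &chain; *link; link = &(*link)->next)
		if (*link == pp) {
			*link = pp->next;
			break;
		}
	free(pp);
}

int main(void)
{
	struct bind_bucket *a = get_port(8080), *b = get_port(8080);

	printf("port %u, owners %d, shared: %s\n", a->port, a->owners,
	       a == b ? "yes" : "no");
	put_port(b);
	put_port(a);
	return 0;
}
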
pp 1227 sound/pci/au88x0/au88x0_core.c pp = dma->period_real + i;
pp 1229 sound/pci/au88x0/au88x0_core.c if (pp >= dma->nr_periods)
pp 1230 sound/pci/au88x0/au88x0_core.c pp -= dma->nr_periods;
pp 1233 sound/pci/au88x0/au88x0_core.c if (pp >= 4)
pp 1234 sound/pci/au88x0/au88x0_core.c pp -= 4;
pp 1237 sound/pci/au88x0/au88x0_core.c VORTEX_ADBDMA_BUFBASE + (((adbdma << 2) + pp) << 2),
pp 1241 sound/pci/au88x0/au88x0_core.c hwread(vortex->mmio, VORTEX_ADBDMA_BUFBASE + (((adbdma << 2)+pp) << 2));
pp 1438 sound/pci/au88x0/au88x0_core.c int page, p, pp, delta, i;
pp 1461 sound/pci/au88x0/au88x0_core.c pp = dma->period_real + i;
pp 1462 sound/pci/au88x0/au88x0_core.c if (pp >= 4)
pp 1463 sound/pci/au88x0/au88x0_core.c pp -= 4;
pp 1466 sound/pci/au88x0/au88x0_core.c (((wtdma << 2) + pp) << 2),
pp 1471 sound/pci/au88x0/au88x0_core.c (((wtdma << 2) + pp) << 2));
pp 3609 sound/pci/cs46xx/cs46xx_lib.c u8 pp;
pp 3619 sound/pci/cs46xx/cs46xx_lib.c pci_read_config_byte(pdev, 0x41, &pp);
pp 3620 sound/pci/cs46xx/cs46xx_lib.c chip->acpi_port = pp << 8;
pp 42 sound/soc/sh/rcar/dma.c struct rsnd_dmapp pp;
pp 55 sound/soc/sh/rcar/dma.c #define rsnd_dma_to_dmapp(dma) (&(dma)->dma.pp)
pp 45 sound/synth/emux/soundfont.c static void init_voice_parm(struct soundfont_voice_parm *pp);
pp 625 sound/synth/emux/soundfont.c init_voice_parm(struct soundfont_voice_parm *pp)
pp 627 sound/synth/emux/soundfont.c memset(pp, 0, sizeof(*pp));
pp 629 sound/synth/emux/soundfont.c pp->moddelay = 0x8000;
pp 630 sound/synth/emux/soundfont.c pp->modatkhld = 0x7f7f;
pp 631 sound/synth/emux/soundfont.c pp->moddcysus = 0x7f7f;
pp 632 sound/synth/emux/soundfont.c pp->modrelease = 0x807f;
pp 634 sound/synth/emux/soundfont.c pp->voldelay = 0x8000;
pp 635 sound/synth/emux/soundfont.c pp->volatkhld = 0x7f7f;
pp 636 sound/synth/emux/soundfont.c pp->voldcysus = 0x7f7f;
pp 637 sound/synth/emux/soundfont.c pp->volrelease = 0x807f;
pp 639 sound/synth/emux/soundfont.c pp->lfo1delay = 0x8000;
pp 640 sound/synth/emux/soundfont.c pp->lfo2delay = 0x8000;
pp 642 sound/synth/emux/soundfont.c pp->cutoff = 0xff;
pp 252 tools/arch/s390/include/uapi/asm/kvm.h __u64 pp; /* program parameter [ARCH0] */
pp 759 tools/firewire/nosy-dump.c struct phy_packet *pp = (struct phy_packet *) data;
pp 765 tools/firewire/nosy-dump.c switch (pp->common.identifier) {
pp 767 tools/firewire/nosy-dump.c if (!pp->phy_config.set_root && !pp->phy_config.set_gap_count) {
pp 768 tools/firewire/nosy-dump.c printf("ext phy config: phy_id=%02x", pp->phy_config.root_id);
pp 771 tools/firewire/nosy-dump.c if (pp->phy_config.set_root)
pp 772 tools/firewire/nosy-dump.c printf(" set_root_id=%02x", pp->phy_config.root_id);
pp 773 tools/firewire/nosy-dump.c if (pp->phy_config.set_gap_count)
pp 774 tools/firewire/nosy-dump.c printf(" set_gap_count=%d", pp->phy_config.gap_count);
pp 779 tools/firewire/nosy-dump.c printf("link-on packet, phy_id=%02x", pp->link_on.phy_id);
pp 783 tools/firewire/nosy-dump.c if (pp->self_id.extended) {
pp 785 tools/firewire/nosy-dump.c pp->ext_self_id.phy_id, pp->ext_self_id.sequence);
pp 791 tools/firewire/nosy-dump.c pp->self_id.phy_id,
pp 792 tools/firewire/nosy-dump.c (pp->self_id.link_active ? "active" : "not active"),
pp 793 tools/firewire/nosy-dump.c pp->self_id.gap_count,
pp 794 tools/firewire/nosy-dump.c speed_names[pp->self_id.phy_speed],
pp 795 tools/firewire/nosy-dump.c (pp->self_id.contender ? ", irm contender" : ""),
pp 796 tools/firewire/nosy-dump.c (pp->self_id.initiated_reset ? ", initiator" : ""));
pp 244 tools/perf/builtin-timechart.c struct per_pid *p, *pp;
pp 246 tools/perf/builtin-timechart.c pp = find_create_pid(tchart, ppid);
pp 248 tools/perf/builtin-timechart.c if (pp->current && pp->current->comm && !p->current)
pp 249 tools/perf/builtin-timechart.c pid_set_comm(tchart, pid, pp->current->comm);
pp 826 tools/perf/util/callchain.c struct rb_node *p, **pp;
pp 848 tools/perf/util/callchain.c pp = &p->rb_left;
pp 850 tools/perf/util/callchain.c pp = &p->rb_right;
pp 852 tools/perf/util/callchain.c rb_link_node(&new->rb_node_in, p, pp);
pp 21 tools/perf/util/expr.h int expr__parse(double *final_val, struct parse_ctx *ctx, const char **pp);
pp 18 tools/perf/util/expr.y %parse-param { const char **pp }
pp 19 tools/perf/util/expr.y %lex-param { const char **pp }
pp 39 tools/perf/util/expr.y static int expr__lex(YYSTYPE *res, const char **pp);
pp 43 tools/perf/util/expr.y const char **pp __maybe_unused,
pp 96 tools/perf/util/expr.y static int expr__symbol(YYSTYPE *res, const char *p, const char **pp)
pp 120 tools/perf/util/expr.y *pp = p;
pp 145 tools/perf/util/expr.y static int expr__lex(YYSTYPE *res, const char **pp)
pp 149 tools/perf/util/expr.y const char *p = *pp;
pp 158 tools/perf/util/expr.y return expr__symbol(res, p - 1, pp);
pp 167 tools/perf/util/expr.y *pp = p;
pp 217 tools/perf/util/probe-event.c static void clear_perf_probe_point(struct perf_probe_point *pp)
pp 219 tools/perf/util/probe-event.c zfree(&pp->file);
pp 220 tools/perf/util/probe-event.c zfree(&pp->function);
pp 221 tools/perf/util/probe-event.c zfree(&pp->lazy_line);
pp 353 tools/perf/util/probe-event.c struct perf_probe_point *pp,
pp 364 tools/perf/util/probe-event.c if (!pp->function || pp->file)
pp 372 tools/perf/util/probe-event.c map__for_each_symbol_by_name(map, pp->function, sym) {
pp 384 tools/perf/util/probe-event.c pp->function, address);
pp 391 tools/perf/util/probe-event.c result->offset += pp->offset;
pp 392 tools/perf/util/probe-event.c result->line += pp->line;
pp 393 tools/perf/util/probe-event.c result->retprobe = pp->retprobe;
pp 423 tools/perf/util/probe-event.c struct perf_probe_point pp = { .function = lr->function,
pp 433 tools/perf/util/probe-event.c ret = find_alternative_probe_point(dinfo, &pp, &result,
pp 441 tools/perf/util/probe-event.c clear_perf_probe_point(&pp);
pp 569 tools/perf/util/probe-event.c struct perf_probe_point *pp,
pp 602 tools/perf/util/probe-event.c (unsigned long)addr, pp);
pp 607 tools/perf/util/probe-event.c pp->retprobe = tp->retprobe;
pp 1143 tools/perf/util/probe-event.c struct perf_probe_point *pp __maybe_unused,
pp 1340 tools/perf/util/probe-event.c struct perf_probe_point *pp = &pev->point;
pp 1431 tools/perf/util/probe-event.c pp->file = tmp;
pp 1433 tools/perf/util/probe-event.c pp->function = tmp;
pp 1445 tools/perf/util/probe-event.c pp->abs_address = strtoul(pp->function, &tmp, 0);
pp 1458 tools/perf/util/probe-event.c pp->lazy_line = strdup(arg); /* let leave escapes */
pp 1459 tools/perf/util/probe-event.c if (pp->lazy_line == NULL)
pp 1470 tools/perf/util/probe-event.c pp->line = strtoul(arg, &tmp, 0);
pp 1478 tools/perf/util/probe-event.c pp->offset = strtoul(arg, &tmp, 0);
pp 1486 tools/perf/util/probe-event.c if (pp->file) {
pp 1490 tools/perf/util/probe-event.c pp->file = strdup_esc(arg);
pp 1491 tools/perf/util/probe-event.c if (pp->file == NULL)
pp 1496 tools/perf/util/probe-event.c pp->retprobe = 1;
pp 1511 tools/perf/util/probe-event.c if (pp->lazy_line && pp->line) {
pp 1517 tools/perf/util/probe-event.c if (pp->lazy_line && pp->offset) {
pp 1522 tools/perf/util/probe-event.c if (pp->line && pp->offset) {
pp 1527 tools/perf/util/probe-event.c if (!pp->line && !pp->lazy_line && pp->file && !pp->function) {
pp 1533 tools/perf/util/probe-event.c if (pp->offset && !pp->function) {
pp 1538 tools/perf/util/probe-event.c if ((pp->offset || pp->line || pp->lazy_line) && pp->retprobe) {
pp 1545 tools/perf/util/probe-event.c pp->function, pp->file, pp->line, pp->offset, pp->retprobe,
pp 1546 tools/perf/util/probe-event.c pp->lazy_line);
pp 1900 tools/perf/util/probe-event.c char *synthesize_perf_probe_point(struct perf_probe_point *pp)
pp 1909 tools/perf/util/probe-event.c if (pp->function) {
pp 1910 tools/perf/util/probe-event.c if (strbuf_addstr(&buf, pp->function) < 0)
pp 1912 tools/perf/util/probe-event.c if (pp->offset)
pp 1913 tools/perf/util/probe-event.c err = strbuf_addf(&buf, "+%lu", pp->offset);
pp 1914 tools/perf/util/probe-event.c else if (pp->line)
pp 1915 tools/perf/util/probe-event.c err = strbuf_addf(&buf, ":%d", pp->line);
pp 1916 tools/perf/util/probe-event.c else if (pp->retprobe)
pp 1921 tools/perf/util/probe-event.c if (pp->file) {
pp 1922 tools/perf/util/probe-event.c tmp = pp->file;
pp 1925 tools/perf/util/probe-event.c tmp = strchr(pp->file + len - 30, '/');
pp 1926 tools/perf/util/probe-event.c tmp = tmp ? tmp + 1 : pp->file + len - 30;
pp 1929 tools/perf/util/probe-event.c if (!err && !pp->function && pp->line)
pp 1930 tools/perf/util/probe-event.c err = strbuf_addf(&buf, ":%d", pp->line);
pp 2096 tools/perf/util/probe-event.c struct perf_probe_point *pp,
pp 2124 tools/perf/util/probe-event.c pp->retprobe = tp->retprobe;
pp 2125 tools/perf/util/probe-event.c pp->offset = addr - map->unmap_ip(map, sym->start);
pp 2126 tools/perf/util/probe-event.c pp->function = strdup(sym->name);
pp 2127 tools/perf/util/probe-event.c ret = pp->function ? 0 : -ENOMEM;
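
synthesize_perf_probe_point() above renders a probe point back to its textual form: FUNC plus one of +OFF, :LINE or %return, optionally followed by @FILE. A simplified snprintf-based rendition; the real code builds into perf's strbuf and shortens long file paths:

#include <stdio.h>

struct probe_point {		/* trimmed-down perf_probe_point */
	const char *function, *file;
	unsigned long offset;
	int line, retprobe;
};

static void synthesize(const struct probe_point *pp, char *buf, size_t len)
{
	int n = 0;

	if (pp->function) {
		n += snprintf(buf + n, len - n, "%s", pp->function);
		if (pp->offset)
			n += snprintf(buf + n, len - n, "+%lu", pp->offset);
		else if (pp->line)
			n += snprintf(buf + n, len - n, ":%d", pp->line);
		else if (pp->retprobe)
			n += snprintf(buf + n, len - n, "%%return");
	}
	if (pp->file)
		n += snprintf(buf + n, len - n, "@%s", pp->file);
	if (!pp->function && pp->line)
		snprintf(buf + n, len - n, ":%d", pp->line);
}

int main(void)
{
	struct probe_point pp = { .function = "vfs_read", .offset = 8 };
	char buf[128];

	synthesize(&pp, buf, sizeof(buf));
	printf("%s\n", buf);	/* vfs_read+8 */
	return 0;
}
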
pp 2138 tools/perf/util/probe-event.c struct perf_probe_point *pp,
pp 2144 tools/perf/util/probe-event.c ret = find_perf_probe_point_from_dwarf(tp, pp, is_kprobe);
pp 2147 tools/perf/util/probe-event.c ret = find_perf_probe_point_from_map(tp, pp, is_kprobe);
pp 2154 tools/perf/util/probe-event.c pp->function = strdup(tp->symbol);
pp 2155 tools/perf/util/probe-event.c pp->offset = tp->offset;
pp 2160 tools/perf/util/probe-event.c pp->function = strdup(buf);
pp 2161 tools/perf/util/probe-event.c pp->offset = 0;
pp 2163 tools/perf/util/probe-event.c if (pp->function == NULL)
pp 2166 tools/perf/util/probe-event.c pp->retprobe = tp->retprobe;
pp 2887 tools/perf/util/probe-event.c struct perf_probe_point *pp = &pev->point;
pp 2909 tools/perf/util/probe-event.c num_matched_functions = find_probe_functions(map, pp->function, syms);
pp 2911 tools/perf/util/probe-event.c pr_err("Failed to find symbol %s in %s\n", pp->function,
pp 2924 tools/perf/util/probe-event.c (!pp->retprobe || kretprobe_offset_is_supported())) {
pp 2953 tools/perf/util/probe-event.c if (pp->offset > sym->end - sym->start) {
pp 2955 tools/perf/util/probe-event.c pp->offset, sym->name);
pp 2960 tools/perf/util/probe-event.c tp->address = map->unmap_ip(map, sym->start) + pp->offset;
pp 2972 tools/perf/util/probe-event.c tp->offset = pp->offset;
pp 2976 tools/perf/util/probe-event.c tp->retprobe = pp->retprobe;
pp 3034 tools/perf/util/probe-event.c struct perf_probe_point *pp = &pev->point;
pp 3063 tools/perf/util/probe-event.c tp->retprobe = pp->retprobe;
pp 136 tools/perf/util/probe-event.h char *synthesize_perf_probe_point(struct perf_probe_point *pp);
pp 912 tools/perf/util/probe-finder.c struct perf_probe_point *pp = &pf->pev->point;
pp 927 tools/perf/util/probe-finder.c if (!pp->function || pp->line || pp->retprobe || pp->lazy_line ||
pp 928 tools/perf/util/probe-finder.c pp->offset || pp->abs_address)
pp 945 tools/perf/util/probe-finder.c struct perf_probe_point *pp = &pf->pev->point;
pp 949 tools/perf/util/probe-finder.c if (pp->lazy_line)
pp 964 tools/perf/util/probe-finder.c pf->addr += pp->offset;
pp 985 tools/perf/util/probe-finder.c struct perf_probe_point *pp = &pf->pev->point;
pp 989 tools/perf/util/probe-finder.c !die_match_name(sp_die, pp->function))
pp 993 tools/perf/util/probe-finder.c if (pp->file && strtailcmp(pp->file, dwarf_decl_file(sp_die)))
pp 999 tools/perf/util/probe-finder.c if (pp->line) { /* Function relative line */
pp 1001 tools/perf/util/probe-finder.c pf->lno += pp->line;
pp 1012 tools/perf/util/probe-finder.c } else if (pp->lazy_line)
pp 1016 tools/perf/util/probe-finder.c pf->addr += pp->offset;
pp 1030 tools/perf/util/probe-finder.c if (strisglob(pp->function) && param->retval >= 0) {
pp 1081 tools/perf/util/probe-finder.c struct perf_probe_point *pp = &pf->pev->point;
pp 1093 tools/perf/util/probe-finder.c if (pp->function && !strisglob(pp->function)) {
pp 1095 tools/perf/util/probe-finder.c .function = pp->function,
pp 1096 tools/perf/util/probe-finder.c .file = pp->file,
pp 1122 tools/perf/util/probe-finder.c if (pp->file)
pp 1123 tools/perf/util/probe-finder.c pf->fname = cu_find_realpath(&pf->cu_die, pp->file);
pp 1127 tools/perf/util/probe-finder.c if (!pp->file || pf->fname) {
pp 1128 tools/perf/util/probe-finder.c if (pp->function)
pp 1130 tools/perf/util/probe-finder.c else if (pp->lazy_line)
pp 1133 tools/perf/util/probe-finder.c pf->lno = pp->line;
pp 1272 tools/perf/util/probe-finder.c struct perf_probe_point *pp = &pf->pev->point;
pp 1295 tools/perf/util/probe-finder.c pp->retprobe, pp->function, &tev->point);
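
find_perf_probe_point() above tries its sources in order of fidelity: DWARF debuginfo first (pp 2144), then the symbol map (pp 2147), finally falling back to the raw tp->symbol plus offset. A sketch of that try-in-order structure, with stubbed-out resolvers that always fail so the fallback path runs:

#include <stdio.h>
#include <errno.h>

struct point {
	char function[64];
	unsigned long offset;
};

/* Stubs standing in for the dwarf/map lookups; both "fail" here. */
static int from_dwarf(struct point *pp) { (void)pp; return -ENOENT; }
static int from_map(struct point *pp)   { (void)pp; return -ENOENT; }

/* Try the richest source first and degrade gracefully, as
 * find_perf_probe_point() does. */
static int find_point(const char *symbol, unsigned long off, struct point *pp)
{
	if (from_dwarf(pp) == 0)
		return 0;
	if (from_map(pp) == 0)
		return 0;
	snprintf(pp->function, sizeof(pp->function), "%s", symbol);
	pp->offset = off;	/* raw fallback: symbol + offset */
	return 0;
}

int main(void)
{
	struct point pp;

	find_point("do_sys_open", 4, &pp);
	printf("%s+%lu\n", pp.function, pp.offset);
	return 0;
}
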
pp 1453 tools/perf/util/probe-finder.c struct perf_probe_point *pp = &pf->pev->point;
pp 1475 tools/perf/util/probe-finder.c pp->retprobe, pp->function, &vl->point);
pp 268 tools/power/x86/turbostat/turbostat.c struct msr_counter *pp;
pp 782 tools/power/x86/turbostat/turbostat.c for (mp = sys.pp; mp; mp = mp->next) {
pp 871 tools/power/x86/turbostat/turbostat.c for (i = 0, mp = sys.pp; mp; i++, mp = mp->next) {
pp 1152 tools/power/x86/turbostat/turbostat.c for (i = 0, mp = sys.pp; mp; i++, mp = mp->next) {
pp 1264 tools/power/x86/turbostat/turbostat.c for (i = 0, mp = sys.pp; mp; i++, mp = mp->next) {
pp 1478 tools/power/x86/turbostat/turbostat.c for (i = 0, mp = sys.pp; mp; i++, mp = mp->next)
pp 1573 tools/power/x86/turbostat/turbostat.c for (i = 0, mp = sys.pp; mp; i++, mp = mp->next) {
pp 1650 tools/power/x86/turbostat/turbostat.c for (i = 0, mp = sys.pp; mp; i++, mp = mp->next) {
pp 2015 tools/power/x86/turbostat/turbostat.c for (i = 0, mp = sys.pp; mp; i++, mp = mp->next) {
pp 5378 tools/power/x86/turbostat/turbostat.c msrp->next = sys.pp;
pp 5379 tools/power/x86/turbostat/turbostat.c sys.pp = msrp;
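
turbostat keeps user-added counters on the sys.pp list, pushed at the front (pp 5378-5379) and walked everywhere with for (mp = sys.pp; mp; mp = mp->next). A closing sketch of that push-front registration list, with a trimmed-down stand-in for turbostat's counter type:

#include <stdio.h>
#include <stdlib.h>

struct msr_counter {		/* trimmed-down turbostat counter */
	const char *name;
	struct msr_counter *next;
};

static struct {
	struct msr_counter *pp;	/* head of the added-counter list */
} sys;

static void add_counter(const char *name)
{
	struct msr_counter *msrp = calloc(1, sizeof(*msrp));

	msrp->name = name;
	msrp->next = sys.pp;	/* push-front; order is reverse of addition */
	sys.pp = msrp;
}

int main(void)
{
	add_counter("MSR_PKG_ENERGY");
	add_counter("MSR_CORE_TEMP");
	for (struct msr_counter *mp = sys.pp; mp; mp = mp->next)
		printf("%s\n", mp->name);
	return 0;
}
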